1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright (C) 2009-2014 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
5
6 This file is part of GAS.
7
8 GAS is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the license, or
11 (at your option) any later version.
12
13 GAS is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING3. If not,
20 see <http://www.gnu.org/licenses/>. */
21
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35
36 #include "dwarf2dbg.h"
37
38 /* Types of processor to assemble for. */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42
43 #define streq(a, b) (strcmp (a, b) == 0)
44
45 static aarch64_feature_set cpu_variant;
46
47 /* Variables that we set while parsing command-line options. Once all
48 options have been read we re-process these values to set the real
49 assembly flags. */
50 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
51 static const aarch64_feature_set *march_cpu_opt = NULL;
52
53 /* Constants for known architecture features. */
54 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
55
56 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
57 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
58
59 #ifdef OBJ_ELF
60 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
61 static symbolS *GOT_symbol;
62
63 /* Which ABI to use. */
64 enum aarch64_abi_type
65 {
66 AARCH64_ABI_LP64 = 0,
67 AARCH64_ABI_ILP32 = 1
68 };
69
70 /* AArch64 ABI for the output file. */
71 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
72
73 /* When non-zero, program to a 32-bit model, in which the C data types
74 int, long and all pointer types are 32-bit objects (ILP32); or to a
75 64-bit model, in which the C int type is 32-bits but the C long type
76 and all pointer types are 64-bit objects (LP64). */
77 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
78 #endif
79
80 enum neon_el_type
81 {
82 NT_invtype = -1,
83 NT_b,
84 NT_h,
85 NT_s,
86 NT_d,
87 NT_q
88 };
89
90 /* Bits for DEFINED field in neon_type_el. */
91 #define NTA_HASTYPE 1
92 #define NTA_HASINDEX 2
93
94 struct neon_type_el
95 {
96 enum neon_el_type type;
97 unsigned char defined;
98 unsigned width;
99 int64_t index;
100 };
101
102 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
103
104 struct reloc
105 {
106 bfd_reloc_code_real_type type;
107 expressionS exp;
108 int pc_rel;
109 enum aarch64_opnd opnd;
110 uint32_t flags;
111 unsigned need_libopcodes_p : 1;
112 };
113
114 struct aarch64_instruction
115 {
116 /* libopcodes structure for instruction intermediate representation. */
117 aarch64_inst base;
118 /* Record assembly errors found during the parsing. */
119 struct
120 {
121 enum aarch64_operand_error_kind kind;
122 const char *error;
123 } parsing_error;
124 /* The condition that appears in the assembly line. */
125 int cond;
126 /* Relocation information (including the GAS internal fixup). */
127 struct reloc reloc;
128 /* Need to generate an immediate in the literal pool. */
129 unsigned gen_lit_pool : 1;
130 };
131
132 typedef struct aarch64_instruction aarch64_instruction;
133
134 static aarch64_instruction inst;
135
136 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
137 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
138
 139 /* Diagnostics inline function utilities.
140
 141    These are lightweight utilities which should only be called by parse_operands
142 and other parsers. GAS processes each assembly line by parsing it against
 143    instruction template(s); in the case of multiple templates (for the same
144 mnemonic name), those templates are tried one by one until one succeeds or
145 all fail. An assembly line may fail a few templates before being
146 successfully parsed; an error saved here in most cases is not a user error
147 but an error indicating the current template is not the right template.
148 Therefore it is very important that errors can be saved at a low cost during
149 the parsing; we don't want to slow down the whole parsing by recording
150 non-user errors in detail.
151
 152    Remember that the objective is to help GAS pick up the most appropriate
153 error message in the case of multiple templates, e.g. FMOV which has 8
154 templates. */
155
156 static inline void
157 clear_error (void)
158 {
159 inst.parsing_error.kind = AARCH64_OPDE_NIL;
160 inst.parsing_error.error = NULL;
161 }
162
163 static inline bfd_boolean
164 error_p (void)
165 {
166 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
167 }
168
169 static inline const char *
170 get_error_message (void)
171 {
172 return inst.parsing_error.error;
173 }
174
175 static inline void
176 set_error_message (const char *error)
177 {
178 inst.parsing_error.error = error;
179 }
180
181 static inline enum aarch64_operand_error_kind
182 get_error_kind (void)
183 {
184 return inst.parsing_error.kind;
185 }
186
187 static inline void
188 set_error_kind (enum aarch64_operand_error_kind kind)
189 {
190 inst.parsing_error.kind = kind;
191 }
192
193 static inline void
194 set_error (enum aarch64_operand_error_kind kind, const char *error)
195 {
196 inst.parsing_error.kind = kind;
197 inst.parsing_error.error = error;
198 }
199
200 static inline void
201 set_recoverable_error (const char *error)
202 {
203 set_error (AARCH64_OPDE_RECOVERABLE, error);
204 }
205
206 /* Use the DESC field of the corresponding aarch64_operand entry to compose
207 the error message. */
208 static inline void
209 set_default_error (void)
210 {
211 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
212 }
213
214 static inline void
215 set_syntax_error (const char *error)
216 {
217 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
218 }
219
220 static inline void
221 set_first_syntax_error (const char *error)
222 {
223 if (! error_p ())
224 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
225 }
226
227 static inline void
228 set_fatal_syntax_error (const char *error)
229 {
230 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
231 }
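/* A minimal usage sketch (illustrative only, not part of the assembler):
   callers that try several templates for one mnemonic are expected to
   clear the recorded error before each attempt and to report whatever was
   recorded only after every template has failed.  The loop below is
   hypothetical and simplified; the real template iteration lives in the
   opcode-lookup/assemble code further down this file.  */
#if 0
static void
try_templates_sketch (const aarch64_opcode *templates, int count,
		      char *operands)
{
  int i;

  for (i = 0; i < count; i++)
    {
      clear_error ();			/* Start each attempt clean.  */
      if (parse_operands (operands, &templates[i]))
	return;				/* Success: no error left behind.  */
      /* Failure: parse_operands has recorded a cheap diagnostic via
	 set_default_error / set_syntax_error; keep trying.  */
    }

  if (error_p ())			/* All templates failed.  */
    as_bad ("%s", get_error_message () ? get_error_message ()
				       : _("unknown parsing error"));
}
#endif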
232 \f
233 /* Number of littlenums required to hold an extended precision number. */
234 #define MAX_LITTLENUMS 6
235
236 /* Return value for certain parsers when the parsing fails; those parsers
237 return the information of the parsed result, e.g. register number, on
238 success. */
239 #define PARSE_FAIL -1
240
241 /* This is an invalid condition code that means no conditional field is
242 present. */
243 #define COND_ALWAYS 0x10
244
245 typedef struct
246 {
247 const char *template;
248 unsigned long value;
249 } asm_barrier_opt;
250
251 typedef struct
252 {
253 const char *template;
254 uint32_t value;
255 } asm_nzcv;
256
257 struct reloc_entry
258 {
259 char *name;
260 bfd_reloc_code_real_type reloc;
261 };
262
263 /* Structure for a hash table entry for a register. */
264 typedef struct
265 {
266 const char *name;
267 unsigned char number;
268 unsigned char type;
269 unsigned char builtin;
270 } reg_entry;
271
272 /* Macros to define the register types and masks for the purpose
273 of parsing. */
274
275 #undef AARCH64_REG_TYPES
276 #define AARCH64_REG_TYPES \
277 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
278 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
279 BASIC_REG_TYPE(SP_32) /* wsp */ \
280 BASIC_REG_TYPE(SP_64) /* sp */ \
281 BASIC_REG_TYPE(Z_32) /* wzr */ \
282 BASIC_REG_TYPE(Z_64) /* xzr */ \
283 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
284 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
285 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
286 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
287 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
288 BASIC_REG_TYPE(CN) /* c[0-7] */ \
289 BASIC_REG_TYPE(VN) /* v[0-31] */ \
290 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
291 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
292 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
293 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
294 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
295 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
296 /* Typecheck: any [BHSDQ]P FP. */ \
297 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
298 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
299 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
300 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
301 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
302 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
303 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
304 /* Any integer register; used for error messages only. */ \
305 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
306 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
307 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
308 /* Pseudo type to mark the end of the enumerator sequence. */ \
309 BASIC_REG_TYPE(MAX)
310
311 #undef BASIC_REG_TYPE
312 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
313 #undef MULTI_REG_TYPE
314 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
315
316 /* Register type enumerators. */
317 typedef enum
318 {
319 /* A list of REG_TYPE_*. */
320 AARCH64_REG_TYPES
321 } aarch64_reg_type;
322
323 #undef BASIC_REG_TYPE
324 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
325 #undef REG_TYPE
326 #define REG_TYPE(T) (1 << REG_TYPE_##T)
327 #undef MULTI_REG_TYPE
328 #define MULTI_REG_TYPE(T,V) V,
329
330 /* Values indexed by aarch64_reg_type to assist the type checking. */
331 static const unsigned reg_type_masks[] =
332 {
333 AARCH64_REG_TYPES
334 };
335
336 #undef BASIC_REG_TYPE
337 #undef REG_TYPE
338 #undef MULTI_REG_TYPE
339 #undef AARCH64_REG_TYPES
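/* For illustration: after the expansions above, a basic type such as
   REG_TYPE_R_64 gets the single-bit mask (1 << REG_TYPE_R_64), while a
   multi type such as REG_TYPE_R64_SP gets REG_TYPE(R_64) | REG_TYPE(SP_64).
   A register REG is then acceptable where TYPE is required when

     (reg_type_masks[REG->type] & reg_type_masks[TYPE])
       == reg_type_masks[REG->type]

   which is exactly the membership test used by aarch64_check_reg_type
   below.  */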
340
341 /* Diagnostics used when we don't get a register of the expected type.
 342    Note: this has to be synchronized with aarch64_reg_type definitions
343 above. */
344 static const char *
345 get_reg_expected_msg (aarch64_reg_type reg_type)
346 {
347 const char *msg;
348
349 switch (reg_type)
350 {
351 case REG_TYPE_R_32:
352 msg = N_("integer 32-bit register expected");
353 break;
354 case REG_TYPE_R_64:
355 msg = N_("integer 64-bit register expected");
356 break;
357 case REG_TYPE_R_N:
358 msg = N_("integer register expected");
359 break;
360 case REG_TYPE_R_Z_SP:
361 msg = N_("integer, zero or SP register expected");
362 break;
363 case REG_TYPE_FP_B:
364 msg = N_("8-bit SIMD scalar register expected");
365 break;
366 case REG_TYPE_FP_H:
367 msg = N_("16-bit SIMD scalar or floating-point half precision "
368 "register expected");
369 break;
370 case REG_TYPE_FP_S:
371 msg = N_("32-bit SIMD scalar or floating-point single precision "
372 "register expected");
373 break;
374 case REG_TYPE_FP_D:
375 msg = N_("64-bit SIMD scalar or floating-point double precision "
376 "register expected");
377 break;
378 case REG_TYPE_FP_Q:
379 msg = N_("128-bit SIMD scalar or floating-point quad precision "
380 "register expected");
381 break;
382 case REG_TYPE_CN:
383 msg = N_("C0 - C15 expected");
384 break;
385 case REG_TYPE_R_Z_BHSDQ_V:
386 msg = N_("register expected");
387 break;
388 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
389 msg = N_("SIMD scalar or floating-point register expected");
390 break;
391 case REG_TYPE_VN: /* any V reg */
392 msg = N_("vector register expected");
393 break;
394 default:
395 as_fatal (_("invalid register type %d"), reg_type);
396 }
397 return msg;
398 }
399
400 /* Some well known registers that we refer to directly elsewhere. */
401 #define REG_SP 31
402
403 /* Instructions take 4 bytes in the object file. */
404 #define INSN_SIZE 4
405
406 /* Define some common error messages. */
407 #define BAD_SP _("SP not allowed here")
408
409 static struct hash_control *aarch64_ops_hsh;
410 static struct hash_control *aarch64_cond_hsh;
411 static struct hash_control *aarch64_shift_hsh;
412 static struct hash_control *aarch64_sys_regs_hsh;
413 static struct hash_control *aarch64_pstatefield_hsh;
414 static struct hash_control *aarch64_sys_regs_ic_hsh;
415 static struct hash_control *aarch64_sys_regs_dc_hsh;
416 static struct hash_control *aarch64_sys_regs_at_hsh;
417 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
418 static struct hash_control *aarch64_reg_hsh;
419 static struct hash_control *aarch64_barrier_opt_hsh;
420 static struct hash_control *aarch64_nzcv_hsh;
421 static struct hash_control *aarch64_pldop_hsh;
422
423 /* Stuff needed to resolve the label ambiguity
424 As:
425 ...
426 label: <insn>
427 may differ from:
428 ...
429 label:
430 <insn> */
431
432 static symbolS *last_label_seen;
433
434 /* Literal pool structure. Held on a per-section
435 and per-sub-section basis. */
436
437 #define MAX_LITERAL_POOL_SIZE 1024
438 typedef struct literal_pool
439 {
440 expressionS literals[MAX_LITERAL_POOL_SIZE];
441 unsigned int next_free_entry;
442 unsigned int id;
443 symbolS *symbol;
444 segT section;
445 subsegT sub_section;
446 int size;
447 struct literal_pool *next;
448 } literal_pool;
449
450 /* Pointer to a linked list of literal pools. */
451 static literal_pool *list_of_pools = NULL;
452 \f
453 /* Pure syntax. */
454
455 /* This array holds the chars that always start a comment. If the
456 pre-processor is disabled, these aren't very useful. */
457 const char comment_chars[] = "";
458
459 /* This array holds the chars that only start a comment at the beginning of
460 a line. If the line seems to have the form '# 123 filename'
461 .line and .file directives will appear in the pre-processed output. */
462 /* Note that input_file.c hand checks for '#' at the beginning of the
463 first line of the input file. This is because the compiler outputs
464 #NO_APP at the beginning of its output. */
465 /* Also note that comments like this one will always work. */
466 const char line_comment_chars[] = "#";
467
468 const char line_separator_chars[] = ";";
469
470 /* Chars that can be used to separate mant
471 from exp in floating point numbers. */
472 const char EXP_CHARS[] = "eE";
473
474 /* Chars that mean this number is a floating point constant. */
475 /* As in 0f12.456 */
476 /* or 0d1.2345e12 */
477
478 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
479
480 /* Prefix character that indicates the start of an immediate value. */
481 #define is_immediate_prefix(C) ((C) == '#')
482
483 /* Separator character handling. */
484
485 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
486
487 static inline bfd_boolean
488 skip_past_char (char **str, char c)
489 {
490 if (**str == c)
491 {
492 (*str)++;
493 return TRUE;
494 }
495 else
496 return FALSE;
497 }
498
499 #define skip_past_comma(str) skip_past_char (str, ',')
500
501 /* Arithmetic expressions (possibly involving symbols). */
502
503 static bfd_boolean in_my_get_expression_p = FALSE;
504
505 /* Third argument to my_get_expression. */
506 #define GE_NO_PREFIX 0
507 #define GE_OPT_PREFIX 1
508
 509 /* Return TRUE if the string pointed to by *STR is successfully parsed
 510    as a valid expression; *EP will be filled with the information of
511 such an expression. Otherwise return FALSE. */
512
513 static bfd_boolean
514 my_get_expression (expressionS * ep, char **str, int prefix_mode,
515 int reject_absent)
516 {
517 char *save_in;
518 segT seg;
519 int prefix_present_p = 0;
520
521 switch (prefix_mode)
522 {
523 case GE_NO_PREFIX:
524 break;
525 case GE_OPT_PREFIX:
526 if (is_immediate_prefix (**str))
527 {
528 (*str)++;
529 prefix_present_p = 1;
530 }
531 break;
532 default:
533 abort ();
534 }
535
536 memset (ep, 0, sizeof (expressionS));
537
538 save_in = input_line_pointer;
539 input_line_pointer = *str;
540 in_my_get_expression_p = TRUE;
541 seg = expression (ep);
542 in_my_get_expression_p = FALSE;
543
544 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
545 {
546 /* We found a bad expression in md_operand(). */
547 *str = input_line_pointer;
548 input_line_pointer = save_in;
549 if (prefix_present_p && ! error_p ())
550 set_fatal_syntax_error (_("bad expression"));
551 else
552 set_first_syntax_error (_("bad expression"));
553 return FALSE;
554 }
555
556 #ifdef OBJ_AOUT
557 if (seg != absolute_section
558 && seg != text_section
559 && seg != data_section
560 && seg != bss_section && seg != undefined_section)
561 {
562 set_syntax_error (_("bad segment"));
563 *str = input_line_pointer;
564 input_line_pointer = save_in;
565 return FALSE;
566 }
567 #else
568 (void) seg;
569 #endif
570
571 *str = input_line_pointer;
572 input_line_pointer = save_in;
573 return TRUE;
574 }
575
576 /* Turn a string in input_line_pointer into a floating point constant
577 of type TYPE, and store the appropriate bytes in *LITP. The number
578 of LITTLENUMS emitted is stored in *SIZEP. An error message is
579 returned, or NULL on OK. */
580
581 char *
582 md_atof (int type, char *litP, int *sizeP)
583 {
584 return ieee_md_atof (type, litP, sizeP, target_big_endian);
585 }
586
587 /* We handle all bad expressions here, so that we can report the faulty
588 instruction in the error message. */
589 void
590 md_operand (expressionS * exp)
591 {
592 if (in_my_get_expression_p)
593 exp->X_op = O_illegal;
594 }
595
596 /* Immediate values. */
597
598 /* Errors may be set multiple times during parsing or bit encoding
599 (particularly in the Neon bits), but usually the earliest error which is set
600 will be the most meaningful. Avoid overwriting it with later (cascading)
601 errors by calling this function. */
602
603 static void
604 first_error (const char *error)
605 {
606 if (! error_p ())
607 set_syntax_error (error);
608 }
609
 610 /* Similar to first_error, but this function accepts a formatted error
 611    message.  */
612 static void
613 first_error_fmt (const char *format, ...)
614 {
615 va_list args;
616 enum
617 { size = 100 };
618 /* N.B. this single buffer will not cause error messages for different
619 instructions to pollute each other; this is because at the end of
 620      processing of each assembly line, the error message, if any, will be
621 collected by as_bad. */
622 static char buffer[size];
623
624 if (! error_p ())
625 {
626 int ret ATTRIBUTE_UNUSED;
627 va_start (args, format);
628 ret = vsnprintf (buffer, size, format, args);
629 know (ret <= size - 1 && ret >= 0);
630 va_end (args);
631 set_syntax_error (buffer);
632 }
633 }
634
635 /* Register parsing. */
636
637 /* Generic register parser which is called by other specialized
638 register parsers.
639 CCP points to what should be the beginning of a register name.
640 If it is indeed a valid register name, advance CCP over it and
641 return the reg_entry structure; otherwise return NULL.
642 It does not issue diagnostics. */
643
644 static reg_entry *
645 parse_reg (char **ccp)
646 {
647 char *start = *ccp;
648 char *p;
649 reg_entry *reg;
650
651 #ifdef REGISTER_PREFIX
652 if (*start != REGISTER_PREFIX)
653 return NULL;
654 start++;
655 #endif
656
657 p = start;
658 if (!ISALPHA (*p) || !is_name_beginner (*p))
659 return NULL;
660
661 do
662 p++;
663 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
664
665 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
666
667 if (!reg)
668 return NULL;
669
670 *ccp = p;
671 return reg;
672 }
673
 674 /* Return TRUE if REG->TYPE is a compatible (sub)type of the required
 675    type TYPE; otherwise return FALSE.  */
676 static bfd_boolean
677 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
678 {
679 if (reg->type == type)
680 return TRUE;
681
682 switch (type)
683 {
684 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
685 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
686 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
687 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
688 case REG_TYPE_VN: /* Vector register. */
689 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
690 return ((reg_type_masks[reg->type] & reg_type_masks[type])
691 == reg_type_masks[reg->type]);
692 default:
693 as_fatal ("unhandled type %d", type);
694 abort ();
695 }
696 }
697
698 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
699 Return the register number otherwise. *ISREG32 is set to one if the
700 register is 32-bit wide; *ISREGZERO is set to one if the register is
701 of type Z_32 or Z_64.
702 Note that this function does not issue any diagnostics. */
703
704 static int
705 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
706 int *isreg32, int *isregzero)
707 {
708 char *str = *ccp;
709 const reg_entry *reg = parse_reg (&str);
710
711 if (reg == NULL)
712 return PARSE_FAIL;
713
714 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
715 return PARSE_FAIL;
716
717 switch (reg->type)
718 {
719 case REG_TYPE_SP_32:
720 case REG_TYPE_SP_64:
721 if (reject_sp)
722 return PARSE_FAIL;
723 *isreg32 = reg->type == REG_TYPE_SP_32;
724 *isregzero = 0;
725 break;
726 case REG_TYPE_R_32:
727 case REG_TYPE_R_64:
728 *isreg32 = reg->type == REG_TYPE_R_32;
729 *isregzero = 0;
730 break;
731 case REG_TYPE_Z_32:
732 case REG_TYPE_Z_64:
733 if (reject_rz)
734 return PARSE_FAIL;
735 *isreg32 = reg->type == REG_TYPE_Z_32;
736 *isregzero = 1;
737 break;
738 default:
739 return PARSE_FAIL;
740 }
741
742 *ccp = str;
743
744 return reg->number;
745 }
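/* Worked examples (illustrative): given "w3", aarch64_reg_parse_32_64
   returns 3 and sets *ISREG32 = 1, *ISREGZERO = 0; given "x5" it returns 5
   with *ISREG32 = 0.  "wsp"/"sp" fail when REJECT_SP is non-zero, and
   "wzr"/"xzr" (register number 31 in the encoding) fail when REJECT_RZ is
   non-zero, otherwise *ISREGZERO is set to 1.  */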
746
747 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
748 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
749 otherwise return FALSE.
750
751 Accept only one occurrence of:
752 8b 16b 4h 8h 2s 4s 1d 2d
753 b h s d q */
754 static bfd_boolean
755 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
756 {
757 char *ptr = *str;
758 unsigned width;
759 unsigned element_size;
760 enum neon_el_type type;
761
762 /* skip '.' */
763 ptr++;
764
765 if (!ISDIGIT (*ptr))
766 {
767 width = 0;
768 goto elt_size;
769 }
770 width = strtoul (ptr, &ptr, 10);
771 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
772 {
773 first_error_fmt (_("bad size %d in vector width specifier"), width);
774 return FALSE;
775 }
776
777 elt_size:
778 switch (TOLOWER (*ptr))
779 {
780 case 'b':
781 type = NT_b;
782 element_size = 8;
783 break;
784 case 'h':
785 type = NT_h;
786 element_size = 16;
787 break;
788 case 's':
789 type = NT_s;
790 element_size = 32;
791 break;
792 case 'd':
793 type = NT_d;
794 element_size = 64;
795 break;
796 case 'q':
797 if (width == 1)
798 {
799 type = NT_q;
800 element_size = 128;
801 break;
802 }
803 /* fall through. */
804 default:
805 if (*ptr != '\0')
806 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
807 else
808 first_error (_("missing element size"));
809 return FALSE;
810 }
811 if (width != 0 && width * element_size != 64 && width * element_size != 128)
812 {
813 first_error_fmt (_
814 ("invalid element size %d and vector size combination %c"),
815 width, *ptr);
816 return FALSE;
817 }
818 ptr++;
819
820 parsed_type->type = type;
821 parsed_type->width = width;
822
823 *str = ptr;
824
825 return TRUE;
826 }
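/* Worked examples (illustrative): ".4s" parses as width 4, type NT_s
   (element size 32), and 4 * 32 == 128 passes the total-size check;
   ".8h" gives 8 * 16 == 128; ".1d" gives 1 * 64 == 64.  A qualifier such
   as ".2h" (2 * 16 == 32) or ".3s" (bad width) is rejected, and a bare
   ".s" leaves width == 0, which parse_typed_reg below treats as a scalar
   element that must be followed by an index.  */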
827
828 /* Parse a single type, e.g. ".8b", leading period included.
829 Only applicable to Vn registers.
830
831 Return TRUE on success; otherwise return FALSE. */
832 static bfd_boolean
833 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
834 {
835 char *str = *ccp;
836
837 if (*str == '.')
838 {
839 if (! parse_neon_type_for_operand (vectype, &str))
840 {
841 first_error (_("vector type expected"));
842 return FALSE;
843 }
844 }
845 else
846 return FALSE;
847
848 *ccp = str;
849
850 return TRUE;
851 }
852
853 /* Parse a register of the type TYPE.
854
855 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
856 name or the parsed register is not of TYPE.
857
858 Otherwise return the register number, and optionally fill in the actual
859 type of the register in *RTYPE when multiple alternatives were given, and
860 return the register shape and element index information in *TYPEINFO.
861
862 IN_REG_LIST should be set with TRUE if the caller is parsing a register
863 list. */
864
865 static int
866 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
867 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
868 {
869 char *str = *ccp;
870 const reg_entry *reg = parse_reg (&str);
871 struct neon_type_el atype;
872 struct neon_type_el parsetype;
873 bfd_boolean is_typed_vecreg = FALSE;
874
875 atype.defined = 0;
876 atype.type = NT_invtype;
877 atype.width = -1;
878 atype.index = 0;
879
880 if (reg == NULL)
881 {
882 if (typeinfo)
883 *typeinfo = atype;
884 set_default_error ();
885 return PARSE_FAIL;
886 }
887
888 if (! aarch64_check_reg_type (reg, type))
889 {
890 DEBUG_TRACE ("reg type check failed");
891 set_default_error ();
892 return PARSE_FAIL;
893 }
894 type = reg->type;
895
896 if (type == REG_TYPE_VN
897 && parse_neon_operand_type (&parsetype, &str))
898 {
 899       /* Register is of the form Vn.[bhsdq].  */
900 is_typed_vecreg = TRUE;
901
902 if (parsetype.width == 0)
903 /* Expect index. In the new scheme we cannot have
904 Vn.[bhsdq] represent a scalar. Therefore any
905 Vn.[bhsdq] should have an index following it.
 906 	   Except in register lists, of course.  */
907 atype.defined |= NTA_HASINDEX;
908 else
909 atype.defined |= NTA_HASTYPE;
910
911 atype.type = parsetype.type;
912 atype.width = parsetype.width;
913 }
914
915 if (skip_past_char (&str, '['))
916 {
917 expressionS exp;
918
919 /* Reject Sn[index] syntax. */
920 if (!is_typed_vecreg)
921 {
922 first_error (_("this type of register can't be indexed"));
923 return PARSE_FAIL;
924 }
925
926 if (in_reg_list == TRUE)
927 {
928 first_error (_("index not allowed inside register list"));
929 return PARSE_FAIL;
930 }
931
932 atype.defined |= NTA_HASINDEX;
933
934 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
935
936 if (exp.X_op != O_constant)
937 {
938 first_error (_("constant expression required"));
939 return PARSE_FAIL;
940 }
941
942 if (! skip_past_char (&str, ']'))
943 return PARSE_FAIL;
944
945 atype.index = exp.X_add_number;
946 }
947 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
948 {
949 /* Indexed vector register expected. */
950 first_error (_("indexed vector register expected"));
951 return PARSE_FAIL;
952 }
953
954 /* A vector reg Vn should be typed or indexed. */
955 if (type == REG_TYPE_VN && atype.defined == 0)
956 {
957 first_error (_("invalid use of vector register"));
958 }
959
960 if (typeinfo)
961 *typeinfo = atype;
962
963 if (rtype)
964 *rtype = type;
965
966 *ccp = str;
967
968 return reg->number;
969 }
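/* Worked examples (illustrative): "v5.4s" returns 5 with *TYPEINFO
   describing {NT_s, width 4, NTA_HASTYPE}; "v0.s[2]" returns 0 with
   NTA_HASINDEX set and index 2; "b7[1]" is rejected ("this type of
   register can't be indexed"), and a bare "v3" with no qualifier or index
   draws "invalid use of vector register".  */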
970
971 /* Parse register.
972
973 Return the register number on success; return PARSE_FAIL otherwise.
974
975 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
976 the register (e.g. NEON double or quad reg when either has been requested).
977
978 If this is a NEON vector register with additional type information, fill
979 in the struct pointed to by VECTYPE (if non-NULL).
980
 981    This parser does not handle register lists.  */
982
983 static int
984 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
985 aarch64_reg_type *rtype, struct neon_type_el *vectype)
986 {
987 struct neon_type_el atype;
988 char *str = *ccp;
989 int reg = parse_typed_reg (&str, type, rtype, &atype,
990 /*in_reg_list= */ FALSE);
991
992 if (reg == PARSE_FAIL)
993 return PARSE_FAIL;
994
995 if (vectype)
996 *vectype = atype;
997
998 *ccp = str;
999
1000 return reg;
1001 }
1002
1003 static inline bfd_boolean
1004 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1005 {
1006 return
1007 e1.type == e2.type
1008 && e1.defined == e2.defined
1009 && e1.width == e2.width && e1.index == e2.index;
1010 }
1011
1012 /* This function parses the NEON register list. On success, it returns
1013 the parsed register list information in the following encoded format:
1014
1015      bit   17-21   |   12-16   |   7-11    |    2-6    |      0-1
1016          4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg - 1
1017
1018 The information of the register shape and/or index is returned in
1019 *VECTYPE.
1020
1021 It returns PARSE_FAIL if the register list is invalid.
1022
1023 The list contains one to four registers.
1024 Each register can be one of:
1025 <Vt>.<T>[<index>]
1026 <Vt>.<T>
1027 All <T> should be identical.
1028 All <index> should be identical.
1029 There are restrictions on <Vt> numbers which are checked later
1030 (by reg_list_valid_p). */
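   Worked example (illustrative): for the list "{v2.4s, v3.4s, v4.4s}" the
   function returns

     (4 << 12) | (3 << 7) | (2 << 2) | (3 - 1)   ==  0x418a

   i.e. the three register numbers in the 5-bit fields described above and
   the register count minus one in bits 0-1, while *VECTYPE records the
   shared ".4s" shape.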
1031
1032 static int
1033 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1034 {
1035 char *str = *ccp;
1036 int nb_regs;
1037 struct neon_type_el typeinfo, typeinfo_first;
1038 int val, val_range;
1039 int in_range;
1040 int ret_val;
1041 int i;
1042 bfd_boolean error = FALSE;
1043 bfd_boolean expect_index = FALSE;
1044
1045 if (*str != '{')
1046 {
1047 set_syntax_error (_("expecting {"));
1048 return PARSE_FAIL;
1049 }
1050 str++;
1051
1052 nb_regs = 0;
1053 typeinfo_first.defined = 0;
1054 typeinfo_first.type = NT_invtype;
1055 typeinfo_first.width = -1;
1056 typeinfo_first.index = 0;
1057 ret_val = 0;
1058 val = -1;
1059 val_range = -1;
1060 in_range = 0;
1061 do
1062 {
1063 if (in_range)
1064 {
1065 str++; /* skip over '-' */
1066 val_range = val;
1067 }
1068 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1069 /*in_reg_list= */ TRUE);
1070 if (val == PARSE_FAIL)
1071 {
1072 set_first_syntax_error (_("invalid vector register in list"));
1073 error = TRUE;
1074 continue;
1075 }
1076 /* reject [bhsd]n */
1077 if (typeinfo.defined == 0)
1078 {
1079 set_first_syntax_error (_("invalid scalar register in list"));
1080 error = TRUE;
1081 continue;
1082 }
1083
1084 if (typeinfo.defined & NTA_HASINDEX)
1085 expect_index = TRUE;
1086
1087 if (in_range)
1088 {
1089 if (val < val_range)
1090 {
1091 set_first_syntax_error
1092 (_("invalid range in vector register list"));
1093 error = TRUE;
1094 }
1095 val_range++;
1096 }
1097 else
1098 {
1099 val_range = val;
1100 if (nb_regs == 0)
1101 typeinfo_first = typeinfo;
1102 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1103 {
1104 set_first_syntax_error
1105 (_("type mismatch in vector register list"));
1106 error = TRUE;
1107 }
1108 }
1109 if (! error)
1110 for (i = val_range; i <= val; i++)
1111 {
1112 ret_val |= i << (5 * nb_regs);
1113 nb_regs++;
1114 }
1115 in_range = 0;
1116 }
1117 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1118
1119 skip_whitespace (str);
1120 if (*str != '}')
1121 {
1122 set_first_syntax_error (_("end of vector register list not found"));
1123 error = TRUE;
1124 }
1125 str++;
1126
1127 skip_whitespace (str);
1128
1129 if (expect_index)
1130 {
1131 if (skip_past_char (&str, '['))
1132 {
1133 expressionS exp;
1134
1135 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1136 if (exp.X_op != O_constant)
1137 {
1138 set_first_syntax_error (_("constant expression required."));
1139 error = TRUE;
1140 }
1141 if (! skip_past_char (&str, ']'))
1142 error = TRUE;
1143 else
1144 typeinfo_first.index = exp.X_add_number;
1145 }
1146 else
1147 {
1148 set_first_syntax_error (_("expected index"));
1149 error = TRUE;
1150 }
1151 }
1152
1153 if (nb_regs > 4)
1154 {
1155 set_first_syntax_error (_("too many registers in vector register list"));
1156 error = TRUE;
1157 }
1158 else if (nb_regs == 0)
1159 {
1160 set_first_syntax_error (_("empty vector register list"));
1161 error = TRUE;
1162 }
1163
1164 *ccp = str;
1165 if (! error)
1166 *vectype = typeinfo_first;
1167
1168 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1169 }
1170
1171 /* Directives: register aliases. */
1172
1173 static reg_entry *
1174 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1175 {
1176 reg_entry *new;
1177 const char *name;
1178
1179 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1180 {
1181 if (new->builtin)
1182 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1183 str);
1184
1185 /* Only warn about a redefinition if it's not defined as the
1186 same register. */
1187 else if (new->number != number || new->type != type)
1188 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1189
1190 return NULL;
1191 }
1192
1193 name = xstrdup (str);
1194 new = xmalloc (sizeof (reg_entry));
1195
1196 new->name = name;
1197 new->number = number;
1198 new->type = type;
1199 new->builtin = FALSE;
1200
1201 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1202 abort ();
1203
1204 return new;
1205 }
1206
1207 /* Look for the .req directive. This is of the form:
1208
1209 new_register_name .req existing_register_name
1210
1211 If we find one, or if it looks sufficiently like one that we want to
1212 handle any error here, return TRUE. Otherwise return FALSE. */
1213
1214 static bfd_boolean
1215 create_register_alias (char *newname, char *p)
1216 {
1217 const reg_entry *old;
1218 char *oldname, *nbuf;
1219 size_t nlen;
1220
1221 /* The input scrubber ensures that whitespace after the mnemonic is
1222 collapsed to single spaces. */
1223 oldname = p;
1224 if (strncmp (oldname, " .req ", 6) != 0)
1225 return FALSE;
1226
1227 oldname += 6;
1228 if (*oldname == '\0')
1229 return FALSE;
1230
1231 old = hash_find (aarch64_reg_hsh, oldname);
1232 if (!old)
1233 {
1234 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1235 return TRUE;
1236 }
1237
1238 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1239 the desired alias name, and p points to its end. If not, then
1240 the desired alias name is in the global original_case_string. */
1241 #ifdef TC_CASE_SENSITIVE
1242 nlen = p - newname;
1243 #else
1244 newname = original_case_string;
1245 nlen = strlen (newname);
1246 #endif
1247
1248 nbuf = alloca (nlen + 1);
1249 memcpy (nbuf, newname, nlen);
1250 nbuf[nlen] = '\0';
1251
1252 /* Create aliases under the new name as stated; an all-lowercase
1253 version of the new name; and an all-uppercase version of the new
1254 name. */
1255 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1256 {
1257 for (p = nbuf; *p; p++)
1258 *p = TOUPPER (*p);
1259
1260 if (strncmp (nbuf, newname, nlen))
1261 {
1262 /* If this attempt to create an additional alias fails, do not bother
1263 trying to create the all-lower case alias. We will fail and issue
1264 a second, duplicate error message. This situation arises when the
1265 programmer does something like:
1266 foo .req r0
1267 Foo .req r1
1268 The second .req creates the "Foo" alias but then fails to create
1269 the artificial FOO alias because it has already been created by the
1270 first .req. */
1271 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1272 return TRUE;
1273 }
1274
1275 for (p = nbuf; *p; p++)
1276 *p = TOLOWER (*p);
1277
1278 if (strncmp (nbuf, newname, nlen))
1279 insert_reg_alias (nbuf, old->number, old->type);
1280 }
1281
1282 return TRUE;
1283 }
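/* For example (illustrative input), the line

       ip0 .req x16

   reaches create_register_alias with NEWNAME = "ip0" and P pointing at
   " .req x16"; the alias "ip0" is entered in aarch64_reg_hsh with the
   number and type of x16, and the all-upper-case and all-lower-case
   spellings ("IP0") are added as well when they differ from the name as
   written.  */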
1284
1285 /* Should never be called, as .req goes between the alias and the
1286 register name, not at the beginning of the line. */
1287 static void
1288 s_req (int a ATTRIBUTE_UNUSED)
1289 {
1290 as_bad (_("invalid syntax for .req directive"));
1291 }
1292
1293 /* The .unreq directive deletes an alias which was previously defined
1294 by .req. For example:
1295
1296 my_alias .req r11
1297 .unreq my_alias */
1298
1299 static void
1300 s_unreq (int a ATTRIBUTE_UNUSED)
1301 {
1302 char *name;
1303 char saved_char;
1304
1305 name = input_line_pointer;
1306
1307 while (*input_line_pointer != 0
1308 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1309 ++input_line_pointer;
1310
1311 saved_char = *input_line_pointer;
1312 *input_line_pointer = 0;
1313
1314 if (!*name)
1315 as_bad (_("invalid syntax for .unreq directive"));
1316 else
1317 {
1318 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1319
1320 if (!reg)
1321 as_bad (_("unknown register alias '%s'"), name);
1322 else if (reg->builtin)
1323 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1324 name);
1325 else
1326 {
1327 char *p;
1328 char *nbuf;
1329
1330 hash_delete (aarch64_reg_hsh, name, FALSE);
1331 free ((char *) reg->name);
1332 free (reg);
1333
1334 /* Also locate the all upper case and all lower case versions.
1335 Do not complain if we cannot find one or the other as it
1336 was probably deleted above. */
1337
1338 nbuf = strdup (name);
1339 for (p = nbuf; *p; p++)
1340 *p = TOUPPER (*p);
1341 reg = hash_find (aarch64_reg_hsh, nbuf);
1342 if (reg)
1343 {
1344 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1345 free ((char *) reg->name);
1346 free (reg);
1347 }
1348
1349 for (p = nbuf; *p; p++)
1350 *p = TOLOWER (*p);
1351 reg = hash_find (aarch64_reg_hsh, nbuf);
1352 if (reg)
1353 {
1354 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1355 free ((char *) reg->name);
1356 free (reg);
1357 }
1358
1359 free (nbuf);
1360 }
1361 }
1362
1363 *input_line_pointer = saved_char;
1364 demand_empty_rest_of_line ();
1365 }
1366
1367 /* Directives: Instruction set selection. */
1368
1369 #ifdef OBJ_ELF
1370 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1371 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1372    Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1373    and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
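/* For example (illustrative), assembling

       .text
       add   x0, x0, #1        // "$x" emitted at the start of the code
       .word 0x12345678        // "$d" emitted before the inline data
       ret                     // "$x" emitted again when code resumes

   leaves alternating $x/$d mapping symbols so that disassemblers and
   linkers can tell instructions from data.  */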
1374
1375 /* Create a new mapping symbol for the transition to STATE. */
1376
1377 static void
1378 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1379 {
1380 symbolS *symbolP;
1381 const char *symname;
1382 int type;
1383
1384 switch (state)
1385 {
1386 case MAP_DATA:
1387 symname = "$d";
1388 type = BSF_NO_FLAGS;
1389 break;
1390 case MAP_INSN:
1391 symname = "$x";
1392 type = BSF_NO_FLAGS;
1393 break;
1394 default:
1395 abort ();
1396 }
1397
1398 symbolP = symbol_new (symname, now_seg, value, frag);
1399 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1400
1401 /* Save the mapping symbols for future reference. Also check that
1402 we do not place two mapping symbols at the same offset within a
1403 frag. We'll handle overlap between frags in
1404 check_mapping_symbols.
1405
1406 If .fill or other data filling directive generates zero sized data,
1407 the mapping symbol for the following code will have the same value
1408 as the one generated for the data filling directive. In this case,
1409 we replace the old symbol with the new one at the same address. */
1410 if (value == 0)
1411 {
1412 if (frag->tc_frag_data.first_map != NULL)
1413 {
1414 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1415 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1416 &symbol_lastP);
1417 }
1418 frag->tc_frag_data.first_map = symbolP;
1419 }
1420 if (frag->tc_frag_data.last_map != NULL)
1421 {
1422 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1423 S_GET_VALUE (symbolP));
1424 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1425 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1426 &symbol_lastP);
1427 }
1428 frag->tc_frag_data.last_map = symbolP;
1429 }
1430
1431 /* We must sometimes convert a region marked as code to data during
1432 code alignment, if an odd number of bytes have to be padded. The
1433 code mapping symbol is pushed to an aligned address. */
1434
1435 static void
1436 insert_data_mapping_symbol (enum mstate state,
1437 valueT value, fragS * frag, offsetT bytes)
1438 {
1439 /* If there was already a mapping symbol, remove it. */
1440 if (frag->tc_frag_data.last_map != NULL
1441 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1442 frag->fr_address + value)
1443 {
1444 symbolS *symp = frag->tc_frag_data.last_map;
1445
1446 if (value == 0)
1447 {
1448 know (frag->tc_frag_data.first_map == symp);
1449 frag->tc_frag_data.first_map = NULL;
1450 }
1451 frag->tc_frag_data.last_map = NULL;
1452 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1453 }
1454
1455 make_mapping_symbol (MAP_DATA, value, frag);
1456 make_mapping_symbol (state, value + bytes, frag);
1457 }
1458
1459 static void mapping_state_2 (enum mstate state, int max_chars);
1460
1461 /* Set the mapping state to STATE. Only call this when about to
1462 emit some STATE bytes to the file. */
1463
1464 void
1465 mapping_state (enum mstate state)
1466 {
1467 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1468
1469 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1470
1471 if (mapstate == state)
1472 /* The mapping symbol has already been emitted.
1473 There is nothing else to do. */
1474 return;
1475 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1476 /* This case will be evaluated later in the next else. */
1477 return;
1478 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1479 {
1480 /* Only add the symbol if the offset is > 0:
1481 	 if we're at the first frag, check its size > 0;
1482 if we're not at the first frag, then for sure
1483 the offset is > 0. */
1484 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1485 const int add_symbol = (frag_now != frag_first)
1486 || (frag_now_fix () > 0);
1487
1488 if (add_symbol)
1489 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1490 }
1491
1492 mapping_state_2 (state, 0);
1493 #undef TRANSITION
1494 }
1495
1496 /* Same as mapping_state, but MAX_CHARS bytes have already been
1497 allocated. Put the mapping symbol that far back. */
1498
1499 static void
1500 mapping_state_2 (enum mstate state, int max_chars)
1501 {
1502 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1503
1504 if (!SEG_NORMAL (now_seg))
1505 return;
1506
1507 if (mapstate == state)
1508 /* The mapping symbol has already been emitted.
1509 There is nothing else to do. */
1510 return;
1511
1512 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1513 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1514 }
1515 #else
1516 #define mapping_state(x) /* nothing */
1517 #define mapping_state_2(x, y) /* nothing */
1518 #endif
1519
1520 /* Directives: sectioning and alignment. */
1521
1522 static void
1523 s_bss (int ignore ATTRIBUTE_UNUSED)
1524 {
1525 /* We don't support putting frags in the BSS segment, we fake it by
1526 marking in_bss, then looking at s_skip for clues. */
1527 subseg_set (bss_section, 0);
1528 demand_empty_rest_of_line ();
1529 mapping_state (MAP_DATA);
1530 }
1531
1532 static void
1533 s_even (int ignore ATTRIBUTE_UNUSED)
1534 {
1535 /* Never make frag if expect extra pass. */
1536 if (!need_pass_2)
1537 frag_align (1, 0, 0);
1538
1539 record_alignment (now_seg, 1);
1540
1541 demand_empty_rest_of_line ();
1542 }
1543
1544 /* Directives: Literal pools. */
1545
1546 static literal_pool *
1547 find_literal_pool (int size)
1548 {
1549 literal_pool *pool;
1550
1551 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1552 {
1553 if (pool->section == now_seg
1554 && pool->sub_section == now_subseg && pool->size == size)
1555 break;
1556 }
1557
1558 return pool;
1559 }
1560
1561 static literal_pool *
1562 find_or_make_literal_pool (int size)
1563 {
1564 /* Next literal pool ID number. */
1565 static unsigned int latest_pool_num = 1;
1566 literal_pool *pool;
1567
1568 pool = find_literal_pool (size);
1569
1570 if (pool == NULL)
1571 {
1572 /* Create a new pool. */
1573 pool = xmalloc (sizeof (*pool));
1574 if (!pool)
1575 return NULL;
1576
1577 /* Currently we always put the literal pool in the current text
1578 section. If we were generating "small" model code where we
1579 knew that all code and initialised data was within 1MB then
1580 we could output literals to mergeable, read-only data
1581 sections. */
1582
1583 pool->next_free_entry = 0;
1584 pool->section = now_seg;
1585 pool->sub_section = now_subseg;
1586 pool->size = size;
1587 pool->next = list_of_pools;
1588 pool->symbol = NULL;
1589
1590 /* Add it to the list. */
1591 list_of_pools = pool;
1592 }
1593
1594 /* New pools, and emptied pools, will have a NULL symbol. */
1595 if (pool->symbol == NULL)
1596 {
1597 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1598 (valueT) 0, &zero_address_frag);
1599 pool->id = latest_pool_num++;
1600 }
1601
1602 /* Done. */
1603 return pool;
1604 }
1605
1606 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1607 Return TRUE on success, otherwise return FALSE. */
1608 static bfd_boolean
1609 add_to_lit_pool (expressionS *exp, int size)
1610 {
1611 literal_pool *pool;
1612 unsigned int entry;
1613
1614 pool = find_or_make_literal_pool (size);
1615
1616 /* Check if this literal value is already in the pool. */
1617 for (entry = 0; entry < pool->next_free_entry; entry++)
1618 {
1619 if ((pool->literals[entry].X_op == exp->X_op)
1620 && (exp->X_op == O_constant)
1621 && (pool->literals[entry].X_add_number == exp->X_add_number)
1622 && (pool->literals[entry].X_unsigned == exp->X_unsigned))
1623 break;
1624
1625 if ((pool->literals[entry].X_op == exp->X_op)
1626 && (exp->X_op == O_symbol)
1627 && (pool->literals[entry].X_add_number == exp->X_add_number)
1628 && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
1629 && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
1630 break;
1631 }
1632
1633 /* Do we need to create a new entry? */
1634 if (entry == pool->next_free_entry)
1635 {
1636 if (entry >= MAX_LITERAL_POOL_SIZE)
1637 {
1638 set_syntax_error (_("literal pool overflow"));
1639 return FALSE;
1640 }
1641
1642 pool->literals[entry] = *exp;
1643 pool->next_free_entry += 1;
1644 }
1645
1646 exp->X_op = O_symbol;
1647 exp->X_add_number = ((int) entry) * size;
1648 exp->X_add_symbol = pool->symbol;
1649
1650 return TRUE;
1651 }
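/* Illustrative use (the LDR-literal handling itself lives elsewhere in
   this file): for a programmer-friendly load such as

       ldr   x0, =0xdeadbeefcafef00d

   the 64-bit constant is passed to add_to_lit_pool (exp, 8); the
   expression is rewritten to "<pool symbol> + entry * 8" so the LDR
   becomes a literal load, and the constant itself is emitted later by
   s_ltorg (".ltorg"/".pool") below.  */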
1652
1653 /* Can't use symbol_new here, so have to create a symbol and then at
1654    a later date assign it a value.  That's what these functions do.  */
1655
1656 static void
1657 symbol_locate (symbolS * symbolP,
1658 const char *name,/* It is copied, the caller can modify. */
1659 segT segment, /* Segment identifier (SEG_<something>). */
1660 valueT valu, /* Symbol value. */
1661 fragS * frag) /* Associated fragment. */
1662 {
1663 unsigned int name_length;
1664 char *preserved_copy_of_name;
1665
1666 name_length = strlen (name) + 1; /* +1 for \0. */
1667 obstack_grow (&notes, name, name_length);
1668 preserved_copy_of_name = obstack_finish (&notes);
1669
1670 #ifdef tc_canonicalize_symbol_name
1671 preserved_copy_of_name =
1672 tc_canonicalize_symbol_name (preserved_copy_of_name);
1673 #endif
1674
1675 S_SET_NAME (symbolP, preserved_copy_of_name);
1676
1677 S_SET_SEGMENT (symbolP, segment);
1678 S_SET_VALUE (symbolP, valu);
1679 symbol_clear_list_pointers (symbolP);
1680
1681 symbol_set_frag (symbolP, frag);
1682
1683 /* Link to end of symbol chain. */
1684 {
1685 extern int symbol_table_frozen;
1686
1687 if (symbol_table_frozen)
1688 abort ();
1689 }
1690
1691 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1692
1693 obj_symbol_new_hook (symbolP);
1694
1695 #ifdef tc_symbol_new_hook
1696 tc_symbol_new_hook (symbolP);
1697 #endif
1698
1699 #ifdef DEBUG_SYMS
1700 verify_symbol_chain (symbol_rootP, symbol_lastP);
1701 #endif /* DEBUG_SYMS */
1702 }
1703
1704
1705 static void
1706 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1707 {
1708 unsigned int entry;
1709 literal_pool *pool;
1710 char sym_name[20];
1711 int align;
1712
1713 for (align = 2; align <= 4; align++)
1714 {
1715 int size = 1 << align;
1716
1717 pool = find_literal_pool (size);
1718 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1719 continue;
1720
1721 mapping_state (MAP_DATA);
1722
1723       /* Align the pool to its entry size.
1724 Only make a frag if we have to. */
1725 if (!need_pass_2)
1726 frag_align (align, 0, 0);
1727
1728 record_alignment (now_seg, align);
1729
1730 sprintf (sym_name, "$$lit_\002%x", pool->id);
1731
1732 symbol_locate (pool->symbol, sym_name, now_seg,
1733 (valueT) frag_now_fix (), frag_now);
1734 symbol_table_insert (pool->symbol);
1735
1736 for (entry = 0; entry < pool->next_free_entry; entry++)
1737 /* First output the expression in the instruction to the pool. */
1738 emit_expr (&(pool->literals[entry]), size); /* .word|.xword */
1739
1740 /* Mark the pool as empty. */
1741 pool->next_free_entry = 0;
1742 pool->symbol = NULL;
1743 }
1744 }
1745
1746 #ifdef OBJ_ELF
1747 /* Forward declarations for functions below, in the MD interface
1748 section. */
1749 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1750 static struct reloc_table_entry * find_reloc_table_entry (char **);
1751
1752 /* Directives: Data. */
1753 /* N.B. the support for relocation suffix in this directive needs to be
1754 implemented properly. */
1755
1756 static void
1757 s_aarch64_elf_cons (int nbytes)
1758 {
1759 expressionS exp;
1760
1761 #ifdef md_flush_pending_output
1762 md_flush_pending_output ();
1763 #endif
1764
1765 if (is_it_end_of_statement ())
1766 {
1767 demand_empty_rest_of_line ();
1768 return;
1769 }
1770
1771 #ifdef md_cons_align
1772 md_cons_align (nbytes);
1773 #endif
1774
1775 mapping_state (MAP_DATA);
1776 do
1777 {
1778 struct reloc_table_entry *reloc;
1779
1780 expression (&exp);
1781
1782 if (exp.X_op != O_symbol)
1783 emit_expr (&exp, (unsigned int) nbytes);
1784 else
1785 {
1786 skip_past_char (&input_line_pointer, '#');
1787 if (skip_past_char (&input_line_pointer, ':'))
1788 {
1789 reloc = find_reloc_table_entry (&input_line_pointer);
1790 if (reloc == NULL)
1791 as_bad (_("unrecognized relocation suffix"));
1792 else
1793 as_bad (_("unimplemented relocation suffix"));
1794 ignore_rest_of_line ();
1795 return;
1796 }
1797 else
1798 emit_expr (&exp, (unsigned int) nbytes);
1799 }
1800 }
1801 while (*input_line_pointer++ == ',');
1802
1803 /* Put terminator back into stream. */
1804 input_line_pointer--;
1805 demand_empty_rest_of_line ();
1806 }
1807
1808 #endif /* OBJ_ELF */
1809
1810 /* Output a 32-bit word, but mark as an instruction. */
1811
1812 static void
1813 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1814 {
1815 expressionS exp;
1816
1817 #ifdef md_flush_pending_output
1818 md_flush_pending_output ();
1819 #endif
1820
1821 if (is_it_end_of_statement ())
1822 {
1823 demand_empty_rest_of_line ();
1824 return;
1825 }
1826
1827 if (!need_pass_2)
1828 frag_align_code (2, 0);
1829 #ifdef OBJ_ELF
1830 mapping_state (MAP_INSN);
1831 #endif
1832
1833 do
1834 {
1835 expression (&exp);
1836 if (exp.X_op != O_constant)
1837 {
1838 as_bad (_("constant expression required"));
1839 ignore_rest_of_line ();
1840 return;
1841 }
1842
1843 if (target_big_endian)
1844 {
1845 unsigned int val = exp.X_add_number;
1846 exp.X_add_number = SWAP_32 (val);
1847 }
1848 emit_expr (&exp, 4);
1849 }
1850 while (*input_line_pointer++ == ',');
1851
1852 /* Put terminator back into stream. */
1853 input_line_pointer--;
1854 demand_empty_rest_of_line ();
1855 }
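/* For example (illustrative), the directive

       .inst 0xd503201f

   emits the 4-byte NOP encoding as data bytes but keeps the "$x" mapping
   state, so the word is still treated as an instruction; on a big-endian
   target the value is byte-swapped first (SWAP_32 above).  */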
1856
1857 #ifdef OBJ_ELF
1858 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1859
1860 static void
1861 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1862 {
1863 expressionS exp;
1864
1865 /* Since we're just labelling the code, there's no need to define a
1866 mapping symbol. */
1867 expression (&exp);
1868 /* Make sure there is enough room in this frag for the following
1869 blr. This trick only works if the blr follows immediately after
1870      the .tlsdesccall directive.  */
1871 frag_grow (4);
1872 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1873 BFD_RELOC_AARCH64_TLSDESC_CALL);
1874
1875 demand_empty_rest_of_line ();
1876 }
1877 #endif /* OBJ_ELF */
1878
1879 static void s_aarch64_arch (int);
1880 static void s_aarch64_cpu (int);
1881
1882 /* This table describes all the machine specific pseudo-ops the assembler
1883 has to support. The fields are:
1884 pseudo-op name without dot
1885 function to call to execute this pseudo-op
1886 Integer arg to pass to the function. */
1887
1888 const pseudo_typeS md_pseudo_table[] = {
1889 /* Never called because '.req' does not start a line. */
1890 {"req", s_req, 0},
1891 {"unreq", s_unreq, 0},
1892 {"bss", s_bss, 0},
1893 {"even", s_even, 0},
1894 {"ltorg", s_ltorg, 0},
1895 {"pool", s_ltorg, 0},
1896 {"cpu", s_aarch64_cpu, 0},
1897 {"arch", s_aarch64_arch, 0},
1898 {"inst", s_aarch64_inst, 0},
1899 #ifdef OBJ_ELF
1900 {"tlsdesccall", s_tlsdesccall, 0},
1901 {"word", s_aarch64_elf_cons, 4},
1902 {"long", s_aarch64_elf_cons, 4},
1903 {"xword", s_aarch64_elf_cons, 8},
1904 {"dword", s_aarch64_elf_cons, 8},
1905 #endif
1906 {0, 0, 0}
1907 };
1908 \f
1909
1910 /* Check whether STR points to a register name followed by a comma or the
1911 end of line; REG_TYPE indicates which register types are checked
1912 against. Return TRUE if STR is such a register name; otherwise return
1913 FALSE. The function does not intend to produce any diagnostics, but since
1914 the register parser aarch64_reg_parse, which is called by this function,
1915 does produce diagnostics, we call clear_error to clear any diagnostics
1916 that may be generated by aarch64_reg_parse.
1917 Also, the function returns FALSE directly if there is any user error
1918 present at the function entry. This prevents the existing diagnostics
1919 state from being spoiled.
1920 The function currently serves parse_constant_immediate and
1921 parse_big_immediate only. */
1922 static bfd_boolean
1923 reg_name_p (char *str, aarch64_reg_type reg_type)
1924 {
1925 int reg;
1926
1927 /* Prevent the diagnostics state from being spoiled. */
1928 if (error_p ())
1929 return FALSE;
1930
1931 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1932
1933 /* Clear the parsing error that may be set by the reg parser. */
1934 clear_error ();
1935
1936 if (reg == PARSE_FAIL)
1937 return FALSE;
1938
1939 skip_whitespace (str);
1940 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1941 return TRUE;
1942
1943 return FALSE;
1944 }
1945
1946 /* Parser functions used exclusively in instruction operands. */
1947
1948 /* Parse an immediate expression which may not be constant.
1949
1950 To prevent the expression parser from pushing a register name
1951 into the symbol table as an undefined symbol, firstly a check is
1952 done to find out whether STR is a valid register name followed
1953 by a comma or the end of line. Return FALSE if STR is such a
1954 string. */
1955
1956 static bfd_boolean
1957 parse_immediate_expression (char **str, expressionS *exp)
1958 {
1959 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1960 {
1961 set_recoverable_error (_("immediate operand required"));
1962 return FALSE;
1963 }
1964
1965 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
1966
1967 if (exp->X_op == O_absent)
1968 {
1969 set_fatal_syntax_error (_("missing immediate expression"));
1970 return FALSE;
1971 }
1972
1973 return TRUE;
1974 }
1975
1976 /* Constant immediate-value read function for use in insn parsing.
1977 STR points to the beginning of the immediate (with the optional
1978 leading #); *VAL receives the value.
1979
1980 Return TRUE on success; otherwise return FALSE. */
1981
1982 static bfd_boolean
1983 parse_constant_immediate (char **str, int64_t * val)
1984 {
1985 expressionS exp;
1986
1987 if (! parse_immediate_expression (str, &exp))
1988 return FALSE;
1989
1990 if (exp.X_op != O_constant)
1991 {
1992 set_syntax_error (_("constant expression required"));
1993 return FALSE;
1994 }
1995
1996 *val = exp.X_add_number;
1997 return TRUE;
1998 }
1999
2000 static uint32_t
2001 encode_imm_float_bits (uint32_t imm)
2002 {
2003 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2004 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2005 }
2006
2007 /* Return TRUE if the single-precision floating-point value encoded in IMM
2008 can be expressed in the AArch64 8-bit signed floating-point format with
2009 3-bit exponent and normalized 4 bits of precision; in other words, the
2010    floating-point value must be expressible as
2011      (+/-) n / 16 * power (2, r)
2012    where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */
2013
2014 static bfd_boolean
2015 aarch64_imm_float_p (uint32_t imm)
2016 {
2017 /* If a single-precision floating-point value has the following bit
2018 pattern, it can be expressed in the AArch64 8-bit floating-point
2019 format:
2020
2021 3 32222222 2221111111111
2022 1 09876543 21098765432109876543210
2023 n Eeeeeexx xxxx0000000000000000000
2024
2025 where n, e and each x are either 0 or 1 independently, with
2026 E == ~ e. */
2027
2028 uint32_t pattern;
2029
2030 /* Prepare the pattern for 'Eeeeee'. */
2031 if (((imm >> 30) & 0x1) == 0)
2032 pattern = 0x3e000000;
2033 else
2034 pattern = 0x40000000;
2035
2036 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2037 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2038 }
2039
2040 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2041
2042 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2043 8-bit signed floating-point format with 3-bit exponent and normalized 4
2044 bits of precision (i.e. can be used in an FMOV instruction); return the
2045 equivalent single-precision encoding in *FPWORD.
2046
2047 Otherwise return FALSE. */
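/* Worked example: 2.0 in IEEE754 double precision is 0x4000000000000000;
   its lower 32 bits are zero and the high word 0x40000000 matches the
   pattern, so the function returns TRUE and sets *FPWORD to 0x40000000,
   the single-precision encoding of 2.0. */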
2048
2049 static bfd_boolean
2050 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2051 {
2052 /* If a double-precision floating-point value has the following bit
2053 pattern, it can be expressed in the AArch64 8-bit floating-point
2054 format:
2055
2056 6 66655555555 554444444...21111111111
2057 3 21098765432 109876543...098765432109876543210
2058 n Eeeeeeeeexx xxxx00000...000000000000000000000
2059
2060 where n, e and each x are either 0 or 1 independently, with
2061 E == ~ e. */
2062
2063 uint32_t pattern;
2064 uint32_t high32 = imm >> 32;
2065
2066 /* Lower 32 bits need to be 0s. */
2067 if ((imm & 0xffffffff) != 0)
2068 return FALSE;
2069
2070 /* Prepare the pattern for 'Eeeeeeeee'. */
2071 if (((high32 >> 30) & 0x1) == 0)
2072 pattern = 0x3fc00000;
2073 else
2074 pattern = 0x40000000;
2075
2076 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2077 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2078 {
2079 /* Convert to the single-precision encoding.
2080 i.e. convert
2081 n Eeeeeeeeexx xxxx00000...000000000000000000000
2082 to
2083 n Eeeeeexx xxxx0000000000000000000. */
2084 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2085 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2086 return TRUE;
2087 }
2088 else
2089 return FALSE;
2090 }
2091
2092 /* Parse a floating-point immediate. Return TRUE on success and return the
2093 value in *IMMED in the format of IEEE754 single-precision encoding.
2094 *CCP points to the start of the string; DP_P is TRUE when the immediate
2095 is expected to be in double-precision (N.B. this only matters when
2096 hexadecimal representation is involved).
2097
2098 N.B. 0.0 is accepted by this function. */
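/* For example, "#2.0", "#0x40000000" (DP_P FALSE) and
   "#0x4000000000000000" (DP_P TRUE) all set *IMMED to 0x40000000,
   the IEEE754 single-precision encoding of 2.0. */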
2099
2100 static bfd_boolean
2101 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2102 {
2103 char *str = *ccp;
2104 char *fpnum;
2105 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2106 int found_fpchar = 0;
2107 int64_t val = 0;
2108 unsigned fpword = 0;
2109 bfd_boolean hex_p = FALSE;
2110
2111 skip_past_char (&str, '#');
2112
2113 fpnum = str;
2114 skip_whitespace (fpnum);
2115
2116 if (strncmp (fpnum, "0x", 2) == 0)
2117 {
2118 /* Support the hexadecimal representation of the IEEE754 encoding.
2119 Double-precision is expected when DP_P is TRUE, otherwise the
2120 representation should be in single-precision. */
2121 if (! parse_constant_immediate (&str, &val))
2122 goto invalid_fp;
2123
2124 if (dp_p)
2125 {
2126 if (! aarch64_double_precision_fmovable (val, &fpword))
2127 goto invalid_fp;
2128 }
2129 else if ((uint64_t) val > 0xffffffff)
2130 goto invalid_fp;
2131 else
2132 fpword = val;
2133
2134 hex_p = TRUE;
2135 }
2136 else
2137 {
2138 /* We must not accidentally parse an integer as a floating-point number.
2139 Make sure that the value we parse is not an integer by checking for
2140 special characters '.', 'e' or 'E'. */
2141 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2142 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2143 {
2144 found_fpchar = 1;
2145 break;
2146 }
2147
2148 if (!found_fpchar)
2149 return FALSE;
2150 }
2151
2152 if (! hex_p)
2153 {
2154 int i;
2155
2156 if ((str = atof_ieee (str, 's', words)) == NULL)
2157 goto invalid_fp;
2158
2159 /* Our FP word must be 32 bits (single-precision FP). */
2160 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2161 {
2162 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2163 fpword |= words[i];
2164 }
2165 }
2166
2167 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2168 {
2169 *immed = fpword;
2170 *ccp = str;
2171 return TRUE;
2172 }
2173
2174 invalid_fp:
2175 set_fatal_syntax_error (_("invalid floating-point constant"));
2176 return FALSE;
2177 }
2178
2179 /* Less-generic immediate-value read function with the possibility of loading
2180 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2181 instructions.
2182
2183 To prevent the expression parser from pushing a register name into the
2184 symbol table as an undefined symbol, a check is first done to find
2185 out whether STR is a valid register name followed by a comma or the end
2186 of line. Return FALSE if STR is such a register. */
2187
2188 static bfd_boolean
2189 parse_big_immediate (char **str, int64_t *imm)
2190 {
2191 char *ptr = *str;
2192
2193 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2194 {
2195 set_syntax_error (_("immediate operand required"));
2196 return FALSE;
2197 }
2198
2199 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2200
2201 if (inst.reloc.exp.X_op == O_constant)
2202 *imm = inst.reloc.exp.X_add_number;
2203
2204 *str = ptr;
2205
2206 return TRUE;
2207 }
2208
2209 /* Record in RELOC that OPERAND needs a GAS internal fixup.
2210 If NEED_LIBOPCODES_P is non-zero, the fixup will need
2211 assistance from libopcodes. */
2212
2213 static inline void
2214 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2215 const aarch64_opnd_info *operand,
2216 int need_libopcodes_p)
2217 {
2218 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2219 reloc->opnd = operand->type;
2220 if (need_libopcodes_p)
2221 reloc->need_libopcodes_p = 1;
2222 }
2223
2224 /* Return TRUE if the instruction needs to be fixed up later internally by
2225 the GAS; otherwise return FALSE. */
2226
2227 static inline bfd_boolean
2228 aarch64_gas_internal_fixup_p (void)
2229 {
2230 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2231 }
2232
2233 /* Assign the immediate value to the relevant field in *OPERAND if
2234 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2235 needs an internal fixup in a later stage.
2236 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2237 IMM.VALUE that may get assigned with the constant. */
2238 static inline void
2239 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2240 aarch64_opnd_info *operand,
2241 int addr_off_p,
2242 int need_libopcodes_p,
2243 int skip_p)
2244 {
2245 if (reloc->exp.X_op == O_constant)
2246 {
2247 if (addr_off_p)
2248 operand->addr.offset.imm = reloc->exp.X_add_number;
2249 else
2250 operand->imm.value = reloc->exp.X_add_number;
2251 reloc->type = BFD_RELOC_UNUSED;
2252 }
2253 else
2254 {
2255 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2256 /* Tell libopcodes to ignore this operand or not. This is helpful
2257 when one of the operands needs to be fixed up later but we need
2258 libopcodes to check the other operands. */
2259 operand->skip = skip_p;
2260 }
2261 }
2262
2263 /* Relocation modifiers. Each entry in the table contains the textual
2264 name for the relocation which may be placed before a symbol used as
2265 a load/store offset, or add immediate. It must be surrounded by a
2266 leading and trailing colon, for example:
2267
2268 ldr x0, [x1, #:rello:varsym]
2269 add x0, x1, #:rello:varsym */
2270
2271 struct reloc_table_entry
2272 {
2273 const char *name;
2274 int pc_rel;
2275 bfd_reloc_code_real_type adrp_type;
2276 bfd_reloc_code_real_type movw_type;
2277 bfd_reloc_code_real_type add_type;
2278 bfd_reloc_code_real_type ldst_type;
2279 };
2280
2281 static struct reloc_table_entry reloc_table[] = {
2282 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2283 {"lo12", 0,
2284 0,
2285 0,
2286 BFD_RELOC_AARCH64_ADD_LO12,
2287 BFD_RELOC_AARCH64_LDST_LO12},
2288
2289 /* Higher 21 bits of pc-relative page offset: ADRP */
2290 {"pg_hi21", 1,
2291 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2292 0,
2293 0,
2294 0},
2295
2296 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2297 {"pg_hi21_nc", 1,
2298 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2299 0,
2300 0,
2301 0},
2302
2303 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2304 {"abs_g0", 0,
2305 0,
2306 BFD_RELOC_AARCH64_MOVW_G0,
2307 0,
2308 0},
2309
2310 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2311 {"abs_g0_s", 0,
2312 0,
2313 BFD_RELOC_AARCH64_MOVW_G0_S,
2314 0,
2315 0},
2316
2317 /* Less significant bits 0-15 of address/value: MOVK, no check */
2318 {"abs_g0_nc", 0,
2319 0,
2320 BFD_RELOC_AARCH64_MOVW_G0_NC,
2321 0,
2322 0},
2323
2324 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2325 {"abs_g1", 0,
2326 0,
2327 BFD_RELOC_AARCH64_MOVW_G1,
2328 0,
2329 0},
2330
2331 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2332 {"abs_g1_s", 0,
2333 0,
2334 BFD_RELOC_AARCH64_MOVW_G1_S,
2335 0,
2336 0},
2337
2338 /* Less significant bits 16-31 of address/value: MOVK, no check */
2339 {"abs_g1_nc", 0,
2340 0,
2341 BFD_RELOC_AARCH64_MOVW_G1_NC,
2342 0,
2343 0},
2344
2345 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2346 {"abs_g2", 0,
2347 0,
2348 BFD_RELOC_AARCH64_MOVW_G2,
2349 0,
2350 0},
2351
2352 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2353 {"abs_g2_s", 0,
2354 0,
2355 BFD_RELOC_AARCH64_MOVW_G2_S,
2356 0,
2357 0},
2358
2359 /* Less significant bits 32-47 of address/value: MOVK, no check */
2360 {"abs_g2_nc", 0,
2361 0,
2362 BFD_RELOC_AARCH64_MOVW_G2_NC,
2363 0,
2364 0},
2365
2366 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2367 {"abs_g3", 0,
2368 0,
2369 BFD_RELOC_AARCH64_MOVW_G3,
2370 0,
2371 0},
2372
2373 /* Get to the page containing GOT entry for a symbol. */
2374 {"got", 1,
2375 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2376 0,
2377 0,
2378 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2379
2380 /* 12 bit offset into the page containing GOT entry for that symbol. */
2381 {"got_lo12", 0,
2382 0,
2383 0,
2384 0,
2385 BFD_RELOC_AARCH64_LD_GOT_LO12_NC},
2386
2387 /* Get to the page containing GOT TLS entry for a symbol */
2388 {"tlsgd", 0,
2389 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2390 0,
2391 0,
2392 0},
2393
2394 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2395 {"tlsgd_lo12", 0,
2396 0,
2397 0,
2398 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2399 0},
2400
2401 /* Get to the page containing GOT TLS entry for a symbol */
2402 {"tlsdesc", 0,
2403 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2404 0,
2405 0,
2406 0},
2407
2408 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2409 {"tlsdesc_lo12", 0,
2410 0,
2411 0,
2412 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2413 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC},
2414
2415 /* Get to the page containing GOT TLS entry for a symbol */
2416 {"gottprel", 0,
2417 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2418 0,
2419 0,
2420 0},
2421
2422 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2423 {"gottprel_lo12", 0,
2424 0,
2425 0,
2426 0,
2427 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC},
2428
2429 /* Get tp offset for a symbol. */
2430 {"tprel", 0,
2431 0,
2432 0,
2433 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2434 0},
2435
2436 /* Get tp offset for a symbol. */
2437 {"tprel_lo12", 0,
2438 0,
2439 0,
2440 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2441 0},
2442
2443 /* Get tp offset for a symbol. */
2444 {"tprel_hi12", 0,
2445 0,
2446 0,
2447 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2448 0},
2449
2450 /* Get tp offset for a symbol. */
2451 {"tprel_lo12_nc", 0,
2452 0,
2453 0,
2454 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2455 0},
2456
2457 /* Most significant bits 32-47 of address/value: MOVZ. */
2458 {"tprel_g2", 0,
2459 0,
2460 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2461 0,
2462 0},
2463
2464 /* Most significant bits 16-31 of address/value: MOVZ. */
2465 {"tprel_g1", 0,
2466 0,
2467 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2468 0,
2469 0},
2470
2471 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2472 {"tprel_g1_nc", 0,
2473 0,
2474 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2475 0,
2476 0},
2477
2478 /* Most significant bits 0-15 of address/value: MOVZ. */
2479 {"tprel_g0", 0,
2480 0,
2481 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2482 0,
2483 0},
2484
2485 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2486 {"tprel_g0_nc", 0,
2487 0,
2488 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2489 0,
2490 0},
2491 };
2492
2493 /* Given the address of a pointer pointing to the textual name of a
2494 relocation as may appear in assembler source, attempt to find its
2495 details in reloc_table. The pointer will be updated to the character
2496 after the trailing colon. On failure, NULL will be returned;
2497 otherwise return the reloc_table_entry. */
2498
2499 static struct reloc_table_entry *
2500 find_reloc_table_entry (char **str)
2501 {
2502 unsigned int i;
2503 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2504 {
2505 int length = strlen (reloc_table[i].name);
2506
2507 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2508 && (*str)[length] == ':')
2509 {
2510 *str += (length + 1);
2511 return &reloc_table[i];
2512 }
2513 }
2514
2515 return NULL;
2516 }
2517
2518 /* Mode argument to parse_shift and parser_shifter_operand. */
2519 enum parse_shift_mode
2520 {
2521 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2522 "#imm{,lsl #n}" */
2523 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2524 "#imm" */
2525 SHIFTED_LSL, /* bare "lsl #n" */
2526 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2527 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2528 };
2529
2530 /* Parse a <shift> operator on an AArch64 data processing instruction.
2531 Return TRUE on success; otherwise return FALSE. */
2532 static bfd_boolean
2533 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2534 {
2535 const struct aarch64_name_value_pair *shift_op;
2536 enum aarch64_modifier_kind kind;
2537 expressionS exp;
2538 int exp_has_prefix;
2539 char *s = *str;
2540 char *p = s;
2541
2542 for (p = *str; ISALPHA (*p); p++)
2543 ;
2544
2545 if (p == *str)
2546 {
2547 set_syntax_error (_("shift expression expected"));
2548 return FALSE;
2549 }
2550
2551 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2552
2553 if (shift_op == NULL)
2554 {
2555 set_syntax_error (_("shift operator expected"));
2556 return FALSE;
2557 }
2558
2559 kind = aarch64_get_operand_modifier (shift_op);
2560
2561 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2562 {
2563 set_syntax_error (_("invalid use of 'MSL'"));
2564 return FALSE;
2565 }
2566
2567 switch (mode)
2568 {
2569 case SHIFTED_LOGIC_IMM:
2570 if (aarch64_extend_operator_p (kind) == TRUE)
2571 {
2572 set_syntax_error (_("extending shift is not permitted"));
2573 return FALSE;
2574 }
2575 break;
2576
2577 case SHIFTED_ARITH_IMM:
2578 if (kind == AARCH64_MOD_ROR)
2579 {
2580 set_syntax_error (_("'ROR' shift is not permitted"));
2581 return FALSE;
2582 }
2583 break;
2584
2585 case SHIFTED_LSL:
2586 if (kind != AARCH64_MOD_LSL)
2587 {
2588 set_syntax_error (_("only 'LSL' shift is permitted"));
2589 return FALSE;
2590 }
2591 break;
2592
2593 case SHIFTED_REG_OFFSET:
2594 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2595 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2596 {
2597 set_fatal_syntax_error
2598 (_("invalid shift for the register offset addressing mode"));
2599 return FALSE;
2600 }
2601 break;
2602
2603 case SHIFTED_LSL_MSL:
2604 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2605 {
2606 set_syntax_error (_("invalid shift operator"));
2607 return FALSE;
2608 }
2609 break;
2610
2611 default:
2612 abort ();
2613 }
2614
2615 /* Whitespace can appear here if the next thing is a bare digit. */
2616 skip_whitespace (p);
2617
2618 /* Parse shift amount. */
2619 exp_has_prefix = 0;
2620 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2621 exp.X_op = O_absent;
2622 else
2623 {
2624 if (is_immediate_prefix (*p))
2625 {
2626 p++;
2627 exp_has_prefix = 1;
2628 }
2629 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2630 }
2631 if (exp.X_op == O_absent)
2632 {
2633 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2634 {
2635 set_syntax_error (_("missing shift amount"));
2636 return FALSE;
2637 }
2638 operand->shifter.amount = 0;
2639 }
2640 else if (exp.X_op != O_constant)
2641 {
2642 set_syntax_error (_("constant shift amount required"));
2643 return FALSE;
2644 }
2645 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2646 {
2647 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2648 return FALSE;
2649 }
2650 else
2651 {
2652 operand->shifter.amount = exp.X_add_number;
2653 operand->shifter.amount_present = 1;
2654 }
2655
2656 operand->shifter.operator_present = 1;
2657 operand->shifter.kind = kind;
2658
2659 *str = p;
2660 return TRUE;
2661 }
2662
2663 /* Parse a <shifter_operand> for a data processing instruction:
2664
2665 #<immediate>
2666 #<immediate>, LSL #imm
2667
2668 Validation of immediate operands is deferred to md_apply_fix.
2669
2670 Return TRUE on success; otherwise return FALSE. */
2671
2672 static bfd_boolean
2673 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2674 enum parse_shift_mode mode)
2675 {
2676 char *p;
2677
2678 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2679 return FALSE;
2680
2681 p = *str;
2682
2683 /* Accept an immediate expression. */
2684 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2685 return FALSE;
2686
2687 /* Accept optional LSL for arithmetic immediate values. */
2688 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2689 if (! parse_shift (&p, operand, SHIFTED_LSL))
2690 return FALSE;
2691
2692 /* Do not accept any shifter for logical immediate values. */
2693 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2694 && parse_shift (&p, operand, mode))
2695 {
2696 set_syntax_error (_("unexpected shift operator"));
2697 return FALSE;
2698 }
2699
2700 *str = p;
2701 return TRUE;
2702 }
2703
2704 /* Parse a <shifter_operand> for a data processing instruction:
2705
2706 <Rm>
2707 <Rm>, <shift>
2708 #<immediate>
2709 #<immediate>, LSL #imm
2710
2711 where <shift> is handled by parse_shift above, and the last two
2712 cases are handled by the function above.
2713
2714 Validation of immediate operands is deferred to md_apply_fix.
2715
2716 Return TRUE on success; otherwise return FALSE. */
2717
2718 static bfd_boolean
2719 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2720 enum parse_shift_mode mode)
2721 {
2722 int reg;
2723 int isreg32, isregzero;
2724 enum aarch64_operand_class opd_class
2725 = aarch64_get_operand_class (operand->type);
2726
2727 if ((reg =
2728 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2729 {
2730 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2731 {
2732 set_syntax_error (_("unexpected register in the immediate operand"));
2733 return FALSE;
2734 }
2735
2736 if (!isregzero && reg == REG_SP)
2737 {
2738 set_syntax_error (BAD_SP);
2739 return FALSE;
2740 }
2741
2742 operand->reg.regno = reg;
2743 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2744
2745 /* Accept optional shift operation on register. */
2746 if (! skip_past_comma (str))
2747 return TRUE;
2748
2749 if (! parse_shift (str, operand, mode))
2750 return FALSE;
2751
2752 return TRUE;
2753 }
2754 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2755 {
2756 set_syntax_error
2757 (_("integer register expected in the extended/shifted operand "
2758 "register"));
2759 return FALSE;
2760 }
2761
2762 /* We have a shifted immediate variable. */
2763 return parse_shifter_operand_imm (str, operand, mode);
2764 }
2765
2766 /* Return TRUE on success; return FALSE otherwise. */
2767
2768 static bfd_boolean
2769 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2770 enum parse_shift_mode mode)
2771 {
2772 char *p = *str;
2773
2774 /* Determine if we have the sequence of characters #: or just :
2775 coming next. If we do, then we check for a :rello: relocation
2776 modifier. If we don't, punt the whole lot to
2777 parse_shifter_operand. */
2778
2779 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2780 {
2781 struct reloc_table_entry *entry;
2782
2783 if (p[0] == '#')
2784 p += 2;
2785 else
2786 p++;
2787 *str = p;
2788
2789 /* Try to parse a relocation. Anything else is an error. */
2790 if (!(entry = find_reloc_table_entry (str)))
2791 {
2792 set_syntax_error (_("unknown relocation modifier"));
2793 return FALSE;
2794 }
2795
2796 if (entry->add_type == 0)
2797 {
2798 set_syntax_error
2799 (_("this relocation modifier is not allowed on this instruction"));
2800 return FALSE;
2801 }
2802
2803 /* Save str before we decompose it. */
2804 p = *str;
2805
2806 /* Next, we parse the expression. */
2807 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2808 return FALSE;
2809
2810 /* Record the relocation type (use the ADD variant here). */
2811 inst.reloc.type = entry->add_type;
2812 inst.reloc.pc_rel = entry->pc_rel;
2813
2814 /* If str is empty, we've reached the end; stop here. */
2815 if (**str == '\0')
2816 return TRUE;
2817
2818 /* Otherwise, we have a shifted reloc modifier, so rewind to
2819 recover the variable name and continue parsing for the shifter. */
2820 *str = p;
2821 return parse_shifter_operand_imm (str, operand, mode);
2822 }
2823
2824 return parse_shifter_operand (str, operand, mode);
2825 }
2826
2827 /* Parse all forms of an address expression. Information is written
2828 to *OPERAND and/or inst.reloc.
2829
2830 The A64 instruction set has the following addressing modes:
2831
2832 Offset
2833 [base] // in SIMD ld/st structure
2834 [base{,#0}] // in ld/st exclusive
2835 [base{,#imm}]
2836 [base,Xm{,LSL #imm}]
2837 [base,Xm,SXTX {#imm}]
2838 [base,Wm,(S|U)XTW {#imm}]
2839 Pre-indexed
2840 [base,#imm]!
2841 Post-indexed
2842 [base],#imm
2843 [base],Xm // in SIMD ld/st structure
2844 PC-relative (literal)
2845 label
2846 =immediate
2847
2848 (As a convenience, the notation "=immediate" is permitted in conjunction
2849 with the pc-relative literal load instructions to automatically place an
2850 immediate value or symbolic address in a nearby literal pool and generate
2851 a hidden label which references it.)
2852
2853 Upon a successful parsing, the address structure in *OPERAND will be
2854 filled in the following way:
2855
2856 .base_regno = <base>
2857 .offset.is_reg // 1 if the offset is a register
2858 .offset.imm = <imm>
2859 .offset.regno = <Rm>
2860
2861 For different addressing modes defined in the A64 ISA:
2862
2863 Offset
2864 .pcrel=0; .preind=1; .postind=0; .writeback=0
2865 Pre-indexed
2866 .pcrel=0; .preind=1; .postind=0; .writeback=1
2867 Post-indexed
2868 .pcrel=0; .preind=0; .postind=1; .writeback=1
2869 PC-relative (literal)
2870 .pcrel=1; .preind=1; .postind=0; .writeback=0
2871
2872 The shift/extension information, if any, will be stored in .shifter.
2873
2874 It is the caller's responsibility to check for addressing modes not
2875 supported by the instruction, and to set inst.reloc.type. */
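/* Worked example: parsing "[x0,x1,lsl #3]" yields
   .base_regno = 0, .offset.is_reg = 1, .offset.regno = 1,
   .pcrel = 0, .preind = 1, .postind = 0, .writeback = 0,
   with .shifter.kind = AARCH64_MOD_LSL and .shifter.amount = 3. */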
2876
2877 static bfd_boolean
2878 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2879 int accept_reg_post_index)
2880 {
2881 char *p = *str;
2882 int reg;
2883 int isreg32, isregzero;
2884 expressionS *exp = &inst.reloc.exp;
2885
2886 if (! skip_past_char (&p, '['))
2887 {
2888 /* =immediate or label. */
2889 operand->addr.pcrel = 1;
2890 operand->addr.preind = 1;
2891
2892 /* #:<reloc_op>:<symbol> */
2893 skip_past_char (&p, '#');
2894 if (reloc && skip_past_char (&p, ':'))
2895 {
2896 struct reloc_table_entry *entry;
2897
2898 /* Try to parse a relocation modifier. Anything else is
2899 an error. */
2900 entry = find_reloc_table_entry (&p);
2901 if (! entry)
2902 {
2903 set_syntax_error (_("unknown relocation modifier"));
2904 return FALSE;
2905 }
2906
2907 if (entry->ldst_type == 0)
2908 {
2909 set_syntax_error
2910 (_("this relocation modifier is not allowed on this "
2911 "instruction"));
2912 return FALSE;
2913 }
2914
2915 /* #:<reloc_op>: */
2916 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2917 {
2918 set_syntax_error (_("invalid relocation expression"));
2919 return FALSE;
2920 }
2921
2922 /* #:<reloc_op>:<expr> */
2923 /* Record the load/store relocation type. */
2924 inst.reloc.type = entry->ldst_type;
2925 inst.reloc.pc_rel = entry->pc_rel;
2926 }
2927 else
2928 {
2929
2930 if (skip_past_char (&p, '='))
2931 /* =immediate; need to generate the literal in the literal pool. */
2932 inst.gen_lit_pool = 1;
2933
2934 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2935 {
2936 set_syntax_error (_("invalid address"));
2937 return FALSE;
2938 }
2939 }
2940
2941 *str = p;
2942 return TRUE;
2943 }
2944
2945 /* [ */
2946
2947 /* Accept SP and reject ZR */
2948 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2949 if (reg == PARSE_FAIL || isreg32)
2950 {
2951 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2952 return FALSE;
2953 }
2954 operand->addr.base_regno = reg;
2955
2956 /* [Xn */
2957 if (skip_past_comma (&p))
2958 {
2959 /* [Xn, */
2960 operand->addr.preind = 1;
2961
2962 /* Reject SP and accept ZR */
2963 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
2964 if (reg != PARSE_FAIL)
2965 {
2966 /* [Xn,Rm */
2967 operand->addr.offset.regno = reg;
2968 operand->addr.offset.is_reg = 1;
2969 /* Shifted index. */
2970 if (skip_past_comma (&p))
2971 {
2972 /* [Xn,Rm, */
2973 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
2974 /* Use the diagnostics set in parse_shift, so do not set a new
2975 error message here. */
2976 return FALSE;
2977 }
2978 /* We only accept:
2979 [base,Xm{,LSL #imm}]
2980 [base,Xm,SXTX {#imm}]
2981 [base,Wm,(S|U)XTW {#imm}] */
2982 if (operand->shifter.kind == AARCH64_MOD_NONE
2983 || operand->shifter.kind == AARCH64_MOD_LSL
2984 || operand->shifter.kind == AARCH64_MOD_SXTX)
2985 {
2986 if (isreg32)
2987 {
2988 set_syntax_error (_("invalid use of 32-bit register offset"));
2989 return FALSE;
2990 }
2991 }
2992 else if (!isreg32)
2993 {
2994 set_syntax_error (_("invalid use of 64-bit register offset"));
2995 return FALSE;
2996 }
2997 }
2998 else
2999 {
3000 /* [Xn,#:<reloc_op>:<symbol> */
3001 skip_past_char (&p, '#');
3002 if (reloc && skip_past_char (&p, ':'))
3003 {
3004 struct reloc_table_entry *entry;
3005
3006 /* Try to parse a relocation modifier. Anything else is
3007 an error. */
3008 if (!(entry = find_reloc_table_entry (&p)))
3009 {
3010 set_syntax_error (_("unknown relocation modifier"));
3011 return FALSE;
3012 }
3013
3014 if (entry->ldst_type == 0)
3015 {
3016 set_syntax_error
3017 (_("this relocation modifier is not allowed on this "
3018 "instruction"));
3019 return FALSE;
3020 }
3021
3022 /* [Xn,#:<reloc_op>: */
3023 /* We now have the group relocation table entry corresponding to
3024 the name in the assembler source. Next, we parse the
3025 expression. */
3026 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3027 {
3028 set_syntax_error (_("invalid relocation expression"));
3029 return FALSE;
3030 }
3031
3032 /* [Xn,#:<reloc_op>:<expr> */
3033 /* Record the load/store relocation type. */
3034 inst.reloc.type = entry->ldst_type;
3035 inst.reloc.pc_rel = entry->pc_rel;
3036 }
3037 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3038 {
3039 set_syntax_error (_("invalid expression in the address"));
3040 return FALSE;
3041 }
3042 /* [Xn,<expr> */
3043 }
3044 }
3045
3046 if (! skip_past_char (&p, ']'))
3047 {
3048 set_syntax_error (_("']' expected"));
3049 return FALSE;
3050 }
3051
3052 if (skip_past_char (&p, '!'))
3053 {
3054 if (operand->addr.preind && operand->addr.offset.is_reg)
3055 {
3056 set_syntax_error (_("register offset not allowed in pre-indexed "
3057 "addressing mode"));
3058 return FALSE;
3059 }
3060 /* [Xn]! */
3061 operand->addr.writeback = 1;
3062 }
3063 else if (skip_past_comma (&p))
3064 {
3065 /* [Xn], */
3066 operand->addr.postind = 1;
3067 operand->addr.writeback = 1;
3068
3069 if (operand->addr.preind)
3070 {
3071 set_syntax_error (_("cannot combine pre- and post-indexing"));
3072 return FALSE;
3073 }
3074
3075 if (accept_reg_post_index
3076 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3077 &isregzero)) != PARSE_FAIL)
3078 {
3079 /* [Xn],Xm */
3080 if (isreg32)
3081 {
3082 set_syntax_error (_("invalid 32-bit register offset"));
3083 return FALSE;
3084 }
3085 operand->addr.offset.regno = reg;
3086 operand->addr.offset.is_reg = 1;
3087 }
3088 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3089 {
3090 /* [Xn],#expr */
3091 set_syntax_error (_("invalid expression in the address"));
3092 return FALSE;
3093 }
3094 }
3095
3096 /* If at this point neither .preind nor .postind is set, we have a
3097 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3098 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3099 {
3100 if (operand->addr.writeback)
3101 {
3102 /* Reject [Rn]! */
3103 set_syntax_error (_("missing offset in the pre-indexed address"));
3104 return FALSE;
3105 }
3106 operand->addr.preind = 1;
3107 inst.reloc.exp.X_op = O_constant;
3108 inst.reloc.exp.X_add_number = 0;
3109 }
3110
3111 *str = p;
3112 return TRUE;
3113 }
3114
3115 /* Return TRUE on success; otherwise return FALSE. */
3116 static bfd_boolean
3117 parse_address (char **str, aarch64_opnd_info *operand,
3118 int accept_reg_post_index)
3119 {
3120 return parse_address_main (str, operand, 0, accept_reg_post_index);
3121 }
3122
3123 /* Return TRUE on success; otherwise return FALSE. */
3124 static bfd_boolean
3125 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3126 {
3127 return parse_address_main (str, operand, 1, 0);
3128 }
3129
3130 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3131 Return TRUE on success; otherwise return FALSE. */
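/* Illustrative example: for "movz x0, #:abs_g1:sym" the reloc_table entry
   for "abs_g1" supplies BFD_RELOC_AARCH64_MOVW_G1 as inst.reloc.type,
   whereas a plain "movz x0, #1" sets *INTERNAL_FIXUP_P instead. */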
3132 static bfd_boolean
3133 parse_half (char **str, int *internal_fixup_p)
3134 {
3135 char *p, *saved;
3136 int dummy;
3137
3138 p = *str;
3139 skip_past_char (&p, '#');
3140
3141 gas_assert (internal_fixup_p);
3142 *internal_fixup_p = 0;
3143
3144 if (*p == ':')
3145 {
3146 struct reloc_table_entry *entry;
3147
3148 /* Try to parse a relocation. Anything else is an error. */
3149 ++p;
3150 if (!(entry = find_reloc_table_entry (&p)))
3151 {
3152 set_syntax_error (_("unknown relocation modifier"));
3153 return FALSE;
3154 }
3155
3156 if (entry->movw_type == 0)
3157 {
3158 set_syntax_error
3159 (_("this relocation modifier is not allowed on this instruction"));
3160 return FALSE;
3161 }
3162
3163 inst.reloc.type = entry->movw_type;
3164 }
3165 else
3166 *internal_fixup_p = 1;
3167
3168 /* Avoid parsing a register as a general symbol. */
3169 saved = p;
3170 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3171 return FALSE;
3172 p = saved;
3173
3174 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3175 return FALSE;
3176
3177 *str = p;
3178 return TRUE;
3179 }
3180
3181 /* Parse an operand for an ADRP instruction:
3182 ADRP <Xd>, <label>
3183 Return TRUE on success; otherwise return FALSE. */
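/* Illustrative example: for "adrp x0, :got:var" the reloc_table entry for
   "got" supplies BFD_RELOC_AARCH64_ADR_GOT_PAGE, whereas a plain
   "adrp x0, label" defaults to BFD_RELOC_AARCH64_ADR_HI21_PCREL; in both
   cases inst.reloc.pc_rel is set. */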
3184
3185 static bfd_boolean
3186 parse_adrp (char **str)
3187 {
3188 char *p;
3189
3190 p = *str;
3191 if (*p == ':')
3192 {
3193 struct reloc_table_entry *entry;
3194
3195 /* Try to parse a relocation. Anything else is an error. */
3196 ++p;
3197 if (!(entry = find_reloc_table_entry (&p)))
3198 {
3199 set_syntax_error (_("unknown relocation modifier"));
3200 return FALSE;
3201 }
3202
3203 if (entry->adrp_type == 0)
3204 {
3205 set_syntax_error
3206 (_("this relocation modifier is not allowed on this instruction"));
3207 return FALSE;
3208 }
3209
3210 inst.reloc.type = entry->adrp_type;
3211 }
3212 else
3213 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3214
3215 inst.reloc.pc_rel = 1;
3216
3217 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3218 return FALSE;
3219
3220 *str = p;
3221 return TRUE;
3222 }
3223
3224 /* Miscellaneous. */
3225
3226 /* Parse an option for a preload instruction. Returns the encoding for the
3227 option, or PARSE_FAIL. */
3228
3229 static int
3230 parse_pldop (char **str)
3231 {
3232 char *p, *q;
3233 const struct aarch64_name_value_pair *o;
3234
3235 p = q = *str;
3236 while (ISALNUM (*q))
3237 q++;
3238
3239 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3240 if (!o)
3241 return PARSE_FAIL;
3242
3243 *str = q;
3244 return o->value;
3245 }
3246
3247 /* Parse an option for a barrier instruction. Returns the encoding for the
3248 option, or PARSE_FAIL. */
3249
3250 static int
3251 parse_barrier (char **str)
3252 {
3253 char *p, *q;
3254 const asm_barrier_opt *o;
3255
3256 p = q = *str;
3257 while (ISALPHA (*q))
3258 q++;
3259
3260 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3261 if (!o)
3262 return PARSE_FAIL;
3263
3264 *str = q;
3265 return o->value;
3266 }
3267
3268 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3269 Returns the encoding for the option, or PARSE_FAIL.
3270
3271 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3272 implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>. */
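/* Worked example (using an arbitrary name of that form): with
   IMPLE_DEFINED_P non-zero, "s3_0_c15_c2_0" is accepted and encoded as
   (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3) | 0 == 0xc790. */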
3273
3274 static int
3275 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3276 {
3277 char *p, *q;
3278 char buf[32];
3279 const aarch64_sys_reg *o;
3280 int value;
3281
3282 p = buf;
3283 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3284 if (p < buf + 31)
3285 *p++ = TOLOWER (*q);
3286 *p = '\0';
3287 /* Assert that BUF is large enough. */
3288 gas_assert (p - buf == q - *str);
3289
3290 o = hash_find (sys_regs, buf);
3291 if (!o)
3292 {
3293 if (!imple_defined_p)
3294 return PARSE_FAIL;
3295 else
3296 {
3297 /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
3298 registers. */
3299 unsigned int op0, op1, cn, cm, op2;
3300 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3301 return PARSE_FAIL;
3302 /* The architecture specifies the encoding space for implementation
3303 defined registers as:
3304 op0 op1 CRn CRm op2
3305 1x xxx 1x11 xxxx xxx
3306 For convenience GAS accepts a wider encoding space, as follows:
3307 op0 op1 CRn CRm op2
3308 1x xxx xxxx xxxx xxx */
3309 if ((op0 != 2 && op0 != 3) || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3310 return PARSE_FAIL;
3311 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3312 }
3313 }
3314 else
3315 {
3316 if (aarch64_sys_reg_deprecated_p (o))
3317 as_warn (_("system register name '%s' is deprecated and may be "
3318 "removed in a future release"), buf);
3319 value = o->value;
3320 }
3321
3322 *str = q;
3323 return value;
3324 }
3325
3326 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3327 for the option, or NULL. */
3328
3329 static const aarch64_sys_ins_reg *
3330 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3331 {
3332 char *p, *q;
3333 char buf[32];
3334 const aarch64_sys_ins_reg *o;
3335
3336 p = buf;
3337 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3338 if (p < buf + 31)
3339 *p++ = TOLOWER (*q);
3340 *p = '\0';
3341
3342 o = hash_find (sys_ins_regs, buf);
3343 if (!o)
3344 return NULL;
3345
3346 *str = q;
3347 return o;
3348 }
3349 \f
3350 #define po_char_or_fail(chr) do { \
3351 if (! skip_past_char (&str, chr)) \
3352 goto failure; \
3353 } while (0)
3354
3355 #define po_reg_or_fail(regtype) do { \
3356 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3357 if (val == PARSE_FAIL) \
3358 { \
3359 set_default_error (); \
3360 goto failure; \
3361 } \
3362 } while (0)
3363
3364 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3365 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3366 &isreg32, &isregzero); \
3367 if (val == PARSE_FAIL) \
3368 { \
3369 set_default_error (); \
3370 goto failure; \
3371 } \
3372 info->reg.regno = val; \
3373 if (isreg32) \
3374 info->qualifier = AARCH64_OPND_QLF_W; \
3375 else \
3376 info->qualifier = AARCH64_OPND_QLF_X; \
3377 } while (0)
3378
3379 #define po_imm_nc_or_fail() do { \
3380 if (! parse_constant_immediate (&str, &val)) \
3381 goto failure; \
3382 } while (0)
3383
3384 #define po_imm_or_fail(min, max) do { \
3385 if (! parse_constant_immediate (&str, &val)) \
3386 goto failure; \
3387 if (val < min || val > max) \
3388 { \
3389 set_fatal_syntax_error (_("immediate value out of range "\
3390 #min " to "#max)); \
3391 goto failure; \
3392 } \
3393 } while (0)
3394
3395 #define po_misc_or_fail(expr) do { \
3396 if (!expr) \
3397 goto failure; \
3398 } while (0)
3399 \f
3400 /* encode the 12-bit imm field of Add/sub immediate */
3401 static inline uint32_t
3402 encode_addsub_imm (uint32_t imm)
3403 {
3404 return imm << 10;
3405 }
3406
3407 /* encode the shift amount field of Add/sub immediate */
3408 static inline uint32_t
3409 encode_addsub_imm_shift_amount (uint32_t cnt)
3410 {
3411 return cnt << 22;
3412 }
3413
3414
3415 /* encode the imm field of Adr instruction */
3416 static inline uint32_t
3417 encode_adr_imm (uint32_t imm)
3418 {
3419 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3420 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3421 }
3422
3423 /* encode the immediate field of Move wide immediate */
3424 static inline uint32_t
3425 encode_movw_imm (uint32_t imm)
3426 {
3427 return imm << 5;
3428 }
3429
3430 /* encode the 26-bit offset of unconditional branch */
3431 static inline uint32_t
3432 encode_branch_ofs_26 (uint32_t ofs)
3433 {
3434 return ofs & ((1 << 26) - 1);
3435 }
3436
3437 /* encode the 19-bit offset of conditional branch and compare & branch */
3438 static inline uint32_t
3439 encode_cond_branch_ofs_19 (uint32_t ofs)
3440 {
3441 return (ofs & ((1 << 19) - 1)) << 5;
3442 }
3443
3444 /* encode the 19-bit offset of ld literal */
3445 static inline uint32_t
3446 encode_ld_lit_ofs_19 (uint32_t ofs)
3447 {
3448 return (ofs & ((1 << 19) - 1)) << 5;
3449 }
3450
3451 /* Encode the 14-bit offset of test & branch. */
3452 static inline uint32_t
3453 encode_tst_branch_ofs_14 (uint32_t ofs)
3454 {
3455 return (ofs & ((1 << 14) - 1)) << 5;
3456 }
3457
3458 /* Encode the 16-bit imm field of svc/hvc/smc. */
3459 static inline uint32_t
3460 encode_svc_imm (uint32_t imm)
3461 {
3462 return imm << 5;
3463 }
3464
3465 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3466 static inline uint32_t
3467 reencode_addsub_switch_add_sub (uint32_t opcode)
3468 {
3469 return opcode ^ (1 << 30);
3470 }
3471
3472 static inline uint32_t
3473 reencode_movzn_to_movz (uint32_t opcode)
3474 {
3475 return opcode | (1 << 30);
3476 }
3477
3478 static inline uint32_t
3479 reencode_movzn_to_movn (uint32_t opcode)
3480 {
3481 return opcode & ~(1 << 30);
3482 }
3483
3484 /* Overall per-instruction processing. */
3485
3486 /* We need to be able to fix up arbitrary expressions in some statements.
3487 This is so that we can handle symbols that are an arbitrary distance from
3488 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3489 which returns part of an address in a form which will be valid for
3490 a data instruction. We do this by pushing the expression into a symbol
3491 in the expr_section, and creating a fix for that. */
3492
3493 static fixS *
3494 fix_new_aarch64 (fragS * frag,
3495 int where,
3496 short int size, expressionS * exp, int pc_rel, int reloc)
3497 {
3498 fixS *new_fix;
3499
3500 switch (exp->X_op)
3501 {
3502 case O_constant:
3503 case O_symbol:
3504 case O_add:
3505 case O_subtract:
3506 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3507 break;
3508
3509 default:
3510 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3511 pc_rel, reloc);
3512 break;
3513 }
3514 return new_fix;
3515 }
3516 \f
3517 /* Diagnostics on operands errors. */
3518
3519 /* By default, output one-line error message only.
3520 Enable the verbose error message by -merror-verbose. */
3521 static int verbose_error_p = 0;
3522
3523 #ifdef DEBUG_AARCH64
3524 /* N.B. this is only for the purpose of debugging. */
3525 const char* operand_mismatch_kind_names[] =
3526 {
3527 "AARCH64_OPDE_NIL",
3528 "AARCH64_OPDE_RECOVERABLE",
3529 "AARCH64_OPDE_SYNTAX_ERROR",
3530 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3531 "AARCH64_OPDE_INVALID_VARIANT",
3532 "AARCH64_OPDE_OUT_OF_RANGE",
3533 "AARCH64_OPDE_UNALIGNED",
3534 "AARCH64_OPDE_REG_LIST",
3535 "AARCH64_OPDE_OTHER_ERROR",
3536 };
3537 #endif /* DEBUG_AARCH64 */
3538
3539 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3540
3541 When multiple errors of different kinds are found in the same assembly
3542 line, only the error of the highest severity will be picked up for
3543 issuing the diagnostics. */
3544
3545 static inline bfd_boolean
3546 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3547 enum aarch64_operand_error_kind rhs)
3548 {
3549 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3550 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3551 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3552 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3553 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3554 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3555 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3556 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3557 return lhs > rhs;
3558 }
3559
3560 /* Helper routine to get the mnemonic name from the assembly instruction
3561 line; it should only be called for diagnostic purposes, as a string
3562 copy operation is involved, which may affect runtime performance if
3563 used elsewhere. */
3564
3565 static const char*
3566 get_mnemonic_name (const char *str)
3567 {
3568 static char mnemonic[32];
3569 char *ptr;
3570
3571 /* Get the first 31 bytes and assume that the full name is included. */
3572 strncpy (mnemonic, str, 31);
3573 mnemonic[31] = '\0';
3574
3575 /* Scan up to the end of the mnemonic, which must end in white space,
3576 '.', or end of string. */
3577 for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
3578 ;
3579
3580 *ptr = '\0';
3581
3582 /* Append '...' to the truncated long name. */
3583 if (ptr - mnemonic == 31)
3584 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3585
3586 return mnemonic;
3587 }
3588
3589 static void
3590 reset_aarch64_instruction (aarch64_instruction *instruction)
3591 {
3592 memset (instruction, '\0', sizeof (aarch64_instruction));
3593 instruction->reloc.type = BFD_RELOC_UNUSED;
3594 }
3595
3596 /* Data structures storing one user error in the assembly code related to
3597 operands. */
3598
3599 struct operand_error_record
3600 {
3601 const aarch64_opcode *opcode;
3602 aarch64_operand_error detail;
3603 struct operand_error_record *next;
3604 };
3605
3606 typedef struct operand_error_record operand_error_record;
3607
3608 struct operand_errors
3609 {
3610 operand_error_record *head;
3611 operand_error_record *tail;
3612 };
3613
3614 typedef struct operand_errors operand_errors;
3615
3616 /* Top-level data structure reporting user errors for the current line of
3617 the assembly code.
3618 The way md_assemble works is that all opcodes sharing the same mnemonic
3619 name are iterated to find a match to the assembly line. In this data
3620 structure, each such opcode will have one operand_error_record
3621 allocated and inserted. In other words, excess errors related to
3622 a single opcode are disregarded. */
3623 operand_errors operand_error_report;
3624
3625 /* Free record nodes. */
3626 static operand_error_record *free_opnd_error_record_nodes = NULL;
3627
3628 /* Initialize the data structure that stores the operand mismatch
3629 information on assembling one line of the assembly code. */
3630 static void
3631 init_operand_error_report (void)
3632 {
3633 if (operand_error_report.head != NULL)
3634 {
3635 gas_assert (operand_error_report.tail != NULL);
3636 operand_error_report.tail->next = free_opnd_error_record_nodes;
3637 free_opnd_error_record_nodes = operand_error_report.head;
3638 operand_error_report.head = NULL;
3639 operand_error_report.tail = NULL;
3640 return;
3641 }
3642 gas_assert (operand_error_report.tail == NULL);
3643 }
3644
3645 /* Return TRUE if some operand error has been recorded during the
3646 parsing of the current assembly line using the opcode *OPCODE;
3647 otherwise return FALSE. */
3648 static inline bfd_boolean
3649 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3650 {
3651 operand_error_record *record = operand_error_report.head;
3652 return record && record->opcode == opcode;
3653 }
3654
3655 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3656 OPCODE field is initialized with OPCODE.
3657 N.B. only one record for each opcode, i.e. at most one error is
3658 recorded for each instruction template. */
3659
3660 static void
3661 add_operand_error_record (const operand_error_record* new_record)
3662 {
3663 const aarch64_opcode *opcode = new_record->opcode;
3664 operand_error_record* record = operand_error_report.head;
3665
3666 /* The record may have been created for this opcode. If not, we need
3667 to prepare one. */
3668 if (! opcode_has_operand_error_p (opcode))
3669 {
3670 /* Get one empty record. */
3671 if (free_opnd_error_record_nodes == NULL)
3672 {
3673 record = xmalloc (sizeof (operand_error_record));
3674 if (record == NULL)
3675 abort ();
3676 }
3677 else
3678 {
3679 record = free_opnd_error_record_nodes;
3680 free_opnd_error_record_nodes = record->next;
3681 }
3682 record->opcode = opcode;
3683 /* Insert at the head. */
3684 record->next = operand_error_report.head;
3685 operand_error_report.head = record;
3686 if (operand_error_report.tail == NULL)
3687 operand_error_report.tail = record;
3688 }
3689 else if (record->detail.kind != AARCH64_OPDE_NIL
3690 && record->detail.index <= new_record->detail.index
3691 && operand_error_higher_severity_p (record->detail.kind,
3692 new_record->detail.kind))
3693 {
3694 /* In the case of multiple errors found on operands related to a
3695 single opcode, only record the error of the leftmost operand and
3696 only if the error is of higher severity. */
3697 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3698 " the existing error %s on operand %d",
3699 operand_mismatch_kind_names[new_record->detail.kind],
3700 new_record->detail.index,
3701 operand_mismatch_kind_names[record->detail.kind],
3702 record->detail.index);
3703 return;
3704 }
3705
3706 record->detail = new_record->detail;
3707 }
3708
3709 static inline void
3710 record_operand_error_info (const aarch64_opcode *opcode,
3711 aarch64_operand_error *error_info)
3712 {
3713 operand_error_record record;
3714 record.opcode = opcode;
3715 record.detail = *error_info;
3716 add_operand_error_record (&record);
3717 }
3718
3719 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3720 error message *ERROR, for operand IDX (count from 0). */
3721
3722 static void
3723 record_operand_error (const aarch64_opcode *opcode, int idx,
3724 enum aarch64_operand_error_kind kind,
3725 const char* error)
3726 {
3727 aarch64_operand_error info;
3728 memset(&info, 0, sizeof (info));
3729 info.index = idx;
3730 info.kind = kind;
3731 info.error = error;
3732 record_operand_error_info (opcode, &info);
3733 }
3734
3735 static void
3736 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3737 enum aarch64_operand_error_kind kind,
3738 const char* error, const int *extra_data)
3739 {
3740 aarch64_operand_error info;
3741 info.index = idx;
3742 info.kind = kind;
3743 info.error = error;
3744 info.data[0] = extra_data[0];
3745 info.data[1] = extra_data[1];
3746 info.data[2] = extra_data[2];
3747 record_operand_error_info (opcode, &info);
3748 }
3749
3750 static void
3751 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3752 const char* error, int lower_bound,
3753 int upper_bound)
3754 {
3755 int data[3] = {lower_bound, upper_bound, 0};
3756 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3757 error, data);
3758 }
3759
3760 /* Remove the operand error record for *OPCODE. */
3761 static void ATTRIBUTE_UNUSED
3762 remove_operand_error_record (const aarch64_opcode *opcode)
3763 {
3764 if (opcode_has_operand_error_p (opcode))
3765 {
3766 operand_error_record* record = operand_error_report.head;
3767 gas_assert (record != NULL && operand_error_report.tail != NULL);
3768 operand_error_report.head = record->next;
3769 record->next = free_opnd_error_record_nodes;
3770 free_opnd_error_record_nodes = record;
3771 if (operand_error_report.head == NULL)
3772 {
3773 gas_assert (operand_error_report.tail == record);
3774 operand_error_report.tail = NULL;
3775 }
3776 }
3777 }
3778
3779 /* Given the instruction in *INSTR, return the index of the best matched
3780 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3781
3782 Return -1 if there is no qualifier sequence; return the first match
3783 if multiple matches are found. */
3784
3785 static int
3786 find_best_match (const aarch64_inst *instr,
3787 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3788 {
3789 int i, num_opnds, max_num_matched, idx;
3790
3791 num_opnds = aarch64_num_of_operands (instr->opcode);
3792 if (num_opnds == 0)
3793 {
3794 DEBUG_TRACE ("no operand");
3795 return -1;
3796 }
3797
3798 max_num_matched = 0;
3799 idx = -1;
3800
3801 /* For each pattern. */
3802 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3803 {
3804 int j, num_matched;
3805 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3806
3807 /* Most opcodes have far fewer patterns in the list. */
3808 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3809 {
3810 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3811 if (i != 0 && idx == -1)
3812 /* If nothing has been matched, return the 1st sequence. */
3813 idx = 0;
3814 break;
3815 }
3816
3817 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3818 if (*qualifiers == instr->operands[j].qualifier)
3819 ++num_matched;
3820
3821 if (num_matched > max_num_matched)
3822 {
3823 max_num_matched = num_matched;
3824 idx = i;
3825 }
3826 }
3827
3828 DEBUG_TRACE ("return with %d", idx);
3829 return idx;
3830 }
3831
3832 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3833 corresponding operands in *INSTR. */
3834
3835 static inline void
3836 assign_qualifier_sequence (aarch64_inst *instr,
3837 const aarch64_opnd_qualifier_t *qualifiers)
3838 {
3839 int i = 0;
3840 int num_opnds = aarch64_num_of_operands (instr->opcode);
3841 gas_assert (num_opnds);
3842 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3843 instr->operands[i].qualifier = *qualifiers;
3844 }
3845
3846 /* Print operands for diagnostic purposes. */
3847
3848 static void
3849 print_operands (char *buf, const aarch64_opcode *opcode,
3850 const aarch64_opnd_info *opnds)
3851 {
3852 int i;
3853
3854 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3855 {
3856 const size_t size = 128;
3857 char str[size];
3858
3859 /* We rely mainly on the opcode operand info; however, we also look into
3860 inst->operands to support printing of an optional
3861 operand.
3862 The two operand codes should be the same in all cases, apart from
3863 when the operand can be optional. */
3864 if (opcode->operands[i] == AARCH64_OPND_NIL
3865 || opnds[i].type == AARCH64_OPND_NIL)
3866 break;
3867
3868 /* Generate the operand string in STR. */
3869 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3870
3871 /* Delimiter. */
3872 if (str[0] != '\0')
3873 strcat (buf, i == 0 ? " " : ",");
3874
3875 /* Append the operand string. */
3876 strcat (buf, str);
3877 }
3878 }
3879
3880 /* Output an informational message to stderr. */
3881
3882 static void
3883 output_info (const char *format, ...)
3884 {
3885 char *file;
3886 unsigned int line;
3887 va_list args;
3888
3889 as_where (&file, &line);
3890 if (file)
3891 {
3892 if (line != 0)
3893 fprintf (stderr, "%s:%u: ", file, line);
3894 else
3895 fprintf (stderr, "%s: ", file);
3896 }
3897 fprintf (stderr, _("Info: "));
3898 va_start (args, format);
3899 vfprintf (stderr, format, args);
3900 va_end (args);
3901 (void) putc ('\n', stderr);
3902 }
3903
3904 /* Output one operand error record. */
3905
3906 static void
3907 output_operand_error_record (const operand_error_record *record, char *str)
3908 {
3909 int idx = record->detail.index;
3910 const aarch64_opcode *opcode = record->opcode;
3911 enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3912 : AARCH64_OPND_NIL);
3913 const aarch64_operand_error *detail = &record->detail;
3914
3915 switch (detail->kind)
3916 {
3917 case AARCH64_OPDE_NIL:
3918 gas_assert (0);
3919 break;
3920
3921 case AARCH64_OPDE_SYNTAX_ERROR:
3922 case AARCH64_OPDE_RECOVERABLE:
3923 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3924 case AARCH64_OPDE_OTHER_ERROR:
3925 gas_assert (idx >= 0);
3926 /* Use the prepared error message if there is one, otherwise use the
3927 operand description string to describe the error. */
3928 if (detail->error != NULL)
3929 {
3930 if (detail->index == -1)
3931 as_bad (_("%s -- `%s'"), detail->error, str);
3932 else
3933 as_bad (_("%s at operand %d -- `%s'"),
3934 detail->error, detail->index + 1, str);
3935 }
3936 else
3937 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3938 aarch64_get_operand_desc (opd_code), str);
3939 break;
3940
3941 case AARCH64_OPDE_INVALID_VARIANT:
3942 as_bad (_("operand mismatch -- `%s'"), str);
3943 if (verbose_error_p)
3944 {
3945 /* We will try to correct the erroneous instruction and also provide
3946 more information e.g. all other valid variants.
3947
3948 The string representation of the corrected instruction and other
3949 valid variants are generated by
3950
3951 1) obtaining the intermediate representation of the erroneous
3952 instruction;
3953 2) manipulating the IR, e.g. replacing the operand qualifier;
3954 3) printing out the instruction by calling the printer functions
3955 shared with the disassembler.
3956
3957 The limitation of this method is that the exact input assembly
3958 line cannot be accurately reproduced in some cases, for example an
3959 optional operand present in the actual assembly line will be
3960 omitted in the output; likewise for the optional syntax rules,
3961 e.g. the # before the immediate. Another limitation is that the
3962 assembly symbols and relocation operations in the assembly line
3963 currently cannot be printed out in the error report. Last but not
3964 least, when other errors co-exist with this error, the
3965 'corrected' instruction may still be incorrect, e.g. given
3966 'ldnp h0,h1,[x0,#6]!'
3967 this diagnosis will provide the version:
3968 'ldnp s0,s1,[x0,#6]!'
3969 which is still not right. */
3970 size_t len = strlen (get_mnemonic_name (str));
3971 int i, qlf_idx;
3972 bfd_boolean result;
3973 const size_t size = 2048;
3974 char buf[size];
3975 aarch64_inst *inst_base = &inst.base;
3976 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
3977
3978 /* Init inst. */
3979 reset_aarch64_instruction (&inst);
3980 inst_base->opcode = opcode;
3981
3982 /* Reset the error report so that there is no side effect on the
3983 following operand parsing. */
3984 init_operand_error_report ();
3985
3986 /* Fill inst. */
3987 result = parse_operands (str + len, opcode)
3988 && programmer_friendly_fixup (&inst);
3989 gas_assert (result);
3990 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
3991 NULL, NULL);
3992 gas_assert (!result);
3993
3994 /* Find the most matched qualifier sequence. */
3995 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
3996 gas_assert (qlf_idx > -1);
3997
3998 /* Assign the qualifiers. */
3999 assign_qualifier_sequence (inst_base,
4000 opcode->qualifiers_list[qlf_idx]);
4001
4002 /* Print the hint. */
4003 output_info (_(" did you mean this?"));
4004 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4005 print_operands (buf, opcode, inst_base->operands);
4006 output_info (_(" %s"), buf);
4007
4008 /* Print out other variant(s) if there is any. */
4009 if (qlf_idx != 0 ||
4010 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4011 output_info (_(" other valid variant(s):"));
4012
4013 /* For each pattern. */
4014 qualifiers_list = opcode->qualifiers_list;
4015 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4016 {
4017 /* Most opcodes have far fewer patterns in the list.
4018 The first NIL qualifier indicates the end of the list. */
4019 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4020 break;
4021
4022 if (i != qlf_idx)
4023 {
4024 /* Mnemonic name. */
4025 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4026
4027 /* Assign the qualifiers. */
4028 assign_qualifier_sequence (inst_base, *qualifiers_list);
4029
4030 /* Print instruction. */
4031 print_operands (buf, opcode, inst_base->operands);
4032
4033 output_info (_(" %s"), buf);
4034 }
4035 }
4036 }
4037 break;
4038
4039 case AARCH64_OPDE_OUT_OF_RANGE:
4040 if (detail->data[0] != detail->data[1])
4041 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4042 detail->error ? detail->error : _("immediate value"),
4043 detail->data[0], detail->data[1], detail->index + 1, str);
4044 else
4045 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4046 detail->error ? detail->error : _("immediate value"),
4047 detail->data[0], detail->index + 1, str);
4048 break;
4049
4050 case AARCH64_OPDE_REG_LIST:
4051 if (detail->data[0] == 1)
4052 as_bad (_("invalid number of registers in the list; "
4053 "only 1 register is expected at operand %d -- `%s'"),
4054 detail->index + 1, str);
4055 else
4056 as_bad (_("invalid number of registers in the list; "
4057 "%d registers are expected at operand %d -- `%s'"),
4058 detail->data[0], detail->index + 1, str);
4059 break;
4060
4061 case AARCH64_OPDE_UNALIGNED:
4062 as_bad (_("immediate value should be a multiple of "
4063 "%d at operand %d -- `%s'"),
4064 detail->data[0], detail->index + 1, str);
4065 break;
4066
4067 default:
4068 gas_assert (0);
4069 break;
4070 }
4071 }
4072
4073 /* Process and output the error message about operand mismatching.
4074
4075 When this function is called, the operand error information has
4076 been collected for an assembly line; there may be multiple errors
4077 in the case of multiple instruction templates; output the
4078 error message that most closely describes the problem. */
4079
4080 static void
4081 output_operand_error_report (char *str)
4082 {
4083 int largest_error_pos;
4084 const char *msg = NULL;
4085 enum aarch64_operand_error_kind kind;
4086 operand_error_record *curr;
4087 operand_error_record *head = operand_error_report.head;
4088 operand_error_record *record = NULL;
4089
4090 /* No error to report. */
4091 if (head == NULL)
4092 return;
4093
4094 gas_assert (head != NULL && operand_error_report.tail != NULL);
4095
4096 /* Only one error. */
4097 if (head == operand_error_report.tail)
4098 {
4099 DEBUG_TRACE ("single opcode entry with error kind: %s",
4100 operand_mismatch_kind_names[head->detail.kind]);
4101 output_operand_error_record (head, str);
4102 return;
4103 }
4104
4105 /* Find the error kind of the highest severity. */
4106 DEBUG_TRACE ("multiple opcode entries with error kind");
4107 kind = AARCH64_OPDE_NIL;
4108 for (curr = head; curr != NULL; curr = curr->next)
4109 {
4110 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4111 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4112 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4113 kind = curr->detail.kind;
4114 }
4115 gas_assert (kind != AARCH64_OPDE_NIL);
4116
4117 /* Pick one of the errors of KIND to report. */
4118 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4119 for (curr = head; curr != NULL; curr = curr->next)
4120 {
4121 if (curr->detail.kind != kind)
4122 continue;
4123 /* If there are multiple errors, pick the one with the highest
4124 mismatching operand index. In the case of multiple errors with
4125 the equally highest operand index, pick the first one that has a
4126 non-NULL error message, or failing that simply the first one. */
4127 if (curr->detail.index > largest_error_pos
4128 || (curr->detail.index == largest_error_pos && msg == NULL
4129 && curr->detail.error != NULL))
4130 {
4131 largest_error_pos = curr->detail.index;
4132 record = curr;
4133 msg = record->detail.error;
4134 }
4135 }
4136
4137 gas_assert (largest_error_pos != -2 && record != NULL);
4138 DEBUG_TRACE ("Pick up error kind %s to report",
4139 operand_mismatch_kind_names[record->detail.kind]);
4140
4141 /* Output. */
4142 output_operand_error_record (record, str);
4143 }
4144 \f
4145 /* Write an AArch64 instruction to buf - always little-endian. */
4146 static void
4147 put_aarch64_insn (char *buf, uint32_t insn)
4148 {
4149 unsigned char *where = (unsigned char *) buf;
4150 where[0] = insn;
4151 where[1] = insn >> 8;
4152 where[2] = insn >> 16;
4153 where[3] = insn >> 24;
4154 }
4155
4156 static uint32_t
4157 get_aarch64_insn (char *buf)
4158 {
4159 unsigned char *where = (unsigned char *) buf;
4160 uint32_t result;
4161 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4162 return result;
4163 }
4164
4165 static void
4166 output_inst (struct aarch64_inst *new_inst)
4167 {
4168 char *to = NULL;
4169
4170 to = frag_more (INSN_SIZE);
4171
4172 frag_now->tc_frag_data.recorded = 1;
4173
4174 put_aarch64_insn (to, inst.base.value);
4175
4176 if (inst.reloc.type != BFD_RELOC_UNUSED)
4177 {
4178 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4179 INSN_SIZE, &inst.reloc.exp,
4180 inst.reloc.pc_rel,
4181 inst.reloc.type);
4182 DEBUG_TRACE ("Prepared relocation fix up");
4183 /* Don't check the addend value against the instruction size,
4184 that's the job of our code in md_apply_fix(). */
4185 fixp->fx_no_overflow = 1;
4186 if (new_inst != NULL)
4187 fixp->tc_fix_data.inst = new_inst;
4188 if (aarch64_gas_internal_fixup_p ())
4189 {
4190 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4191 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4192 fixp->fx_addnumber = inst.reloc.flags;
4193 }
4194 }
4195
4196 dwarf2_emit_insn (INSN_SIZE);
4197 }
4198
4199 /* Link together opcodes of the same name. */
4200
4201 struct templates
4202 {
4203 aarch64_opcode *opcode;
4204 struct templates *next;
4205 };
4206
4207 typedef struct templates templates;
4208
4209 static templates *
4210 lookup_mnemonic (const char *start, int len)
4211 {
4212 templates *templ = NULL;
4213
4214 templ = hash_find_n (aarch64_ops_hsh, start, len);
4215 return templ;
4216 }
4217
4218 /* Subroutine of md_assemble, responsible for looking up the primary
4219 opcode from the mnemonic the user wrote. STR points to the
4220 beginning of the mnemonic. */
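/* For example, given "b.eq", the two-character condition "eq" is recorded
in inst.cond and the opcode table is then searched for the internal
mnemonic "b.c" formed by appending ".c" below. */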
4221
4222 static templates *
4223 opcode_lookup (char **str)
4224 {
4225 char *end, *base;
4226 const aarch64_cond *cond;
4227 char condname[16];
4228 int len;
4229
4230 /* Scan up to the end of the mnemonic, which must end in white space,
4231 '.', or end of string. */
4232 for (base = end = *str; is_part_of_name(*end); end++)
4233 if (*end == '.')
4234 break;
4235
4236 if (end == base)
4237 return 0;
4238
4239 inst.cond = COND_ALWAYS;
4240
4241 /* Handle a possible condition. */
4242 if (end[0] == '.')
4243 {
4244 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4245 if (cond)
4246 {
4247 inst.cond = cond->value;
4248 *str = end + 3;
4249 }
4250 else
4251 {
4252 *str = end;
4253 return 0;
4254 }
4255 }
4256 else
4257 *str = end;
4258
4259 len = end - base;
4260
4261 if (inst.cond == COND_ALWAYS)
4262 {
4263 /* Look for unaffixed mnemonic. */
4264 return lookup_mnemonic (base, len);
4265 }
4266 else if (len <= 13)
4267 {
4268 /* Append ".c" to the mnemonic if conditional. */
4269 memcpy (condname, base, len);
4270 memcpy (condname + len, ".c", 2);
4271 base = condname;
4272 len += 2;
4273 return lookup_mnemonic (base, len);
4274 }
4275
4276 return NULL;
4277 }
4278
4279 /* Internal helper routine converting a vector neon_type_el structure
4280 *VECTYPE to a corresponding operand qualifier. */
4281
4282 static inline aarch64_opnd_qualifier_t
4283 vectype_to_qualifier (const struct neon_type_el *vectype)
4284 {
4285 /* Element size in bytes indexed by neon_el_type. */
4286 const unsigned char ele_size[5]
4287 = {1, 2, 4, 8, 16};
4288
4289 if (!vectype->defined || vectype->type == NT_invtype)
4290 goto vectype_conversion_fail;
4291
4292 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4293
4294 if (vectype->defined & NTA_HASINDEX)
4295 /* Vector element register. */
4296 return AARCH64_OPND_QLF_S_B + vectype->type;
4297 else
4298 {
4299 /* Vector register. */
4300 int reg_size = ele_size[vectype->type] * vectype->width;
4301 unsigned offset;
4302 if (reg_size != 16 && reg_size != 8)
4303 goto vectype_conversion_fail;
4304 /* The conversion is calculated based on the relation of the order of
4305 qualifiers to the vector element size and vector register size. */
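/* For example, ".4s" gives type NT_s (2) and width 4, hence reg_size 16
and offset (2 << 1) + (16 >> 4) = 5, which selects the 4S qualifier;
".8b" gives offset 0, which selects AARCH64_OPND_QLF_V_8B itself. */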
4306 offset = (vectype->type == NT_q)
4307 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4308 gas_assert (offset <= 8);
4309 return AARCH64_OPND_QLF_V_8B + offset;
4310 }
4311
4312 vectype_conversion_fail:
4313 first_error (_("bad vector arrangement type"));
4314 return AARCH64_OPND_QLF_NIL;
4315 }
4316
4317 /* Process an optional operand that has been omitted from the assembly line.
4318 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4319 instruction's opcode entry while IDX is the index of this omitted operand.
4320 */
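/* For example, "ret" may be written without its register operand; the
omitted operand is then filled in here from the opcode's default value,
which for RET names the link register x30. */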
4321
4322 static void
4323 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4324 int idx, aarch64_opnd_info *operand)
4325 {
4326 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4327 gas_assert (optional_operand_p (opcode, idx));
4328 gas_assert (!operand->present);
4329
4330 switch (type)
4331 {
4332 case AARCH64_OPND_Rd:
4333 case AARCH64_OPND_Rn:
4334 case AARCH64_OPND_Rm:
4335 case AARCH64_OPND_Rt:
4336 case AARCH64_OPND_Rt2:
4337 case AARCH64_OPND_Rs:
4338 case AARCH64_OPND_Ra:
4339 case AARCH64_OPND_Rt_SYS:
4340 case AARCH64_OPND_Rd_SP:
4341 case AARCH64_OPND_Rn_SP:
4342 case AARCH64_OPND_Fd:
4343 case AARCH64_OPND_Fn:
4344 case AARCH64_OPND_Fm:
4345 case AARCH64_OPND_Fa:
4346 case AARCH64_OPND_Ft:
4347 case AARCH64_OPND_Ft2:
4348 case AARCH64_OPND_Sd:
4349 case AARCH64_OPND_Sn:
4350 case AARCH64_OPND_Sm:
4351 case AARCH64_OPND_Vd:
4352 case AARCH64_OPND_Vn:
4353 case AARCH64_OPND_Vm:
4354 case AARCH64_OPND_VdD1:
4355 case AARCH64_OPND_VnD1:
4356 operand->reg.regno = default_value;
4357 break;
4358
4359 case AARCH64_OPND_Ed:
4360 case AARCH64_OPND_En:
4361 case AARCH64_OPND_Em:
4362 operand->reglane.regno = default_value;
4363 break;
4364
4365 case AARCH64_OPND_IDX:
4366 case AARCH64_OPND_BIT_NUM:
4367 case AARCH64_OPND_IMMR:
4368 case AARCH64_OPND_IMMS:
4369 case AARCH64_OPND_SHLL_IMM:
4370 case AARCH64_OPND_IMM_VLSL:
4371 case AARCH64_OPND_IMM_VLSR:
4372 case AARCH64_OPND_CCMP_IMM:
4373 case AARCH64_OPND_FBITS:
4374 case AARCH64_OPND_UIMM4:
4375 case AARCH64_OPND_UIMM3_OP1:
4376 case AARCH64_OPND_UIMM3_OP2:
4377 case AARCH64_OPND_IMM:
4378 case AARCH64_OPND_WIDTH:
4379 case AARCH64_OPND_UIMM7:
4380 case AARCH64_OPND_NZCV:
4381 operand->imm.value = default_value;
4382 break;
4383
4384 case AARCH64_OPND_EXCEPTION:
4385 inst.reloc.type = BFD_RELOC_UNUSED;
4386 break;
4387
4388 case AARCH64_OPND_BARRIER_ISB:
4389 operand->barrier = aarch64_barrier_options + default_value;
4390
4391 default:
4392 break;
4393 }
4394 }
4395
4396 /* Process the relocation type for move wide instructions.
4397 Return TRUE on success; otherwise return FALSE. */
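/* For example, "movz x0, #:abs_g1:sym" is parsed with the relocation type
BFD_RELOC_AARCH64_MOVW_G1; this routine then records the implied shift
amount of 16 on the immediate operand. */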
4398
4399 static bfd_boolean
4400 process_movw_reloc_info (void)
4401 {
4402 int is32;
4403 unsigned shift;
4404
4405 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4406
4407 if (inst.base.opcode->op == OP_MOVK)
4408 switch (inst.reloc.type)
4409 {
4410 case BFD_RELOC_AARCH64_MOVW_G0_S:
4411 case BFD_RELOC_AARCH64_MOVW_G1_S:
4412 case BFD_RELOC_AARCH64_MOVW_G2_S:
4413 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4414 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4415 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4416 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4417 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4418 set_syntax_error
4419 (_("the specified relocation type is not allowed for MOVK"));
4420 return FALSE;
4421 default:
4422 break;
4423 }
4424
4425 switch (inst.reloc.type)
4426 {
4427 case BFD_RELOC_AARCH64_MOVW_G0:
4428 case BFD_RELOC_AARCH64_MOVW_G0_S:
4429 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4430 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4431 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4432 shift = 0;
4433 break;
4434 case BFD_RELOC_AARCH64_MOVW_G1:
4435 case BFD_RELOC_AARCH64_MOVW_G1_S:
4436 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4437 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4438 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4439 shift = 16;
4440 break;
4441 case BFD_RELOC_AARCH64_MOVW_G2:
4442 case BFD_RELOC_AARCH64_MOVW_G2_S:
4443 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4444 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4445 if (is32)
4446 {
4447 set_fatal_syntax_error
4448 (_("the specified relocation type is not allowed for 32-bit "
4449 "register"));
4450 return FALSE;
4451 }
4452 shift = 32;
4453 break;
4454 case BFD_RELOC_AARCH64_MOVW_G3:
4455 if (is32)
4456 {
4457 set_fatal_syntax_error
4458 (_("the specified relocation type is not allowed for 32-bit "
4459 "register"));
4460 return FALSE;
4461 }
4462 shift = 48;
4463 break;
4464 default:
4465 /* More cases should be added when more MOVW-related relocation types
4466 are supported in GAS. */
4467 gas_assert (aarch64_gas_internal_fixup_p ());
4468 /* The shift amount should have already been set by the parser. */
4469 return TRUE;
4470 }
4471 inst.base.operands[1].shifter.amount = shift;
4472 return TRUE;
4473 }
4474
4475 /* A primitive base-2 log calculator. */
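/* E.g. get_logsz (1) == 0, get_logsz (2) == 1, get_logsz (4) == 2,
get_logsz (8) == 3 and get_logsz (16) == 4; other sizes are rejected
by the assertions below. */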
4476
4477 static inline unsigned int
4478 get_logsz (unsigned int size)
4479 {
4480 const unsigned char ls[16] =
4481 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4482 if (size > 16)
4483 {
4484 gas_assert (0);
4485 return -1;
4486 }
4487 gas_assert (ls[size - 1] != (unsigned char)-1);
4488 return ls[size - 1];
4489 }
4490
4491 /* Determine and return the real reloc type code for an instruction
4492 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
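/* For example, for "ldr x0, [x1, #:lo12:sym]" the 64-bit transfer size
gives log2 (8) == 3, so the generic BFD_RELOC_AARCH64_LDST_LO12 is
narrowed down to BFD_RELOC_AARCH64_LDST64_LO12. */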
4493
4494 static inline bfd_reloc_code_real_type
4495 ldst_lo12_determine_real_reloc_type (void)
4496 {
4497 int logsz;
4498 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4499 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4500
4501 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4502 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4503 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4504 BFD_RELOC_AARCH64_LDST128_LO12
4505 };
4506
4507 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4508 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4509
4510 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4511 opd1_qlf =
4512 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4513 1, opd0_qlf, 0);
4514 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4515
4516 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4517 gas_assert (logsz >= 0 && logsz <= 4);
4518
4519 return reloc_ldst_lo12[logsz];
4520 }
4521
4522 /* Check whether a register list REGINFO is valid. The registers must be
4523 numbered in increasing order (modulo 32), in increments of one or two.
4524
4525 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4526 increments of two.
4527
4528 Return FALSE if such a register list is invalid, otherwise return TRUE. */
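/* REGINFO is the compact encoding produced by parse_neon_reg_list: bits
[1:0] hold the number of registers minus one and each successive 5-bit
field, starting at bit 2, holds a register number. For example the list
{ v4, v5, v6, v7 } is encoded as 3 in bits [1:0] with 4, 5, 6 and 7 in
the following 5-bit fields. */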
4529
4530 static bfd_boolean
4531 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4532 {
4533 uint32_t i, nb_regs, prev_regno, incr;
4534
4535 nb_regs = 1 + (reginfo & 0x3);
4536 reginfo >>= 2;
4537 prev_regno = reginfo & 0x1f;
4538 incr = accept_alternate ? 2 : 1;
4539
4540 for (i = 1; i < nb_regs; ++i)
4541 {
4542 uint32_t curr_regno;
4543 reginfo >>= 5;
4544 curr_regno = reginfo & 0x1f;
4545 if (curr_regno != ((prev_regno + incr) & 0x1f))
4546 return FALSE;
4547 prev_regno = curr_regno;
4548 }
4549
4550 return TRUE;
4551 }
4552
4553 /* Generic instruction operand parser. This does no encoding and no
4554 semantic validation; it merely squirrels values away in the inst
4555 structure. Returns TRUE or FALSE depending on whether the
4556 specified grammar matched. */
4557
4558 static bfd_boolean
4559 parse_operands (char *str, const aarch64_opcode *opcode)
4560 {
4561 int i;
4562 char *backtrack_pos = 0;
4563 const enum aarch64_opnd *operands = opcode->operands;
4564
4565 clear_error ();
4566 skip_whitespace (str);
4567
4568 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4569 {
4570 int64_t val;
4571 int isreg32, isregzero;
4572 int comma_skipped_p = 0;
4573 aarch64_reg_type rtype;
4574 struct neon_type_el vectype;
4575 aarch64_opnd_info *info = &inst.base.operands[i];
4576
4577 DEBUG_TRACE ("parse operand %d", i);
4578
4579 /* Assign the operand code. */
4580 info->type = operands[i];
4581
4582 if (optional_operand_p (opcode, i))
4583 {
4584 /* Remember where we are in case we need to backtrack. */
4585 gas_assert (!backtrack_pos);
4586 backtrack_pos = str;
4587 }
4588
4589 /* Expect a comma between operands; the backtrack mechanism will take
4590 care of the cases of an omitted optional operand. */
4591 if (i > 0 && ! skip_past_char (&str, ','))
4592 {
4593 set_syntax_error (_("comma expected between operands"));
4594 goto failure;
4595 }
4596 else
4597 comma_skipped_p = 1;
4598
4599 switch (operands[i])
4600 {
4601 case AARCH64_OPND_Rd:
4602 case AARCH64_OPND_Rn:
4603 case AARCH64_OPND_Rm:
4604 case AARCH64_OPND_Rt:
4605 case AARCH64_OPND_Rt2:
4606 case AARCH64_OPND_Rs:
4607 case AARCH64_OPND_Ra:
4608 case AARCH64_OPND_Rt_SYS:
4609 po_int_reg_or_fail (1, 0);
4610 break;
4611
4612 case AARCH64_OPND_Rd_SP:
4613 case AARCH64_OPND_Rn_SP:
4614 po_int_reg_or_fail (0, 1);
4615 break;
4616
4617 case AARCH64_OPND_Rm_EXT:
4618 case AARCH64_OPND_Rm_SFT:
4619 po_misc_or_fail (parse_shifter_operand
4620 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4621 ? SHIFTED_ARITH_IMM
4622 : SHIFTED_LOGIC_IMM)));
4623 if (!info->shifter.operator_present)
4624 {
4625 /* Default to LSL if not present. Libopcodes prefers shifter
4626 kind to be explicit. */
4627 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4628 info->shifter.kind = AARCH64_MOD_LSL;
4629 /* For Rm_EXT, libopcodes will carry out further check on whether
4630 or not stack pointer is used in the instruction (Recall that
4631 "the extend operator is not optional unless at least one of
4632 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4633 }
4634 break;
4635
4636 case AARCH64_OPND_Fd:
4637 case AARCH64_OPND_Fn:
4638 case AARCH64_OPND_Fm:
4639 case AARCH64_OPND_Fa:
4640 case AARCH64_OPND_Ft:
4641 case AARCH64_OPND_Ft2:
4642 case AARCH64_OPND_Sd:
4643 case AARCH64_OPND_Sn:
4644 case AARCH64_OPND_Sm:
4645 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4646 if (val == PARSE_FAIL)
4647 {
4648 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4649 goto failure;
4650 }
4651 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4652
4653 info->reg.regno = val;
4654 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4655 break;
4656
4657 case AARCH64_OPND_Vd:
4658 case AARCH64_OPND_Vn:
4659 case AARCH64_OPND_Vm:
4660 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4661 if (val == PARSE_FAIL)
4662 {
4663 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4664 goto failure;
4665 }
4666 if (vectype.defined & NTA_HASINDEX)
4667 goto failure;
4668
4669 info->reg.regno = val;
4670 info->qualifier = vectype_to_qualifier (&vectype);
4671 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4672 goto failure;
4673 break;
4674
4675 case AARCH64_OPND_VdD1:
4676 case AARCH64_OPND_VnD1:
4677 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4678 if (val == PARSE_FAIL)
4679 {
4680 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4681 goto failure;
4682 }
4683 if (vectype.type != NT_d || vectype.index != 1)
4684 {
4685 set_fatal_syntax_error
4686 (_("the top half of a 128-bit FP/SIMD register is expected"));
4687 goto failure;
4688 }
4689 info->reg.regno = val;
4690 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4691 here; it is correct for the purpose of encoding/decoding since
4692 only the register number is explicitly encoded in the related
4693 instructions, although this appears a bit hacky. */
4694 info->qualifier = AARCH64_OPND_QLF_S_D;
4695 break;
4696
4697 case AARCH64_OPND_Ed:
4698 case AARCH64_OPND_En:
4699 case AARCH64_OPND_Em:
4700 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4701 if (val == PARSE_FAIL)
4702 {
4703 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4704 goto failure;
4705 }
4706 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4707 goto failure;
4708
4709 info->reglane.regno = val;
4710 info->reglane.index = vectype.index;
4711 info->qualifier = vectype_to_qualifier (&vectype);
4712 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4713 goto failure;
4714 break;
4715
4716 case AARCH64_OPND_LVn:
4717 case AARCH64_OPND_LVt:
4718 case AARCH64_OPND_LVt_AL:
4719 case AARCH64_OPND_LEt:
4720 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4721 goto failure;
4722 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4723 {
4724 set_fatal_syntax_error (_("invalid register list"));
4725 goto failure;
4726 }
4727 info->reglist.first_regno = (val >> 2) & 0x1f;
4728 info->reglist.num_regs = (val & 0x3) + 1;
4729 if (operands[i] == AARCH64_OPND_LEt)
4730 {
4731 if (!(vectype.defined & NTA_HASINDEX))
4732 goto failure;
4733 info->reglist.has_index = 1;
4734 info->reglist.index = vectype.index;
4735 }
4736 else if (!(vectype.defined & NTA_HASTYPE))
4737 goto failure;
4738 info->qualifier = vectype_to_qualifier (&vectype);
4739 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4740 goto failure;
4741 break;
4742
4743 case AARCH64_OPND_Cn:
4744 case AARCH64_OPND_Cm:
4745 po_reg_or_fail (REG_TYPE_CN);
4746 if (val > 15)
4747 {
4748 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4749 goto failure;
4750 }
4751 inst.base.operands[i].reg.regno = val;
4752 break;
4753
4754 case AARCH64_OPND_SHLL_IMM:
4755 case AARCH64_OPND_IMM_VLSR:
4756 po_imm_or_fail (1, 64);
4757 info->imm.value = val;
4758 break;
4759
4760 case AARCH64_OPND_CCMP_IMM:
4761 case AARCH64_OPND_FBITS:
4762 case AARCH64_OPND_UIMM4:
4763 case AARCH64_OPND_UIMM3_OP1:
4764 case AARCH64_OPND_UIMM3_OP2:
4765 case AARCH64_OPND_IMM_VLSL:
4766 case AARCH64_OPND_IMM:
4767 case AARCH64_OPND_WIDTH:
4768 po_imm_nc_or_fail ();
4769 info->imm.value = val;
4770 break;
4771
4772 case AARCH64_OPND_UIMM7:
4773 po_imm_or_fail (0, 127);
4774 info->imm.value = val;
4775 break;
4776
4777 case AARCH64_OPND_IDX:
4778 case AARCH64_OPND_BIT_NUM:
4779 case AARCH64_OPND_IMMR:
4780 case AARCH64_OPND_IMMS:
4781 po_imm_or_fail (0, 63);
4782 info->imm.value = val;
4783 break;
4784
4785 case AARCH64_OPND_IMM0:
4786 po_imm_nc_or_fail ();
4787 if (val != 0)
4788 {
4789 set_fatal_syntax_error (_("immediate zero expected"));
4790 goto failure;
4791 }
4792 info->imm.value = 0;
4793 break;
4794
4795 case AARCH64_OPND_FPIMM0:
4796 {
4797 int qfloat;
4798 bfd_boolean res1 = FALSE, res2 = FALSE;
4799 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4800 it is probably not worth the effort to support it. */
4801 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4802 && !(res2 = parse_constant_immediate (&str, &val)))
4803 goto failure;
4804 if ((res1 && qfloat == 0) || (res2 && val == 0))
4805 {
4806 info->imm.value = 0;
4807 info->imm.is_fp = 1;
4808 break;
4809 }
4810 set_fatal_syntax_error (_("immediate zero expected"));
4811 goto failure;
4812 }
4813
4814 case AARCH64_OPND_IMM_MOV:
4815 {
4816 char *saved = str;
4817 if (reg_name_p (str, REG_TYPE_R_Z_SP)
4818 || reg_name_p (str, REG_TYPE_VN))
4819 goto failure;
4820 str = saved;
4821 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4822 GE_OPT_PREFIX, 1));
4823 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4824 later. fix_mov_imm_insn will try to determine a machine
4825 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4826 message if the immediate cannot be moved by a single
4827 instruction. */
4828 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4829 inst.base.operands[i].skip = 1;
4830 }
4831 break;
4832
4833 case AARCH64_OPND_SIMD_IMM:
4834 case AARCH64_OPND_SIMD_IMM_SFT:
4835 if (! parse_big_immediate (&str, &val))
4836 goto failure;
4837 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4838 /* addr_off_p */ 0,
4839 /* need_libopcodes_p */ 1,
4840 /* skip_p */ 1);
4841 /* Parse shift.
4842 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4843 shift, we don't check it here; we leave the checking to
4844 the libopcodes (operand_general_constraint_met_p). By
4845 doing this, we achieve better diagnostics. */
4846 if (skip_past_comma (&str)
4847 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4848 goto failure;
4849 if (!info->shifter.operator_present
4850 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4851 {
4852 /* Default to LSL if not present. Libopcodes prefers shifter
4853 kind to be explicit. */
4854 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4855 info->shifter.kind = AARCH64_MOD_LSL;
4856 }
4857 break;
4858
4859 case AARCH64_OPND_FPIMM:
4860 case AARCH64_OPND_SIMD_FPIMM:
4861 {
4862 int qfloat;
4863 bfd_boolean dp_p
4864 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4865 == 8);
4866 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4867 goto failure;
4868 if (qfloat == 0)
4869 {
4870 set_fatal_syntax_error (_("invalid floating-point constant"));
4871 goto failure;
4872 }
4873 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4874 inst.base.operands[i].imm.is_fp = 1;
4875 }
4876 break;
4877
4878 case AARCH64_OPND_LIMM:
4879 po_misc_or_fail (parse_shifter_operand (&str, info,
4880 SHIFTED_LOGIC_IMM));
4881 if (info->shifter.operator_present)
4882 {
4883 set_fatal_syntax_error
4884 (_("shift not allowed for bitmask immediate"));
4885 goto failure;
4886 }
4887 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4888 /* addr_off_p */ 0,
4889 /* need_libopcodes_p */ 1,
4890 /* skip_p */ 1);
4891 break;
4892
4893 case AARCH64_OPND_AIMM:
4894 if (opcode->op == OP_ADD)
4895 /* ADD may have relocation types. */
4896 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4897 SHIFTED_ARITH_IMM));
4898 else
4899 po_misc_or_fail (parse_shifter_operand (&str, info,
4900 SHIFTED_ARITH_IMM));
4901 switch (inst.reloc.type)
4902 {
4903 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4904 info->shifter.amount = 12;
4905 break;
4906 case BFD_RELOC_UNUSED:
4907 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4908 if (info->shifter.kind != AARCH64_MOD_NONE)
4909 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4910 inst.reloc.pc_rel = 0;
4911 break;
4912 default:
4913 break;
4914 }
4915 info->imm.value = 0;
4916 if (!info->shifter.operator_present)
4917 {
4918 /* Default to LSL if not present. Libopcodes prefers shifter
4919 kind to be explicit. */
4920 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4921 info->shifter.kind = AARCH64_MOD_LSL;
4922 }
4923 break;
4924
4925 case AARCH64_OPND_HALF:
4926 {
4927 /* #<imm16> or relocation. */
4928 int internal_fixup_p;
4929 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4930 if (internal_fixup_p)
4931 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4932 skip_whitespace (str);
4933 if (skip_past_comma (&str))
4934 {
4935 /* {, LSL #<shift>} */
4936 if (! aarch64_gas_internal_fixup_p ())
4937 {
4938 set_fatal_syntax_error (_("can't mix relocation modifier "
4939 "with explicit shift"));
4940 goto failure;
4941 }
4942 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4943 }
4944 else
4945 inst.base.operands[i].shifter.amount = 0;
4946 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4947 inst.base.operands[i].imm.value = 0;
4948 if (! process_movw_reloc_info ())
4949 goto failure;
4950 }
4951 break;
4952
4953 case AARCH64_OPND_EXCEPTION:
4954 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4955 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4956 /* addr_off_p */ 0,
4957 /* need_libopcodes_p */ 0,
4958 /* skip_p */ 1);
4959 break;
4960
4961 case AARCH64_OPND_NZCV:
4962 {
4963 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
4964 if (nzcv != NULL)
4965 {
4966 str += 4;
4967 info->imm.value = nzcv->value;
4968 break;
4969 }
4970 po_imm_or_fail (0, 15);
4971 info->imm.value = val;
4972 }
4973 break;
4974
4975 case AARCH64_OPND_COND:
4976 case AARCH64_OPND_COND1:
4977 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
4978 str += 2;
4979 if (info->cond == NULL)
4980 {
4981 set_syntax_error (_("invalid condition"));
4982 goto failure;
4983 }
4984 else if (operands[i] == AARCH64_OPND_COND1
4985 && (info->cond->value & 0xe) == 0xe)
4986 {
4987 /* Do not allow AL or NV. */
4988 set_default_error ();
4989 goto failure;
4990 }
4991 break;
4992
4993 case AARCH64_OPND_ADDR_ADRP:
4994 po_misc_or_fail (parse_adrp (&str));
4995 /* Clear the value as operand needs to be relocated. */
4996 info->imm.value = 0;
4997 break;
4998
4999 case AARCH64_OPND_ADDR_PCREL14:
5000 case AARCH64_OPND_ADDR_PCREL19:
5001 case AARCH64_OPND_ADDR_PCREL21:
5002 case AARCH64_OPND_ADDR_PCREL26:
5003 po_misc_or_fail (parse_address_reloc (&str, info));
5004 if (!info->addr.pcrel)
5005 {
5006 set_syntax_error (_("invalid pc-relative address"));
5007 goto failure;
5008 }
5009 if (inst.gen_lit_pool
5010 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5011 {
5012 /* Only permit "=value" in the literal load instructions.
5013 The literal will be generated by programmer_friendly_fixup. */
5014 set_syntax_error (_("invalid use of \"=immediate\""));
5015 goto failure;
5016 }
5017 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5018 {
5019 set_syntax_error (_("unrecognized relocation suffix"));
5020 goto failure;
5021 }
5022 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5023 {
5024 info->imm.value = inst.reloc.exp.X_add_number;
5025 inst.reloc.type = BFD_RELOC_UNUSED;
5026 }
5027 else
5028 {
5029 info->imm.value = 0;
5030 if (inst.reloc.type == BFD_RELOC_UNUSED)
5031 switch (opcode->iclass)
5032 {
5033 case compbranch:
5034 case condbranch:
5035 /* e.g. CBZ or B.COND */
5036 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5037 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5038 break;
5039 case testbranch:
5040 /* e.g. TBZ */
5041 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5042 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5043 break;
5044 case branch_imm:
5045 /* e.g. B or BL */
5046 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5047 inst.reloc.type =
5048 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5049 : BFD_RELOC_AARCH64_JUMP26;
5050 break;
5051 case loadlit:
5052 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5053 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5054 break;
5055 case pcreladdr:
5056 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5057 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5058 break;
5059 default:
5060 gas_assert (0);
5061 abort ();
5062 }
5063 inst.reloc.pc_rel = 1;
5064 }
5065 break;
5066
5067 case AARCH64_OPND_ADDR_SIMPLE:
5068 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5069 /* [<Xn|SP>{, #<simm>}] */
5070 po_char_or_fail ('[');
5071 po_reg_or_fail (REG_TYPE_R64_SP);
5072 /* Accept optional ", #0". */
5073 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5074 && skip_past_char (&str, ','))
5075 {
5076 skip_past_char (&str, '#');
5077 if (! skip_past_char (&str, '0'))
5078 {
5079 set_fatal_syntax_error
5080 (_("the optional immediate offset can only be 0"));
5081 goto failure;
5082 }
5083 }
5084 po_char_or_fail (']');
5085 info->addr.base_regno = val;
5086 break;
5087
5088 case AARCH64_OPND_ADDR_REGOFF:
5089 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5090 po_misc_or_fail (parse_address (&str, info, 0));
5091 if (info->addr.pcrel || !info->addr.offset.is_reg
5092 || !info->addr.preind || info->addr.postind
5093 || info->addr.writeback)
5094 {
5095 set_syntax_error (_("invalid addressing mode"));
5096 goto failure;
5097 }
5098 if (!info->shifter.operator_present)
5099 {
5100 /* Default to LSL if not present. Libopcodes prefers shifter
5101 kind to be explicit. */
5102 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5103 info->shifter.kind = AARCH64_MOD_LSL;
5104 }
5105 /* Qualifier to be deduced by libopcodes. */
5106 break;
5107
5108 case AARCH64_OPND_ADDR_SIMM7:
5109 po_misc_or_fail (parse_address (&str, info, 0));
5110 if (info->addr.pcrel || info->addr.offset.is_reg
5111 || (!info->addr.preind && !info->addr.postind))
5112 {
5113 set_syntax_error (_("invalid addressing mode"));
5114 goto failure;
5115 }
5116 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5117 /* addr_off_p */ 1,
5118 /* need_libopcodes_p */ 1,
5119 /* skip_p */ 0);
5120 break;
5121
5122 case AARCH64_OPND_ADDR_SIMM9:
5123 case AARCH64_OPND_ADDR_SIMM9_2:
5124 po_misc_or_fail (parse_address_reloc (&str, info));
5125 if (info->addr.pcrel || info->addr.offset.is_reg
5126 || (!info->addr.preind && !info->addr.postind)
5127 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5128 && info->addr.writeback))
5129 {
5130 set_syntax_error (_("invalid addressing mode"));
5131 goto failure;
5132 }
5133 if (inst.reloc.type != BFD_RELOC_UNUSED)
5134 {
5135 set_syntax_error (_("relocation not allowed"));
5136 goto failure;
5137 }
5138 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5139 /* addr_off_p */ 1,
5140 /* need_libopcodes_p */ 1,
5141 /* skip_p */ 0);
5142 break;
5143
5144 case AARCH64_OPND_ADDR_UIMM12:
5145 po_misc_or_fail (parse_address_reloc (&str, info));
5146 if (info->addr.pcrel || info->addr.offset.is_reg
5147 || !info->addr.preind || info->addr.writeback)
5148 {
5149 set_syntax_error (_("invalid addressing mode"));
5150 goto failure;
5151 }
5152 if (inst.reloc.type == BFD_RELOC_UNUSED)
5153 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5154 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5155 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5156 /* Leave qualifier to be determined by libopcodes. */
5157 break;
5158
5159 case AARCH64_OPND_SIMD_ADDR_POST:
5160 /* [<Xn|SP>], <Xm|#<amount>> */
5161 po_misc_or_fail (parse_address (&str, info, 1));
5162 if (!info->addr.postind || !info->addr.writeback)
5163 {
5164 set_syntax_error (_("invalid addressing mode"));
5165 goto failure;
5166 }
5167 if (!info->addr.offset.is_reg)
5168 {
5169 if (inst.reloc.exp.X_op == O_constant)
5170 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5171 else
5172 {
5173 set_fatal_syntax_error
5174 (_("writeback value should be an immediate constant"));
5175 goto failure;
5176 }
5177 }
5178 /* No qualifier. */
5179 break;
5180
5181 case AARCH64_OPND_SYSREG:
5182 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5183 == PARSE_FAIL)
5184 {
5185 set_syntax_error (_("unknown or missing system register name"));
5186 goto failure;
5187 }
5188 inst.base.operands[i].sysreg = val;
5189 break;
5190
5191 case AARCH64_OPND_PSTATEFIELD:
5192 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5193 == PARSE_FAIL)
5194 {
5195 set_syntax_error (_("unknown or missing PSTATE field name"));
5196 goto failure;
5197 }
5198 inst.base.operands[i].pstatefield = val;
5199 break;
5200
5201 case AARCH64_OPND_SYSREG_IC:
5202 inst.base.operands[i].sysins_op =
5203 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5204 goto sys_reg_ins;
5205 case AARCH64_OPND_SYSREG_DC:
5206 inst.base.operands[i].sysins_op =
5207 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5208 goto sys_reg_ins;
5209 case AARCH64_OPND_SYSREG_AT:
5210 inst.base.operands[i].sysins_op =
5211 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5212 goto sys_reg_ins;
5213 case AARCH64_OPND_SYSREG_TLBI:
5214 inst.base.operands[i].sysins_op =
5215 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5216 sys_reg_ins:
5217 if (inst.base.operands[i].sysins_op == NULL)
5218 {
5219 set_fatal_syntax_error ( _("unknown or missing operation name"));
5220 goto failure;
5221 }
5222 break;
5223
5224 case AARCH64_OPND_BARRIER:
5225 case AARCH64_OPND_BARRIER_ISB:
5226 val = parse_barrier (&str);
5227 if (val != PARSE_FAIL
5228 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5229 {
5230 /* ISB only accepts the option name 'sy'. */
5231 set_syntax_error
5232 (_("the specified option is not accepted in ISB"));
5233 /* Turn off backtrack as this optional operand is present. */
5234 backtrack_pos = 0;
5235 goto failure;
5236 }
5237 /* This is an extension to accept a 0..15 immediate. */
5238 if (val == PARSE_FAIL)
5239 po_imm_or_fail (0, 15);
5240 info->barrier = aarch64_barrier_options + val;
5241 break;
5242
5243 case AARCH64_OPND_PRFOP:
5244 val = parse_pldop (&str);
5245 /* This is an extension to accept a 0..31 immediate. */
5246 if (val == PARSE_FAIL)
5247 po_imm_or_fail (0, 31);
5248 inst.base.operands[i].prfop = aarch64_prfops + val;
5249 break;
5250
5251 default:
5252 as_fatal (_("unhandled operand code %d"), operands[i]);
5253 }
5254
5255 /* If we get here, this operand was successfully parsed. */
5256 inst.base.operands[i].present = 1;
5257 continue;
5258
5259 failure:
5260 /* The parse routine should already have set the error, but in case
5261 not, set a default one here. */
5262 if (! error_p ())
5263 set_default_error ();
5264
5265 if (! backtrack_pos)
5266 goto parse_operands_return;
5267
5268 /* Reaching here means we are dealing with an optional operand that is
5269 omitted from the assembly line. */
5270 gas_assert (optional_operand_p (opcode, i));
5271 info->present = 0;
5272 process_omitted_operand (operands[i], opcode, i, info);
5273
5274 /* Try again, skipping the optional operand at backtrack_pos. */
5275 str = backtrack_pos;
5276 backtrack_pos = 0;
5277
5278 /* If this is the last operand, which is optional and omitted, but a
5279 comma was nevertheless parsed before it, the comma is unexpected. */
5280 if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
5281 {
5282 set_fatal_syntax_error
5283 (_("unexpected comma before the omitted optional operand"));
5284 goto parse_operands_return;
5285 }
5286
5287 /* Clear any error record after the omitted optional operand has been
5288 successfully handled. */
5289 clear_error ();
5290 }
5291
5292 /* Check if we have parsed all the operands. */
5293 if (*str != '\0' && ! error_p ())
5294 {
5295 /* Set I to the index of the last present operand; this is
5296 for the purpose of diagnostics. */
5297 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5298 ;
5299 set_fatal_syntax_error
5300 (_("unexpected characters following instruction"));
5301 }
5302
5303 parse_operands_return:
5304
5305 if (error_p ())
5306 {
5307 DEBUG_TRACE ("parsing FAIL: %s - %s",
5308 operand_mismatch_kind_names[get_error_kind ()],
5309 get_error_message ());
5310 /* Record the operand error properly; this is useful when there
5311 are multiple instruction templates for a mnemonic name, so that
5312 later on, we can select the error that most closely describes
5313 the problem. */
5314 record_operand_error (opcode, i, get_error_kind (),
5315 get_error_message ());
5316 return FALSE;
5317 }
5318 else
5319 {
5320 DEBUG_TRACE ("parsing SUCCESS");
5321 return TRUE;
5322 }
5323 }
5324
5325 /* Carry out some fix-ups to provide programmer-friendly features while
5326 keeping libopcodes happy, i.e. libopcodes only accepts
5327 the preferred architectural syntax.
5328 Return FALSE if there is any failure; otherwise return TRUE. */
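/* For example, "uxtb x1, w2" is accepted here and adjusted to the
preferred form "uxtb w1, w2" before being handed to libopcodes. */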
5329
5330 static bfd_boolean
5331 programmer_friendly_fixup (aarch64_instruction *instr)
5332 {
5333 aarch64_inst *base = &instr->base;
5334 const aarch64_opcode *opcode = base->opcode;
5335 enum aarch64_op op = opcode->op;
5336 aarch64_opnd_info *operands = base->operands;
5337
5338 DEBUG_TRACE ("enter");
5339
5340 switch (opcode->iclass)
5341 {
5342 case testbranch:
5343 /* TBNZ Xn|Wn, #uimm6, label
5344 Test and Branch Not Zero: conditionally jumps to label if bit number
5345 uimm6 in register Xn is not zero. The bit number implies the width of
5346 the register, which may be written and should be disassembled as Wn if
5347 uimm is less than 32. */
5348 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5349 {
5350 if (operands[1].imm.value >= 32)
5351 {
5352 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5353 0, 31);
5354 return FALSE;
5355 }
5356 operands[0].qualifier = AARCH64_OPND_QLF_X;
5357 }
5358 break;
5359 case loadlit:
5360 /* LDR Wt, label | =value
5361 As a convenience assemblers will typically permit the notation
5362 "=value" in conjunction with the pc-relative literal load instructions
5363 to automatically place an immediate value or symbolic address in a
5364 nearby literal pool and generate a hidden label which references it.
5365 ISREG has been set to 0 in the case of =value. */
5366 if (instr->gen_lit_pool
5367 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5368 {
5369 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5370 if (op == OP_LDRSW_LIT)
5371 size = 4;
5372 if (instr->reloc.exp.X_op != O_constant
5373 && instr->reloc.exp.X_op != O_big
5374 && instr->reloc.exp.X_op != O_symbol)
5375 {
5376 record_operand_error (opcode, 1,
5377 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5378 _("constant expression expected"));
5379 return FALSE;
5380 }
5381 if (! add_to_lit_pool (&instr->reloc.exp, size))
5382 {
5383 record_operand_error (opcode, 1,
5384 AARCH64_OPDE_OTHER_ERROR,
5385 _("literal pool insertion failed"));
5386 return FALSE;
5387 }
5388 }
5389 break;
5390 case log_shift:
5391 case bitfield:
5392 /* UXT[BHW] Wd, Wn
5393 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5394 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
5395 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5396 A programmer-friendly assembler should accept a destination Xd in
5397 place of Wd, however that is not the preferred form for disassembly.
5398 */
5399 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5400 && operands[1].qualifier == AARCH64_OPND_QLF_W
5401 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5402 operands[0].qualifier = AARCH64_OPND_QLF_W;
5403 break;
5404
5405 case addsub_ext:
5406 {
5407 /* In the 64-bit form, the final register operand is written as Wm
5408 for all but the (possibly omitted) UXTX/LSL and SXTX
5409 operators.
5410 As a programmer-friendly assembler, we accept e.g.
5411 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5412 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5413 int idx = aarch64_operand_index (opcode->operands,
5414 AARCH64_OPND_Rm_EXT);
5415 gas_assert (idx == 1 || idx == 2);
5416 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5417 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5418 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5419 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5420 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5421 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5422 }
5423 break;
5424
5425 default:
5426 break;
5427 }
5428
5429 DEBUG_TRACE ("exit with SUCCESS");
5430 return TRUE;
5431 }
5432
5433 /* A wrapper function to interface with libopcodes on encoding and
5434 record the error message if there is any.
5435
5436 Return TRUE on success; otherwise return FALSE. */
5437
5438 static bfd_boolean
5439 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5440 aarch64_insn *code)
5441 {
5442 aarch64_operand_error error_info;
5443 error_info.kind = AARCH64_OPDE_NIL;
5444 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5445 return TRUE;
5446 else
5447 {
5448 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5449 record_operand_error_info (opcode, &error_info);
5450 return FALSE;
5451 }
5452 }
5453
5454 #ifdef DEBUG_AARCH64
5455 static inline void
5456 dump_opcode_operands (const aarch64_opcode *opcode)
5457 {
5458 int i = 0;
5459 while (opcode->operands[i] != AARCH64_OPND_NIL)
5460 {
5461 aarch64_verbose ("\t\t opnd%d: %s", i,
5462 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5463 ? aarch64_get_operand_name (opcode->operands[i])
5464 : aarch64_get_operand_desc (opcode->operands[i]));
5465 ++i;
5466 }
5467 }
5468 #endif /* DEBUG_AARCH64 */
5469
5470 /* This is the guts of the machine-dependent assembler. STR points to a
5471 machine dependent instruction. This function is supposed to emit
5472 the frags/bytes it assembles to. */
5473
5474 void
5475 md_assemble (char *str)
5476 {
5477 char *p = str;
5478 templates *template;
5479 aarch64_opcode *opcode;
5480 aarch64_inst *inst_base;
5481 unsigned saved_cond;
5482
5483 /* Align the previous label if needed. */
5484 if (last_label_seen != NULL)
5485 {
5486 symbol_set_frag (last_label_seen, frag_now);
5487 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5488 S_SET_SEGMENT (last_label_seen, now_seg);
5489 }
5490
5491 inst.reloc.type = BFD_RELOC_UNUSED;
5492
5493 DEBUG_TRACE ("\n\n");
5494 DEBUG_TRACE ("==============================");
5495 DEBUG_TRACE ("Enter md_assemble with %s", str);
5496
5497 template = opcode_lookup (&p);
5498 if (!template)
5499 {
5500 /* It wasn't an instruction, but it might be a register alias
5501 created by an "alias .req reg" directive. */
5502 if (!create_register_alias (str, p))
5503 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5504 str);
5505 return;
5506 }
5507
5508 skip_whitespace (p);
5509 if (*p == ',')
5510 {
5511 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5512 get_mnemonic_name (str), str);
5513 return;
5514 }
5515
5516 init_operand_error_report ();
5517
5518 saved_cond = inst.cond;
5519 reset_aarch64_instruction (&inst);
5520 inst.cond = saved_cond;
5521
5522 /* Iterate through all opcode entries with the same mnemonic name. */
5523 do
5524 {
5525 opcode = template->opcode;
5526
5527 DEBUG_TRACE ("opcode %s found", opcode->name);
5528 #ifdef DEBUG_AARCH64
5529 if (debug_dump)
5530 dump_opcode_operands (opcode);
5531 #endif /* DEBUG_AARCH64 */
5532
5533 mapping_state (MAP_INSN);
5534
5535 inst_base = &inst.base;
5536 inst_base->opcode = opcode;
5537
5538 /* Truly conditionally executed instructions, e.g. b.cond. */
5539 if (opcode->flags & F_COND)
5540 {
5541 gas_assert (inst.cond != COND_ALWAYS);
5542 inst_base->cond = get_cond_from_value (inst.cond);
5543 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5544 }
5545 else if (inst.cond != COND_ALWAYS)
5546 {
5547 /* We shouldn't arrive here, where the assembly looks like a
5548 conditional instruction but the opcode found is unconditional. */
5549 gas_assert (0);
5550 continue;
5551 }
5552
5553 if (parse_operands (p, opcode)
5554 && programmer_friendly_fixup (&inst)
5555 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5556 {
5557 /* Check that this instruction is supported for this CPU. */
5558 if (!opcode->avariant
5559 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5560 {
5561 as_bad (_("selected processor does not support `%s'"), str);
5562 return;
5563 }
5564
5565 if (inst.reloc.type == BFD_RELOC_UNUSED
5566 || !inst.reloc.need_libopcodes_p)
5567 output_inst (NULL);
5568 else
5569 {
5570 /* If there is relocation generated for the instruction,
5571 store the instruction information for the future fix-up. */
5572 struct aarch64_inst *copy;
5573 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5574 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5575 abort ();
5576 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5577 output_inst (copy);
5578 }
5579 return;
5580 }
5581
5582 template = template->next;
5583 if (template != NULL)
5584 {
5585 reset_aarch64_instruction (&inst);
5586 inst.cond = saved_cond;
5587 }
5588 }
5589 while (template != NULL);
5590
5591 /* Issue the error messages if any. */
5592 output_operand_error_report (str);
5593 }
5594
5595 /* Various frobbings of labels and their addresses. */
5596
5597 void
5598 aarch64_start_line_hook (void)
5599 {
5600 last_label_seen = NULL;
5601 }
5602
5603 void
5604 aarch64_frob_label (symbolS * sym)
5605 {
5606 last_label_seen = sym;
5607
5608 dwarf2_emit_label (sym);
5609 }
5610
5611 int
5612 aarch64_data_in_code (void)
5613 {
5614 if (!strncmp (input_line_pointer + 1, "data:", 5))
5615 {
5616 *input_line_pointer = '/';
5617 input_line_pointer += 5;
5618 *input_line_pointer = 0;
5619 return 1;
5620 }
5621
5622 return 0;
5623 }
5624
5625 char *
5626 aarch64_canonicalize_symbol_name (char *name)
5627 {
5628 int len;
5629
5630 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5631 *(name + len - 5) = 0;
5632
5633 return name;
5634 }
5635 \f
5636 /* Table of all register names defined by default. The user can
5637 define additional names with .req. Note that all register names
5638 should appear in both upper and lowercase variants. Some registers
5639 also have mixed-case names. */
5640
5641 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5642 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5643 #define REGSET31(p,t) \
5644 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5645 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5646 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5647 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5648 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5649 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5650 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5651 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5652 #define REGSET(p,t) \
5653 REGSET31(p,t), REGNUM(p,31,t)
5654
5655 /* These go into aarch64_reg_hsh hash-table. */
5656 static const reg_entry reg_names[] = {
5657 /* Integer registers. */
5658 REGSET31 (x, R_64), REGSET31 (X, R_64),
5659 REGSET31 (w, R_32), REGSET31 (W, R_32),
5660
5661 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5662 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5663
5664 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5665 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5666
5667 /* Coprocessor register numbers. */
5668 REGSET (c, CN), REGSET (C, CN),
5669
5670 /* Floating-point single precision registers. */
5671 REGSET (s, FP_S), REGSET (S, FP_S),
5672
5673 /* Floating-point double precision registers. */
5674 REGSET (d, FP_D), REGSET (D, FP_D),
5675
5676 /* Floating-point half precision registers. */
5677 REGSET (h, FP_H), REGSET (H, FP_H),
5678
5679 /* Floating-point byte precision registers. */
5680 REGSET (b, FP_B), REGSET (B, FP_B),
5681
5682 /* Floating-point quad precision registers. */
5683 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5684
5685 /* FP/SIMD registers. */
5686 REGSET (v, VN), REGSET (V, VN),
5687 };
5688
5689 #undef REGDEF
5690 #undef REGNUM
5691 #undef REGSET
5692
5693 #define N 1
5694 #define n 0
5695 #define Z 1
5696 #define z 0
5697 #define C 1
5698 #define c 0
5699 #define V 1
5700 #define v 0
5701 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
5702 static const asm_nzcv nzcv_names[] = {
5703 {"nzcv", B (n, z, c, v)},
5704 {"nzcV", B (n, z, c, V)},
5705 {"nzCv", B (n, z, C, v)},
5706 {"nzCV", B (n, z, C, V)},
5707 {"nZcv", B (n, Z, c, v)},
5708 {"nZcV", B (n, Z, c, V)},
5709 {"nZCv", B (n, Z, C, v)},
5710 {"nZCV", B (n, Z, C, V)},
5711 {"Nzcv", B (N, z, c, v)},
5712 {"NzcV", B (N, z, c, V)},
5713 {"NzCv", B (N, z, C, v)},
5714 {"NzCV", B (N, z, C, V)},
5715 {"NZcv", B (N, Z, c, v)},
5716 {"NZcV", B (N, Z, c, V)},
5717 {"NZCv", B (N, Z, C, v)},
5718 {"NZCV", B (N, Z, C, V)}
5719 };
5720
5721 #undef N
5722 #undef n
5723 #undef Z
5724 #undef z
5725 #undef C
5726 #undef c
5727 #undef V
5728 #undef v
5729 #undef B
5730 \f
5731 /* MD interface: bits in the object file. */
5732
5733 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5734 for use in the a.out file, and store them in the array pointed to by buf.
5735 This knows about the endian-ness of the target machine and does
5736 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
5737 2 (short) and 4 (long). Floating-point numbers are put out as a series of
5738 LITTLENUMS (shorts, here at least). */
5739
5740 void
5741 md_number_to_chars (char *buf, valueT val, int n)
5742 {
5743 if (target_big_endian)
5744 number_to_chars_bigendian (buf, val, n);
5745 else
5746 number_to_chars_littleendian (buf, val, n);
5747 }
5748
5749 /* MD interface: Sections. */
5750
5751 /* Estimate the size of a frag before relaxing. Assume everything fits in
5752 4 bytes. */
5753
5754 int
5755 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5756 {
5757 fragp->fr_var = 4;
5758 return 4;
5759 }
5760
5761 /* Round up a section size to the appropriate boundary. */
5762
5763 valueT
5764 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5765 {
5766 return size;
5767 }
5768
5769 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5770 of an rs_align_code fragment. */
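/* For example, when 6 bytes of padding are needed, the first 2 bytes are
zero-filled (and, under ELF, marked as data via a mapping symbol) and the
remaining 4 bytes are filled with a single NOP. */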
5771
5772 void
5773 aarch64_handle_align (fragS * fragP)
5774 {
5775 /* NOP = d503201f */
5776 /* AArch64 instructions are always little-endian. */
5777 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5778
5779 int bytes, fix, noop_size;
5780 char *p;
5781 const char *noop;
5782
5783 if (fragP->fr_type != rs_align_code)
5784 return;
5785
5786 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5787 p = fragP->fr_literal + fragP->fr_fix;
5788 fix = 0;
5789
5790 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5791 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5792
5793 #ifdef OBJ_ELF
5794 gas_assert (fragP->tc_frag_data.recorded);
5795 #endif
5796
5797 noop = aarch64_noop;
5798 noop_size = sizeof (aarch64_noop);
5799 fragP->fr_var = noop_size;
5800
5801 if (bytes & (noop_size - 1))
5802 {
5803 fix = bytes & (noop_size - 1);
5804 #ifdef OBJ_ELF
5805 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5806 #endif
5807 memset (p, 0, fix);
5808 p += fix;
5809 bytes -= fix;
5810 }
5811
5812 while (bytes >= noop_size)
5813 {
5814 memcpy (p, noop, noop_size);
5815 p += noop_size;
5816 bytes -= noop_size;
5817 fix += noop_size;
5818 }
5819
5820 fragP->fr_fix += fix;
5821 }
5822
5823 /* Called from md_do_align. Used to create an alignment
5824 frag in a code section. */
5825
5826 void
5827 aarch64_frag_align_code (int n, int max)
5828 {
5829 char *p;
5830
5831 /* We assume that there will never be a requirement to support
5832 alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
5833 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5834 as_fatal (_
5835 ("alignments greater than %d bytes not supported in .text sections"),
5836 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5837
5838 p = frag_var (rs_align_code,
5839 MAX_MEM_FOR_RS_ALIGN_CODE,
5840 1,
5841 (relax_substateT) max,
5842 (symbolS *) NULL, (offsetT) n, (char *) NULL);
5843 *p = 0;
5844 }
5845
5846 /* Perform target specific initialisation of a frag.
5847 Note - despite the name this initialisation is not done when the frag
5848 is created, but only when its type is assigned. A frag can be created
5849 and used a long time before its type is set, so beware of assuming that
5850 this initialisation is performed first. */
5851
5852 #ifndef OBJ_ELF
5853 void
5854 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5855 int max_chars ATTRIBUTE_UNUSED)
5856 {
5857 }
5858
5859 #else /* OBJ_ELF is defined. */
5860 void
5861 aarch64_init_frag (fragS * fragP, int max_chars)
5862 {
5863 /* Record a mapping symbol for alignment frags. We will delete this
5864 later if the alignment ends up empty. */
5865 if (!fragP->tc_frag_data.recorded)
5866 {
5867 fragP->tc_frag_data.recorded = 1;
5868 switch (fragP->fr_type)
5869 {
5870 case rs_align:
5871 case rs_align_test:
5872 case rs_fill:
5873 mapping_state_2 (MAP_DATA, max_chars);
5874 break;
5875 case rs_align_code:
5876 mapping_state_2 (MAP_INSN, max_chars);
5877 break;
5878 default:
5879 break;
5880 }
5881 }
5882 }
5883 \f
5884 /* Initialize the DWARF-2 unwind information for this procedure. */
5885
5886 void
5887 tc_aarch64_frame_initial_instructions (void)
5888 {
5889 cfi_add_CFA_def_cfa (REG_SP, 0);
5890 }
5891 #endif /* OBJ_ELF */
5892
5893 /* Convert REGNAME to a DWARF-2 register number. */
5894
5895 int
5896 tc_aarch64_regname_to_dw2regnum (char *regname)
5897 {
5898 const reg_entry *reg = parse_reg (&regname);
5899 if (reg == NULL)
5900 return -1;
5901
5902 switch (reg->type)
5903 {
5904 case REG_TYPE_SP_32:
5905 case REG_TYPE_SP_64:
5906 case REG_TYPE_R_32:
5907 case REG_TYPE_R_64:
5908 case REG_TYPE_FP_B:
5909 case REG_TYPE_FP_H:
5910 case REG_TYPE_FP_S:
5911 case REG_TYPE_FP_D:
5912 case REG_TYPE_FP_Q:
5913 return reg->number;
5914 default:
5915 break;
5916 }
5917 return -1;
5918 }
5919
5920 /* Implement DWARF2_ADDR_SIZE. */
5921
5922 int
5923 aarch64_dwarf2_addr_size (void)
5924 {
5925 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
5926 if (ilp32_p)
5927 return 4;
5928 #endif
5929 return bfd_arch_bits_per_address (stdoutput) / 8;
5930 }
5931
5932 /* MD interface: Symbol and relocation handling. */
5933
5934 /* Return the address within the segment that a PC-relative fixup is
5935 relative to. For AArch64 PC-relative fixups applied to instructions
5936 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
5937
5938 long
5939 md_pcrel_from_section (fixS * fixP, segT seg)
5940 {
5941 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5942
5943 /* If this is pc-relative and we are going to emit a relocation
5944 then we just want to put out any pipeline compensation that the linker
5945 will need. Otherwise we want to use the calculated base. */
5946 if (fixP->fx_pcrel
5947 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5948 || aarch64_force_relocation (fixP)))
5949 base = 0;
5950
5951 /* AArch64 should be consistent for all pc-relative relocations. */
5952 return base + AARCH64_PCREL_OFFSET;
5953 }
5954
5955 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
5956 Otherwise we have no need to default the values of symbols. */
5957
5958 symbolS *
5959 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
5960 {
5961 #ifdef OBJ_ELF
5962 if (name[0] == '_' && name[1] == 'G'
5963 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
5964 {
5965 if (!GOT_symbol)
5966 {
5967 if (symbol_find (name))
5968 as_bad (_("GOT already in the symbol table"));
5969
5970 GOT_symbol = symbol_new (name, undefined_section,
5971 (valueT) 0, &zero_address_frag);
5972 }
5973
5974 return GOT_symbol;
5975 }
5976 #endif
5977
5978 return 0;
5979 }
5980
5981 /* Return non-zero if the indicated VALUE has overflowed the maximum
5982 range expressible by an unsigned number with the indicated number of
5983 BITS. */
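/* For example (illustrative): unsigned_overflow (0x10000, 16) is TRUE,
   while unsigned_overflow (0xffff, 16) is FALSE. */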
5984
5985 static bfd_boolean
5986 unsigned_overflow (valueT value, unsigned bits)
5987 {
5988 valueT lim;
5989 if (bits >= sizeof (valueT) * 8)
5990 return FALSE;
5991 lim = (valueT) 1 << bits;
5992 return (value >= lim);
5993 }
5994
5995
5996 /* Return non-zero if the indicated VALUE has overflowed the maximum
5997 range expressible by a signed number with the indicated number of
5998 BITS. */
5999
6000 static bfd_boolean
6001 signed_overflow (offsetT value, unsigned bits)
6002 {
6003 offsetT lim;
6004 if (bits >= sizeof (offsetT) * 8)
6005 return FALSE;
6006 lim = (offsetT) 1 << (bits - 1);
6007 return (value < -lim || value >= lim);
6008 }
6009
6010 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
6011 unsigned immediate offset load/store instruction, try to encode it as
6012 an unscaled, 9-bit, signed immediate offset load/store instruction.
6013 Return TRUE if it is successful; otherwise return FALSE.
6014
6015 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6016 in response to the standard LDR/STR mnemonics when the immediate offset is
6017 unambiguous, i.e. when it is negative or unaligned. */
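/* For example, "ldr x0, [x1, #1]" cannot use the scaled, unsigned 12-bit
   form because the offset is not a multiple of 8, so it is assembled as
   "ldur x0, [x1, #1]"; a negative offset such as #-8 likewise selects the
   unscaled form. */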
6018
6019 static bfd_boolean
6020 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6021 {
6022 int idx;
6023 enum aarch64_op new_op;
6024 const aarch64_opcode *new_opcode;
6025
6026 gas_assert (instr->opcode->iclass == ldst_pos);
6027
6028 switch (instr->opcode->op)
6029 {
6030 case OP_LDRB_POS:new_op = OP_LDURB; break;
6031 case OP_STRB_POS: new_op = OP_STURB; break;
6032 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6033 case OP_LDRH_POS: new_op = OP_LDURH; break;
6034 case OP_STRH_POS: new_op = OP_STURH; break;
6035 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6036 case OP_LDR_POS: new_op = OP_LDUR; break;
6037 case OP_STR_POS: new_op = OP_STUR; break;
6038 case OP_LDRF_POS: new_op = OP_LDURV; break;
6039 case OP_STRF_POS: new_op = OP_STURV; break;
6040 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6041 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6042 default: new_op = OP_NIL; break;
6043 }
6044
6045 if (new_op == OP_NIL)
6046 return FALSE;
6047
6048 new_opcode = aarch64_get_opcode (new_op);
6049 gas_assert (new_opcode != NULL);
6050
6051 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6052 instr->opcode->op, new_opcode->op);
6053
6054 aarch64_replace_opcode (instr, new_opcode);
6055
6056 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6057 qualifier matching may fail because the out-of-date qualifier will
6058 prevent the operand from being updated with a new and correct qualifier. */
6059 idx = aarch64_operand_index (instr->opcode->operands,
6060 AARCH64_OPND_ADDR_SIMM9);
6061 gas_assert (idx == 1);
6062 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6063
6064 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6065
6066 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6067 return FALSE;
6068
6069 return TRUE;
6070 }
6071
6072 /* Called by fix_insn to fix a MOV immediate alias instruction.
6073
6074 Operand for a generic move immediate instruction, which is an alias
6075 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6076 a 32-bit/64-bit immediate value into a general register. An assembler error
6077 shall result if the immediate cannot be created by a single one of these
6078 instructions. If there is a choice, then to ensure reversibility an
6079 assembler must prefer MOVZ to MOVN, and MOVZ or MOVN to ORR. */
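/* For example (illustrative): "mov x0, #0x10000" is encodable as a single
   MOVZ (#0x1, LSL #16), "mov x0, #-1" as a single MOVN, and
   "mov x0, #0xff00ff00ff00ff00" as a single ORR with a bitmask immediate;
   a value expressible by none of the three is rejected below. */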
6080
6081 static void
6082 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6083 {
6084 const aarch64_opcode *opcode;
6085
6086 /* Need to check if the destination is SP/ZR. The check has to be done
6087 before any aarch64_replace_opcode. */
6088 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6089 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6090
6091 instr->operands[1].imm.value = value;
6092 instr->operands[1].skip = 0;
6093
6094 if (try_mov_wide_p)
6095 {
6096 /* Try the MOVZ alias. */
6097 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6098 aarch64_replace_opcode (instr, opcode);
6099 if (aarch64_opcode_encode (instr->opcode, instr,
6100 &instr->value, NULL, NULL))
6101 {
6102 put_aarch64_insn (buf, instr->value);
6103 return;
6104 }
6105 /* Try the MOVN alias. */
6106 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6107 aarch64_replace_opcode (instr, opcode);
6108 if (aarch64_opcode_encode (instr->opcode, instr,
6109 &instr->value, NULL, NULL))
6110 {
6111 put_aarch64_insn (buf, instr->value);
6112 return;
6113 }
6114 }
6115
6116 if (try_mov_bitmask_p)
6117 {
6118 /* Try the ORR alias. */
6119 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6120 aarch64_replace_opcode (instr, opcode);
6121 if (aarch64_opcode_encode (instr->opcode, instr,
6122 &instr->value, NULL, NULL))
6123 {
6124 put_aarch64_insn (buf, instr->value);
6125 return;
6126 }
6127 }
6128
6129 as_bad_where (fixP->fx_file, fixP->fx_line,
6130 _("immediate cannot be moved by a single instruction"));
6131 }
6132
6133 /* An immediate-related instruction operand may have a symbol used
6134 in the assembly, e.g.
6135
6136 mov w0, u32
6137 .set u32, 0x00ffff00
6138
6139 At the time when the assembly instruction is parsed, a referenced symbol,
6140 like 'u32' in the above example may not have been seen; a fixS is created
6141 in such a case and is handled here after symbols have been resolved.
6142 Instruction is fixed up with VALUE using the information in *FIXP plus
6143 extra information in FLAGS.
6144
6145 This function is called by md_apply_fix to fix up instructions that need
6146 a fix-up described above but does not involve any linker-time relocation. */
6147
6148 static void
6149 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6150 {
6151 int idx;
6152 uint32_t insn;
6153 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6154 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6155 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6156
6157 if (new_inst)
6158 {
6159 /* Now the instruction is about to be fixed-up, so the operand that
6160 was previously marked as 'ignored' needs to be unmarked in order
6161 to get the encoding done properly. */
6162 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6163 new_inst->operands[idx].skip = 0;
6164 }
6165
6166 gas_assert (opnd != AARCH64_OPND_NIL);
6167
6168 switch (opnd)
6169 {
6170 case AARCH64_OPND_EXCEPTION:
6171 if (unsigned_overflow (value, 16))
6172 as_bad_where (fixP->fx_file, fixP->fx_line,
6173 _("immediate out of range"));
6174 insn = get_aarch64_insn (buf);
6175 insn |= encode_svc_imm (value);
6176 put_aarch64_insn (buf, insn);
6177 break;
6178
6179 case AARCH64_OPND_AIMM:
6180 /* ADD or SUB with immediate.
6181 NOTE this assumes we come here with an add/sub shifted reg encoding
6182 3 322|2222|2 2 2 21111 111111
6183 1 098|7654|3 2 1 09876 543210 98765 43210
6184 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6185 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6186 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6187 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6188 ->
6189 3 322|2222|2 2 221111111111
6190 1 098|7654|3 2 109876543210 98765 43210
6191 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6192 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6193 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6194 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6195 Fields sf Rn Rd are already set. */
6196 insn = get_aarch64_insn (buf);
6197 if (value < 0)
6198 {
6199 /* Add <-> sub. */
6200 insn = reencode_addsub_switch_add_sub (insn);
6201 value = -value;
6202 }
6203
6204 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6205 && unsigned_overflow (value, 12))
6206 {
6207 /* Try to shift the value by 12 to make it fit. */
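/* e.g. an addend of 0x3000 overflows 12 bits, but its low 12 bits are
   zero, so it can be encoded as #0x3 with a shift amount of 1 (LSL #12). */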
6208 if (((value >> 12) << 12) == value
6209 && ! unsigned_overflow (value, 12 + 12))
6210 {
6211 value >>= 12;
6212 insn |= encode_addsub_imm_shift_amount (1);
6213 }
6214 }
6215
6216 if (unsigned_overflow (value, 12))
6217 as_bad_where (fixP->fx_file, fixP->fx_line,
6218 _("immediate out of range"));
6219
6220 insn |= encode_addsub_imm (value);
6221
6222 put_aarch64_insn (buf, insn);
6223 break;
6224
6225 case AARCH64_OPND_SIMD_IMM:
6226 case AARCH64_OPND_SIMD_IMM_SFT:
6227 case AARCH64_OPND_LIMM:
6228 /* Bit mask immediate. */
6229 gas_assert (new_inst != NULL);
6230 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6231 new_inst->operands[idx].imm.value = value;
6232 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6233 &new_inst->value, NULL, NULL))
6234 put_aarch64_insn (buf, new_inst->value);
6235 else
6236 as_bad_where (fixP->fx_file, fixP->fx_line,
6237 _("invalid immediate"));
6238 break;
6239
6240 case AARCH64_OPND_HALF:
6241 /* 16-bit unsigned immediate. */
6242 if (unsigned_overflow (value, 16))
6243 as_bad_where (fixP->fx_file, fixP->fx_line,
6244 _("immediate out of range"));
6245 insn = get_aarch64_insn (buf);
6246 insn |= encode_movw_imm (value & 0xffff);
6247 put_aarch64_insn (buf, insn);
6248 break;
6249
6250 case AARCH64_OPND_IMM_MOV:
6251 /* Operand for a generic move immediate instruction, which is
6252 an alias instruction that generates a single MOVZ, MOVN or ORR
6253 instruction to load a 32-bit/64-bit immediate value into a general
6254 register. An assembler error shall result if the immediate cannot be
6255 created by a single one of these instructions. If there is a choice,
6256 then to ensure reversibility an assembler must prefer MOVZ to MOVN,
6257 and MOVZ or MOVN to ORR. */
6258 gas_assert (new_inst != NULL);
6259 fix_mov_imm_insn (fixP, buf, new_inst, value);
6260 break;
6261
6262 case AARCH64_OPND_ADDR_SIMM7:
6263 case AARCH64_OPND_ADDR_SIMM9:
6264 case AARCH64_OPND_ADDR_SIMM9_2:
6265 case AARCH64_OPND_ADDR_UIMM12:
6266 /* Immediate offset in an address. */
6267 insn = get_aarch64_insn (buf);
6268
6269 gas_assert (new_inst != NULL && new_inst->value == insn);
6270 gas_assert (new_inst->opcode->operands[1] == opnd
6271 || new_inst->opcode->operands[2] == opnd);
6272
6273 /* Get the index of the address operand. */
6274 if (new_inst->opcode->operands[1] == opnd)
6275 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6276 idx = 1;
6277 else
6278 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6279 idx = 2;
6280
6281 /* Update the resolved offset value. */
6282 new_inst->operands[idx].addr.offset.imm = value;
6283
6284 /* Encode/fix-up. */
6285 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6286 &new_inst->value, NULL, NULL))
6287 {
6288 put_aarch64_insn (buf, new_inst->value);
6289 break;
6290 }
6291 else if (new_inst->opcode->iclass == ldst_pos
6292 && try_to_encode_as_unscaled_ldst (new_inst))
6293 {
6294 put_aarch64_insn (buf, new_inst->value);
6295 break;
6296 }
6297
6298 as_bad_where (fixP->fx_file, fixP->fx_line,
6299 _("immediate offset out of range"));
6300 break;
6301
6302 default:
6303 gas_assert (0);
6304 as_fatal (_("unhandled operand code %d"), opnd);
6305 }
6306 }
6307
6308 /* Apply a fixup (fixP) to segment data, once it has been determined
6309 by our caller that we have all the info we need to fix it up.
6310
6311 Parameter valP is the pointer to the value of the bits. */
6312
6313 void
6314 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6315 {
6316 offsetT value = *valP;
6317 uint32_t insn;
6318 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6319 int scale;
6320 unsigned flags = fixP->fx_addnumber;
6321
6322 DEBUG_TRACE ("\n\n");
6323 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6324 DEBUG_TRACE ("Enter md_apply_fix");
6325
6326 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6327
6328 /* Note whether this will delete the relocation. */
6329
6330 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6331 fixP->fx_done = 1;
6332
6333 /* Process the relocations. */
6334 switch (fixP->fx_r_type)
6335 {
6336 case BFD_RELOC_NONE:
6337 /* This will need to go in the object file. */
6338 fixP->fx_done = 0;
6339 break;
6340
6341 case BFD_RELOC_8:
6342 case BFD_RELOC_8_PCREL:
6343 if (fixP->fx_done || !seg->use_rela_p)
6344 md_number_to_chars (buf, value, 1);
6345 break;
6346
6347 case BFD_RELOC_16:
6348 case BFD_RELOC_16_PCREL:
6349 if (fixP->fx_done || !seg->use_rela_p)
6350 md_number_to_chars (buf, value, 2);
6351 break;
6352
6353 case BFD_RELOC_32:
6354 case BFD_RELOC_32_PCREL:
6355 if (fixP->fx_done || !seg->use_rela_p)
6356 md_number_to_chars (buf, value, 4);
6357 break;
6358
6359 case BFD_RELOC_64:
6360 case BFD_RELOC_64_PCREL:
6361 if (fixP->fx_done || !seg->use_rela_p)
6362 md_number_to_chars (buf, value, 8);
6363 break;
6364
6365 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6366 /* We claim that these fixups have been processed here, even if
6367 in fact we generate an error because we do not have a reloc
6368 for them, so tc_gen_reloc() will reject them. */
6369 fixP->fx_done = 1;
6370 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6371 {
6372 as_bad_where (fixP->fx_file, fixP->fx_line,
6373 _("undefined symbol %s used as an immediate value"),
6374 S_GET_NAME (fixP->fx_addsy));
6375 goto apply_fix_return;
6376 }
6377 fix_insn (fixP, flags, value);
6378 break;
6379
6380 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6381 if (fixP->fx_done || !seg->use_rela_p)
6382 {
6383 if (value & 3)
6384 as_bad_where (fixP->fx_file, fixP->fx_line,
6385 _("pc-relative load offset not word aligned"));
6386 if (signed_overflow (value, 21))
6387 as_bad_where (fixP->fx_file, fixP->fx_line,
6388 _("pc-relative load offset out of range"));
6389 insn = get_aarch64_insn (buf);
6390 insn |= encode_ld_lit_ofs_19 (value >> 2);
6391 put_aarch64_insn (buf, insn);
6392 }
6393 break;
6394
6395 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6396 if (fixP->fx_done || !seg->use_rela_p)
6397 {
6398 if (signed_overflow (value, 21))
6399 as_bad_where (fixP->fx_file, fixP->fx_line,
6400 _("pc-relative address offset out of range"));
6401 insn = get_aarch64_insn (buf);
6402 insn |= encode_adr_imm (value);
6403 put_aarch64_insn (buf, insn);
6404 }
6405 break;
6406
6407 case BFD_RELOC_AARCH64_BRANCH19:
6408 if (fixP->fx_done || !seg->use_rela_p)
6409 {
6410 if (value & 3)
6411 as_bad_where (fixP->fx_file, fixP->fx_line,
6412 _("conditional branch target not word aligned"));
6413 if (signed_overflow (value, 21))
6414 as_bad_where (fixP->fx_file, fixP->fx_line,
6415 _("conditional branch out of range"));
6416 insn = get_aarch64_insn (buf);
6417 insn |= encode_cond_branch_ofs_19 (value >> 2);
6418 put_aarch64_insn (buf, insn);
6419 }
6420 break;
6421
6422 case BFD_RELOC_AARCH64_TSTBR14:
6423 if (fixP->fx_done || !seg->use_rela_p)
6424 {
6425 if (value & 3)
6426 as_bad_where (fixP->fx_file, fixP->fx_line,
6427 _("conditional branch target not word aligned"));
6428 if (signed_overflow (value, 16))
6429 as_bad_where (fixP->fx_file, fixP->fx_line,
6430 _("conditional branch out of range"));
6431 insn = get_aarch64_insn (buf);
6432 insn |= encode_tst_branch_ofs_14 (value >> 2);
6433 put_aarch64_insn (buf, insn);
6434 }
6435 break;
6436
6437 case BFD_RELOC_AARCH64_JUMP26:
6438 case BFD_RELOC_AARCH64_CALL26:
6439 if (fixP->fx_done || !seg->use_rela_p)
6440 {
6441 if (value & 3)
6442 as_bad_where (fixP->fx_file, fixP->fx_line,
6443 _("branch target not word aligned"));
6444 if (signed_overflow (value, 28))
6445 as_bad_where (fixP->fx_file, fixP->fx_line,
6446 _("branch out of range"));
6447 insn = get_aarch64_insn (buf);
6448 insn |= encode_branch_ofs_26 (value >> 2);
6449 put_aarch64_insn (buf, insn);
6450 }
6451 break;
6452
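/* The MOVW group relocations insert one 16-bit slice of the value:
   scale 0 selects bits 15..0 (G0), 16 selects bits 31..16 (G1), 32 selects
   bits 47..32 (G2) and 48 selects bits 63..48 (G3); the selected slice is
   masked into the instruction below after the overflow checks. */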
6453 case BFD_RELOC_AARCH64_MOVW_G0:
6454 case BFD_RELOC_AARCH64_MOVW_G0_S:
6455 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6456 scale = 0;
6457 goto movw_common;
6458 case BFD_RELOC_AARCH64_MOVW_G1:
6459 case BFD_RELOC_AARCH64_MOVW_G1_S:
6460 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6461 scale = 16;
6462 goto movw_common;
6463 case BFD_RELOC_AARCH64_MOVW_G2:
6464 case BFD_RELOC_AARCH64_MOVW_G2_S:
6465 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6466 scale = 32;
6467 goto movw_common;
6468 case BFD_RELOC_AARCH64_MOVW_G3:
6469 scale = 48;
6470 movw_common:
6471 if (fixP->fx_done || !seg->use_rela_p)
6472 {
6473 insn = get_aarch64_insn (buf);
6474
6475 if (!fixP->fx_done)
6476 {
6477 /* REL signed addend must fit in 16 bits */
6478 if (signed_overflow (value, 16))
6479 as_bad_where (fixP->fx_file, fixP->fx_line,
6480 _("offset out of range"));
6481 }
6482 else
6483 {
6484 /* Check for overflow and scale. */
6485 switch (fixP->fx_r_type)
6486 {
6487 case BFD_RELOC_AARCH64_MOVW_G0:
6488 case BFD_RELOC_AARCH64_MOVW_G1:
6489 case BFD_RELOC_AARCH64_MOVW_G2:
6490 case BFD_RELOC_AARCH64_MOVW_G3:
6491 if (unsigned_overflow (value, scale + 16))
6492 as_bad_where (fixP->fx_file, fixP->fx_line,
6493 _("unsigned value out of range"));
6494 break;
6495 case BFD_RELOC_AARCH64_MOVW_G0_S:
6496 case BFD_RELOC_AARCH64_MOVW_G1_S:
6497 case BFD_RELOC_AARCH64_MOVW_G2_S:
6498 /* NOTE: We can only come here with movz or movn. */
6499 if (signed_overflow (value, scale + 16))
6500 as_bad_where (fixP->fx_file, fixP->fx_line,
6501 _("signed value out of range"));
6502 if (value < 0)
6503 {
6504 /* Force use of MOVN. */
6505 value = ~value;
6506 insn = reencode_movzn_to_movn (insn);
6507 }
6508 else
6509 {
6510 /* Force use of MOVZ. */
6511 insn = reencode_movzn_to_movz (insn);
6512 }
6513 break;
6514 default:
6515 /* Unchecked relocations. */
6516 break;
6517 }
6518 value >>= scale;
6519 }
6520
6521 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6522 insn |= encode_movw_imm (value & 0xffff);
6523
6524 put_aarch64_insn (buf, insn);
6525 }
6526 break;
6527
6528 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6529 fixP->fx_r_type = (ilp32_p
6530 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6531 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6532 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6533 /* Should always be exported to object file, see
6534 aarch64_force_relocation(). */
6535 gas_assert (!fixP->fx_done);
6536 gas_assert (seg->use_rela_p);
6537 break;
6538
6539 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6540 fixP->fx_r_type = (ilp32_p
6541 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6542 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6543 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6544 /* Should always be exported to object file, see
6545 aarch64_force_relocation(). */
6546 gas_assert (!fixP->fx_done);
6547 gas_assert (seg->use_rela_p);
6548 break;
6549
6550 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6551 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6552 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6553 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6554 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6555 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6556 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6557 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6558 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6559 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6560 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6561 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6562 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6563 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6564 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6565 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6566 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6567 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6568 /* Should always be exported to object file, see
6569 aarch64_force_relocation(). */
6570 gas_assert (!fixP->fx_done);
6571 gas_assert (seg->use_rela_p);
6572 break;
6573
6574 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6575 /* Should always be exported to object file, see
6576 aarch64_force_relocation(). */
6577 fixP->fx_r_type = (ilp32_p
6578 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6579 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6580 gas_assert (!fixP->fx_done);
6581 gas_assert (seg->use_rela_p);
6582 break;
6583
6584 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6585 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6586 case BFD_RELOC_AARCH64_ADD_LO12:
6587 case BFD_RELOC_AARCH64_LDST8_LO12:
6588 case BFD_RELOC_AARCH64_LDST16_LO12:
6589 case BFD_RELOC_AARCH64_LDST32_LO12:
6590 case BFD_RELOC_AARCH64_LDST64_LO12:
6591 case BFD_RELOC_AARCH64_LDST128_LO12:
6592 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6593 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6594 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6595 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6596 /* Should always be exported to object file, see
6597 aarch64_force_relocation(). */
6598 gas_assert (!fixP->fx_done);
6599 gas_assert (seg->use_rela_p);
6600 break;
6601
6602 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6603 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6604 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6605 break;
6606
6607 default:
6608 as_bad_where (fixP->fx_file, fixP->fx_line,
6609 _("unexpected %s fixup"),
6610 bfd_get_reloc_code_name (fixP->fx_r_type));
6611 break;
6612 }
6613
6614 apply_fix_return:
6615 /* Free the allocated struct aarch64_inst.
6616 N.B. currently only a very limited number of fix-up types actually use
6617 this field, so the impact on performance should be minimal. */
6618 if (fixP->tc_fix_data.inst != NULL)
6619 free (fixP->tc_fix_data.inst);
6620
6621 return;
6622 }
6623
6624 /* Translate internal representation of relocation info to BFD target
6625 format. */
6626
6627 arelent *
6628 tc_gen_reloc (asection * section, fixS * fixp)
6629 {
6630 arelent *reloc;
6631 bfd_reloc_code_real_type code;
6632
6633 reloc = xmalloc (sizeof (arelent));
6634
6635 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6636 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6637 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6638
6639 if (fixp->fx_pcrel)
6640 {
6641 if (section->use_rela_p)
6642 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6643 else
6644 fixp->fx_offset = reloc->address;
6645 }
6646 reloc->addend = fixp->fx_offset;
6647
6648 code = fixp->fx_r_type;
6649 switch (code)
6650 {
6651 case BFD_RELOC_16:
6652 if (fixp->fx_pcrel)
6653 code = BFD_RELOC_16_PCREL;
6654 break;
6655
6656 case BFD_RELOC_32:
6657 if (fixp->fx_pcrel)
6658 code = BFD_RELOC_32_PCREL;
6659 break;
6660
6661 case BFD_RELOC_64:
6662 if (fixp->fx_pcrel)
6663 code = BFD_RELOC_64_PCREL;
6664 break;
6665
6666 default:
6667 break;
6668 }
6669
6670 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6671 if (reloc->howto == NULL)
6672 {
6673 as_bad_where (fixp->fx_file, fixp->fx_line,
6674 _
6675 ("cannot represent %s relocation in this object file format"),
6676 bfd_get_reloc_code_name (code));
6677 return NULL;
6678 }
6679
6680 return reloc;
6681 }
6682
6683 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
6684
6685 void
6686 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6687 {
6688 bfd_reloc_code_real_type type;
6689 int pcrel = 0;
6690
6691 /* Pick a reloc.
6692 FIXME: @@ Should look at CPU word size. */
6693 switch (size)
6694 {
6695 case 1:
6696 type = BFD_RELOC_8;
6697 break;
6698 case 2:
6699 type = BFD_RELOC_16;
6700 break;
6701 case 4:
6702 type = BFD_RELOC_32;
6703 break;
6704 case 8:
6705 type = BFD_RELOC_64;
6706 break;
6707 default:
6708 as_bad (_("cannot do %u-byte relocation"), size);
6709 type = BFD_RELOC_UNUSED;
6710 break;
6711 }
6712
6713 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6714 }
6715
6716 int
6717 aarch64_force_relocation (struct fix *fixp)
6718 {
6719 switch (fixp->fx_r_type)
6720 {
6721 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6722 /* Perform these "immediate" internal relocations
6723 even if the symbol is extern or weak. */
6724 return 0;
6725
6726 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6727 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6728 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6729 /* Pseudo relocs that need to be fixed up according to
6730 ilp32_p. */
6731 return 0;
6732
6733 case BFD_RELOC_AARCH64_ADD_LO12:
6734 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6735 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6736 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6737 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6738 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6739 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6740 case BFD_RELOC_AARCH64_LDST128_LO12:
6741 case BFD_RELOC_AARCH64_LDST16_LO12:
6742 case BFD_RELOC_AARCH64_LDST32_LO12:
6743 case BFD_RELOC_AARCH64_LDST64_LO12:
6744 case BFD_RELOC_AARCH64_LDST8_LO12:
6745 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6746 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6747 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6748 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6749 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6750 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6751 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6752 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6753 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6754 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6755 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6756 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6757 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6758 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6759 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6760 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6761 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6762 /* Always leave these relocations for the linker. */
6763 return 1;
6764
6765 default:
6766 break;
6767 }
6768
6769 return generic_force_reloc (fixp);
6770 }
6771
6772 #ifdef OBJ_ELF
6773
6774 const char *
6775 elf64_aarch64_target_format (void)
6776 {
6777 if (target_big_endian)
6778 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6779 else
6780 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6781 }
6782
6783 void
6784 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6785 {
6786 elf_frob_symbol (symp, puntp);
6787 }
6788 #endif
6789
6790 /* MD interface: Finalization. */
6791
6792 /* A good place to do this, although this was probably not intended
6793 for this kind of use. We need to dump the literal pool before
6794 references are made to a null symbol pointer. */
6795
6796 void
6797 aarch64_cleanup (void)
6798 {
6799 literal_pool *pool;
6800
6801 for (pool = list_of_pools; pool; pool = pool->next)
6802 {
6803 /* Put it at the end of the relevant section. */
6804 subseg_set (pool->section, pool->sub_section);
6805 s_ltorg (0);
6806 }
6807 }
6808
6809 #ifdef OBJ_ELF
6810 /* Remove any excess mapping symbols generated for alignment frags in
6811 SEC. We may have created a mapping symbol before a zero byte
6812 alignment; remove it if there's a mapping symbol after the
6813 alignment. */
6814 static void
6815 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6816 void *dummy ATTRIBUTE_UNUSED)
6817 {
6818 segment_info_type *seginfo = seg_info (sec);
6819 fragS *fragp;
6820
6821 if (seginfo == NULL || seginfo->frchainP == NULL)
6822 return;
6823
6824 for (fragp = seginfo->frchainP->frch_root;
6825 fragp != NULL; fragp = fragp->fr_next)
6826 {
6827 symbolS *sym = fragp->tc_frag_data.last_map;
6828 fragS *next = fragp->fr_next;
6829
6830 /* Variable-sized frags have been converted to fixed size by
6831 this point. But if this was variable-sized to start with,
6832 there will be a fixed-size frag after it. So don't handle
6833 next == NULL. */
6834 if (sym == NULL || next == NULL)
6835 continue;
6836
6837 if (S_GET_VALUE (sym) < next->fr_address)
6838 /* Not at the end of this frag. */
6839 continue;
6840 know (S_GET_VALUE (sym) == next->fr_address);
6841
6842 do
6843 {
6844 if (next->tc_frag_data.first_map != NULL)
6845 {
6846 /* Next frag starts with a mapping symbol. Discard this
6847 one. */
6848 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6849 break;
6850 }
6851
6852 if (next->fr_next == NULL)
6853 {
6854 /* This mapping symbol is at the end of the section. Discard
6855 it. */
6856 know (next->fr_fix == 0 && next->fr_var == 0);
6857 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6858 break;
6859 }
6860
6861 /* As long as we have empty frags without any mapping symbols,
6862 keep looking. */
6863 /* If the next frag is non-empty and does not start with a
6864 mapping symbol, then this mapping symbol is required. */
6865 if (next->fr_address != next->fr_next->fr_address)
6866 break;
6867
6868 next = next->fr_next;
6869 }
6870 while (next != NULL);
6871 }
6872 }
6873 #endif
6874
6875 /* Adjust the symbol table. */
6876
6877 void
6878 aarch64_adjust_symtab (void)
6879 {
6880 #ifdef OBJ_ELF
6881 /* Remove any overlapping mapping symbols generated by alignment frags. */
6882 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6883 /* Now do generic ELF adjustments. */
6884 elf_adjust_symtab ();
6885 #endif
6886 }
6887
6888 static void
6889 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6890 {
6891 const char *hash_err;
6892
6893 hash_err = hash_insert (table, key, value);
6894 if (hash_err)
6895 printf ("Internal Error: Can't hash %s\n", key);
6896 }
6897
6898 static void
6899 fill_instruction_hash_table (void)
6900 {
6901 aarch64_opcode *opcode = aarch64_opcode_table;
6902
6903 while (opcode->name != NULL)
6904 {
6905 templates *templ, *new_templ;
6906 templ = hash_find (aarch64_ops_hsh, opcode->name);
6907
6908 new_templ = (templates *) xmalloc (sizeof (templates));
6909 new_templ->opcode = opcode;
6910 new_templ->next = NULL;
6911
6912 if (!templ)
6913 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6914 else
6915 {
6916 new_templ->next = templ->next;
6917 templ->next = new_templ;
6918 }
6919 ++opcode;
6920 }
6921 }
6922
6923 static inline void
6924 convert_to_upper (char *dst, const char *src, size_t num)
6925 {
6926 unsigned int i;
6927 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6928 *dst = TOUPPER (*src);
6929 *dst = '\0';
6930 }
6931
6932 /* Assume STR points to a lower-case string; allocate, convert and return
6933 the corresponding upper-case string. */
6934 static inline const char*
6935 get_upper_str (const char *str)
6936 {
6937 char *ret;
6938 size_t len = strlen (str);
6939 if ((ret = xmalloc (len + 1)) == NULL)
6940 abort ();
6941 convert_to_upper (ret, str, len);
6942 return ret;
6943 }
6944
6945 /* MD interface: Initialization. */
6946
6947 void
6948 md_begin (void)
6949 {
6950 unsigned mach;
6951 unsigned int i;
6952
6953 if ((aarch64_ops_hsh = hash_new ()) == NULL
6954 || (aarch64_cond_hsh = hash_new ()) == NULL
6955 || (aarch64_shift_hsh = hash_new ()) == NULL
6956 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
6957 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
6958 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
6959 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
6960 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
6961 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
6962 || (aarch64_reg_hsh = hash_new ()) == NULL
6963 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
6964 || (aarch64_nzcv_hsh = hash_new ()) == NULL
6965 || (aarch64_pldop_hsh = hash_new ()) == NULL)
6966 as_fatal (_("virtual memory exhausted"));
6967
6968 fill_instruction_hash_table ();
6969
6970 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
6971 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
6972 (void *) (aarch64_sys_regs + i));
6973
6974 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
6975 checked_hash_insert (aarch64_pstatefield_hsh,
6976 aarch64_pstatefields[i].name,
6977 (void *) (aarch64_pstatefields + i));
6978
6979 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
6980 checked_hash_insert (aarch64_sys_regs_ic_hsh,
6981 aarch64_sys_regs_ic[i].template,
6982 (void *) (aarch64_sys_regs_ic + i));
6983
6984 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
6985 checked_hash_insert (aarch64_sys_regs_dc_hsh,
6986 aarch64_sys_regs_dc[i].template,
6987 (void *) (aarch64_sys_regs_dc + i));
6988
6989 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
6990 checked_hash_insert (aarch64_sys_regs_at_hsh,
6991 aarch64_sys_regs_at[i].template,
6992 (void *) (aarch64_sys_regs_at + i));
6993
6994 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
6995 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
6996 aarch64_sys_regs_tlbi[i].template,
6997 (void *) (aarch64_sys_regs_tlbi + i));
6998
6999 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
7000 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
7001 (void *) (reg_names + i));
7002
7003 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
7004 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
7005 (void *) (nzcv_names + i));
7006
7007 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
7008 {
7009 const char *name = aarch64_operand_modifiers[i].name;
7010 checked_hash_insert (aarch64_shift_hsh, name,
7011 (void *) (aarch64_operand_modifiers + i));
7012 /* Also hash the name in the upper case. */
7013 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7014 (void *) (aarch64_operand_modifiers + i));
7015 }
7016
7017 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7018 {
7019 unsigned int j;
7020 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7021 the same condition code. */
7022 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7023 {
7024 const char *name = aarch64_conds[i].names[j];
7025 if (name == NULL)
7026 break;
7027 checked_hash_insert (aarch64_cond_hsh, name,
7028 (void *) (aarch64_conds + i));
7029 /* Also hash the name in the upper case. */
7030 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7031 (void *) (aarch64_conds + i));
7032 }
7033 }
7034
7035 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7036 {
7037 const char *name = aarch64_barrier_options[i].name;
7038 /* Skip the unallocated xx00 option values. */
7039 if ((i & 0x3) == 0)
7040 continue;
7041 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7042 (void *) (aarch64_barrier_options + i));
7043 /* Also hash the name in the upper case. */
7044 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7045 (void *) (aarch64_barrier_options + i));
7046 }
7047
7048 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7049 {
7050 const char* name = aarch64_prfops[i].name;
7051 /* Skip the unallocated hint encodings. */
7052 if (name == NULL)
7053 continue;
7054 checked_hash_insert (aarch64_pldop_hsh, name,
7055 (void *) (aarch64_prfops + i));
7056 /* Also hash the name in the upper case. */
7057 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7058 (void *) (aarch64_prfops + i));
7059 }
7060
7061 /* Set the cpu variant based on the command-line options. */
7062 if (!mcpu_cpu_opt)
7063 mcpu_cpu_opt = march_cpu_opt;
7064
7065 if (!mcpu_cpu_opt)
7066 mcpu_cpu_opt = &cpu_default;
7067
7068 cpu_variant = *mcpu_cpu_opt;
7069
7070 /* Record the CPU type. */
7071 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7072
7073 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7074 }
7075
7076 /* Command line processing. */
7077
7078 const char *md_shortopts = "m:";
7079
7080 #ifdef AARCH64_BI_ENDIAN
7081 #define OPTION_EB (OPTION_MD_BASE + 0)
7082 #define OPTION_EL (OPTION_MD_BASE + 1)
7083 #else
7084 #if TARGET_BYTES_BIG_ENDIAN
7085 #define OPTION_EB (OPTION_MD_BASE + 0)
7086 #else
7087 #define OPTION_EL (OPTION_MD_BASE + 1)
7088 #endif
7089 #endif
7090
7091 struct option md_longopts[] = {
7092 #ifdef OPTION_EB
7093 {"EB", no_argument, NULL, OPTION_EB},
7094 #endif
7095 #ifdef OPTION_EL
7096 {"EL", no_argument, NULL, OPTION_EL},
7097 #endif
7098 {NULL, no_argument, NULL, 0}
7099 };
7100
7101 size_t md_longopts_size = sizeof (md_longopts);
7102
7103 struct aarch64_option_table
7104 {
7105 char *option; /* Option name to match. */
7106 char *help; /* Help information. */
7107 int *var; /* Variable to change. */
7108 int value; /* What to change it to. */
7109 char *deprecated; /* If non-null, print this message. */
7110 };
7111
7112 static struct aarch64_option_table aarch64_opts[] = {
7113 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7114 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7115 NULL},
7116 #ifdef DEBUG_AARCH64
7117 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7118 #endif /* DEBUG_AARCH64 */
7119 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7120 NULL},
7121 {NULL, NULL, NULL, 0, NULL}
7122 };
7123
7124 struct aarch64_cpu_option_table
7125 {
7126 char *name;
7127 const aarch64_feature_set value;
7128 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7129 case. */
7130 const char *canonical_name;
7131 };
7132
7133 /* This list should, at a minimum, contain all the cpu names
7134 recognized by GCC. */
7135 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7136 {"all", AARCH64_ANY, NULL},
7137 {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
7138 {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
7139 {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
7140 {"generic", AARCH64_ARCH_V8, NULL},
7141
7142 /* These two are example CPUs supported in GCC, once we have real
7143 CPUs they will be removed. */
7144 {"example-1", AARCH64_ARCH_V8, NULL},
7145 {"example-2", AARCH64_ARCH_V8, NULL},
7146
7147 {NULL, AARCH64_ARCH_NONE, NULL}
7148 };
7149
7150 struct aarch64_arch_option_table
7151 {
7152 char *name;
7153 const aarch64_feature_set value;
7154 };
7155
7156 /* This list should, at a minimum, contain all the architecture names
7157 recognized by GCC. */
7158 static const struct aarch64_arch_option_table aarch64_archs[] = {
7159 {"all", AARCH64_ANY},
7160 {"armv8-a", AARCH64_ARCH_V8},
7161 {NULL, AARCH64_ARCH_NONE}
7162 };
7163
7164 /* ISA extensions. */
7165 struct aarch64_option_cpu_value_table
7166 {
7167 char *name;
7168 const aarch64_feature_set value;
7169 };
7170
7171 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7172 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7173 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7174 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7175 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7176 {NULL, AARCH64_ARCH_NONE}
7177 };
7178
7179 struct aarch64_long_option_table
7180 {
7181 char *option; /* Substring to match. */
7182 char *help; /* Help information. */
7183 int (*func) (char *subopt); /* Function to decode sub-option. */
7184 char *deprecated; /* If non-null, print this message. */
7185 };
7186
7187 static int
7188 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7189 {
7190 /* We insist on extensions being added before being removed. We achieve
7191 this by using the ADDING_VALUE variable to indicate whether we are
7192 adding an extension (1) or removing it (0) and only allowing it to
7193 change in the order -1 -> 1 -> 0. */
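/* For example (illustrative), "+crc+nocrypto" is accepted, whereas
   "+nocrypto+crc" is rejected because an extension is added after one
   has already been removed. */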
7194 int adding_value = -1;
7195 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7196
7197 /* Copy the feature set, so that we can modify it. */
7198 *ext_set = **opt_p;
7199 *opt_p = ext_set;
7200
7201 while (str != NULL && *str != 0)
7202 {
7203 const struct aarch64_option_cpu_value_table *opt;
7204 char *ext;
7205 int optlen;
7206
7207 if (*str != '+')
7208 {
7209 as_bad (_("invalid architectural extension"));
7210 return 0;
7211 }
7212
7213 str++;
7214 ext = strchr (str, '+');
7215
7216 if (ext != NULL)
7217 optlen = ext - str;
7218 else
7219 optlen = strlen (str);
7220
7221 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7222 {
7223 if (adding_value != 0)
7224 adding_value = 0;
7225 optlen -= 2;
7226 str += 2;
7227 }
7228 else if (optlen > 0)
7229 {
7230 if (adding_value == -1)
7231 adding_value = 1;
7232 else if (adding_value != 1)
7233 {
7234 as_bad (_("must specify extensions to add before specifying "
7235 "those to remove"));
7236 return FALSE;
7237 }
7238 }
7239
7240 if (optlen == 0)
7241 {
7242 as_bad (_("missing architectural extension"));
7243 return 0;
7244 }
7245
7246 gas_assert (adding_value != -1);
7247
7248 for (opt = aarch64_features; opt->name != NULL; opt++)
7249 if (strncmp (opt->name, str, optlen) == 0)
7250 {
7251 /* Add or remove the extension. */
7252 if (adding_value)
7253 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7254 else
7255 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7256 break;
7257 }
7258
7259 if (opt->name == NULL)
7260 {
7261 as_bad (_("unknown architectural extension `%s'"), str);
7262 return 0;
7263 }
7264
7265 str = ext;
7266 };
7267
7268 return 1;
7269 }
7270
7271 static int
7272 aarch64_parse_cpu (char *str)
7273 {
7274 const struct aarch64_cpu_option_table *opt;
7275 char *ext = strchr (str, '+');
7276 size_t optlen;
7277
7278 if (ext != NULL)
7279 optlen = ext - str;
7280 else
7281 optlen = strlen (str);
7282
7283 if (optlen == 0)
7284 {
7285 as_bad (_("missing cpu name `%s'"), str);
7286 return 0;
7287 }
7288
7289 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7290 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7291 {
7292 mcpu_cpu_opt = &opt->value;
7293 if (ext != NULL)
7294 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7295
7296 return 1;
7297 }
7298
7299 as_bad (_("unknown cpu `%s'"), str);
7300 return 0;
7301 }
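/* A typical use is e.g. "-mcpu=cortex-a53+crc" (illustrative example),
   which selects the Cortex-A53 feature set from the table above and then
   enables the CRC extension via aarch64_parse_features. */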
7302
7303 static int
7304 aarch64_parse_arch (char *str)
7305 {
7306 const struct aarch64_arch_option_table *opt;
7307 char *ext = strchr (str, '+');
7308 size_t optlen;
7309
7310 if (ext != NULL)
7311 optlen = ext - str;
7312 else
7313 optlen = strlen (str);
7314
7315 if (optlen == 0)
7316 {
7317 as_bad (_("missing architecture name `%s'"), str);
7318 return 0;
7319 }
7320
7321 for (opt = aarch64_archs; opt->name != NULL; opt++)
7322 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7323 {
7324 march_cpu_opt = &opt->value;
7325 if (ext != NULL)
7326 return aarch64_parse_features (ext, &march_cpu_opt);
7327
7328 return 1;
7329 }
7330
7331 as_bad (_("unknown architecture `%s'\n"), str);
7332 return 0;
7333 }
7334
7335 /* ABIs. */
7336 struct aarch64_option_abi_value_table
7337 {
7338 char *name;
7339 enum aarch64_abi_type value;
7340 };
7341
7342 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7343 {"ilp32", AARCH64_ABI_ILP32},
7344 {"lp64", AARCH64_ABI_LP64},
7345 {NULL, 0}
7346 };
7347
7348 static int
7349 aarch64_parse_abi (char *str)
7350 {
7351 const struct aarch64_option_abi_value_table *opt;
7352 size_t optlen = strlen (str);
7353
7354 if (optlen == 0)
7355 {
7356 as_bad (_("missing abi name `%s'"), str);
7357 return 0;
7358 }
7359
7360 for (opt = aarch64_abis; opt->name != NULL; opt++)
7361 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7362 {
7363 aarch64_abi = opt->value;
7364 return 1;
7365 }
7366
7367 as_bad (_("unknown abi `%s'\n"), str);
7368 return 0;
7369 }
7370
7371 static struct aarch64_long_option_table aarch64_long_opts[] = {
7372 #ifdef OBJ_ELF
7373 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7374 aarch64_parse_abi, NULL},
7375 #endif /* OBJ_ELF */
7376 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7377 aarch64_parse_cpu, NULL},
7378 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7379 aarch64_parse_arch, NULL},
7380 {NULL, NULL, 0, NULL}
7381 };
7382
7383 int
7384 md_parse_option (int c, char *arg)
7385 {
7386 struct aarch64_option_table *opt;
7387 struct aarch64_long_option_table *lopt;
7388
7389 switch (c)
7390 {
7391 #ifdef OPTION_EB
7392 case OPTION_EB:
7393 target_big_endian = 1;
7394 break;
7395 #endif
7396
7397 #ifdef OPTION_EL
7398 case OPTION_EL:
7399 target_big_endian = 0;
7400 break;
7401 #endif
7402
7403 case 'a':
7404 /* Listing option. Just ignore these, we don't support additional
7405 ones. */
7406 return 0;
7407
7408 default:
7409 for (opt = aarch64_opts; opt->option != NULL; opt++)
7410 {
7411 if (c == opt->option[0]
7412 && ((arg == NULL && opt->option[1] == 0)
7413 || streq (arg, opt->option + 1)))
7414 {
7415 /* If the option is deprecated, tell the user. */
7416 if (opt->deprecated != NULL)
7417 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7418 arg ? arg : "", _(opt->deprecated));
7419
7420 if (opt->var != NULL)
7421 *opt->var = opt->value;
7422
7423 return 1;
7424 }
7425 }
7426
7427 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7428 {
7429 /* These options are expected to have an argument. */
7430 if (c == lopt->option[0]
7431 && arg != NULL
7432 && strncmp (arg, lopt->option + 1,
7433 strlen (lopt->option + 1)) == 0)
7434 {
7435 /* If the option is deprecated, tell the user. */
7436 if (lopt->deprecated != NULL)
7437 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7438 _(lopt->deprecated));
7439
7440 /* Call the sub-option parser. */
7441 return lopt->func (arg + strlen (lopt->option) - 1);
7442 }
7443 }
7444
7445 return 0;
7446 }
7447
7448 return 1;
7449 }
7450
7451 void
7452 md_show_usage (FILE * fp)
7453 {
7454 struct aarch64_option_table *opt;
7455 struct aarch64_long_option_table *lopt;
7456
7457 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7458
7459 for (opt = aarch64_opts; opt->option != NULL; opt++)
7460 if (opt->help != NULL)
7461 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7462
7463 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7464 if (lopt->help != NULL)
7465 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7466
7467 #ifdef OPTION_EB
7468 fprintf (fp, _("\
7469 -EB assemble code for a big-endian cpu\n"));
7470 #endif
7471
7472 #ifdef OPTION_EL
7473 fprintf (fp, _("\
7474 -EL assemble code for a little-endian cpu\n"));
7475 #endif
7476 }
7477
7478 /* Parse a .cpu directive. */
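/* e.g. ".cpu cortex-a57+crypto" (illustrative example) switches the
   assembler to the Cortex-A57 feature set with the crypto extension
   enabled. */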
7479
7480 static void
7481 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7482 {
7483 const struct aarch64_cpu_option_table *opt;
7484 char saved_char;
7485 char *name;
7486 char *ext;
7487 size_t optlen;
7488
7489 name = input_line_pointer;
7490 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7491 input_line_pointer++;
7492 saved_char = *input_line_pointer;
7493 *input_line_pointer = 0;
7494
7495 ext = strchr (name, '+');
7496
7497 if (ext != NULL)
7498 optlen = ext - name;
7499 else
7500 optlen = strlen (name);
7501
7502 /* Skip the first "all" entry. */
7503 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7504 if (strlen (opt->name) == optlen
7505 && strncmp (name, opt->name, optlen) == 0)
7506 {
7507 mcpu_cpu_opt = &opt->value;
7508 if (ext != NULL)
7509 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7510 return;
7511
7512 cpu_variant = *mcpu_cpu_opt;
7513
7514 *input_line_pointer = saved_char;
7515 demand_empty_rest_of_line ();
7516 return;
7517 }
7518 as_bad (_("unknown cpu `%s'"), name);
7519 *input_line_pointer = saved_char;
7520 ignore_rest_of_line ();
7521 }
7522
7523
7524 /* Parse a .arch directive. */
7525
7526 static void
7527 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7528 {
7529 const struct aarch64_arch_option_table *opt;
7530 char saved_char;
7531 char *name;
7532 char *ext;
7533 size_t optlen;
7534
7535 name = input_line_pointer;
7536 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7537 input_line_pointer++;
7538 saved_char = *input_line_pointer;
7539 *input_line_pointer = 0;
7540
7541 ext = strchr (name, '+');
7542
7543 if (ext != NULL)
7544 optlen = ext - name;
7545 else
7546 optlen = strlen (name);
7547
7548 /* Skip the first "all" entry. */
7549 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7550 if (strlen (opt->name) == optlen
7551 && strncmp (name, opt->name, optlen) == 0)
7552 {
7553 mcpu_cpu_opt = &opt->value;
7554 if (ext != NULL)
7555 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7556 return;
7557
7558 cpu_variant = *mcpu_cpu_opt;
7559
7560 *input_line_pointer = saved_char;
7561 demand_empty_rest_of_line ();
7562 return;
7563 }
7564
7565 as_bad (_("unknown architecture `%s'\n"), name);
7566 *input_line_pointer = saved_char;
7567 ignore_rest_of_line ();
7568 }
7569
7570 /* Copy symbol information. */
7571
7572 void
7573 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7574 {
7575 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7576 }