config/tc-aarch64.c: Avoid trying to parse a vector mov as immediate.
1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3 Copyright 2009, 2010, 2011, 2012, 2013
4 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GAS.
8
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING3. If not,
21 see <http://www.gnu.org/licenses/>. */
22
23 #include "as.h"
24 #include <limits.h>
25 #include <stdarg.h>
26 #include "bfd_stdint.h"
27 #define NO_RELOC 0
28 #include "safe-ctype.h"
29 #include "subsegs.h"
30 #include "obstack.h"
31
32 #ifdef OBJ_ELF
33 #include "elf/aarch64.h"
34 #include "dw2gencfi.h"
35 #endif
36
37 #include "dwarf2dbg.h"
38
39 /* Types of processor to assemble for. */
40 #ifndef CPU_DEFAULT
41 #define CPU_DEFAULT AARCH64_ARCH_V8
42 #endif
43
44 #define streq(a, b) (strcmp (a, b) == 0)
45
46 static aarch64_feature_set cpu_variant;
47
48 /* Variables that we set while parsing command-line options. Once all
49 options have been read we re-process these values to set the real
50 assembly flags. */
51 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
52 static const aarch64_feature_set *march_cpu_opt = NULL;
53
54 /* Constants for known architecture features. */
55 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
56
57 static const aarch64_feature_set aarch64_arch_any = AARCH64_ANY;
58 static const aarch64_feature_set aarch64_arch_none = AARCH64_ARCH_NONE;
59
60 #ifdef OBJ_ELF
61 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
62 static symbolS *GOT_symbol;
63
64 /* Which ABI to use. */
65 enum aarch64_abi_type
66 {
67 AARCH64_ABI_LP64 = 0,
68 AARCH64_ABI_ILP32 = 1
69 };
70
71 /* AArch64 ABI for the output file. */
72 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
73
74 /* When non-zero, program to a 32-bit model, in which the C data types
75 int, long and all pointer types are 32-bit objects (ILP32); or to a
76 64-bit model, in which the C int type is 32-bits but the C long type
77 and all pointer types are 64-bit objects (LP64). */
78 #define ilp32_p (aarch64_abi == AARCH64_ABI_ILP32)
79 #endif
80
81 enum neon_el_type
82 {
83 NT_invtype = -1,
84 NT_b,
85 NT_h,
86 NT_s,
87 NT_d,
88 NT_q
89 };
90
91 /* Bits for DEFINED field in neon_type_el. */
92 #define NTA_HASTYPE 1
93 #define NTA_HASINDEX 2
94
95 struct neon_type_el
96 {
97 enum neon_el_type type;
98 unsigned char defined;
99 unsigned width;
100 int64_t index;
101 };
102
103 #define FIXUP_F_HAS_EXPLICIT_SHIFT 0x00000001
104
105 struct reloc
106 {
107 bfd_reloc_code_real_type type;
108 expressionS exp;
109 int pc_rel;
110 enum aarch64_opnd opnd;
111 uint32_t flags;
112 unsigned need_libopcodes_p : 1;
113 };
114
115 struct aarch64_instruction
116 {
117 /* libopcodes structure for instruction intermediate representation. */
118 aarch64_inst base;
119 /* Record assembly errors found during the parsing. */
120 struct
121 {
122 enum aarch64_operand_error_kind kind;
123 const char *error;
124 } parsing_error;
125 /* The condition that appears in the assembly line. */
126 int cond;
127 /* Relocation information (including the GAS internal fixup). */
128 struct reloc reloc;
129 /* Need to generate an immediate in the literal pool. */
130 unsigned gen_lit_pool : 1;
131 };
132
133 typedef struct aarch64_instruction aarch64_instruction;
134
135 static aarch64_instruction inst;
136
137 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
138 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
139
140 /* Diagnostics inline function utilities.
141
142 These are lightweight utilities which should only be called by parse_operands
143 and other parsers. GAS processes each assembly line by parsing it against
144 instruction template(s); in the case of multiple templates (for the same
145 mnemonic name), those templates are tried one by one until one succeeds or
146 all fail. An assembly line may fail a few templates before being
147 successfully parsed; an error saved here in most cases is not a user error
148 but an error indicating the current template is not the right template.
149 Therefore it is very important that errors can be saved at a low cost during
150 the parsing; we don't want to slow down the whole parsing by recording
151 non-user errors in detail.
152
153 Remember that the objective is to help GAS pick up the most appropriate
154 error message in the case of multiple templates, e.g. FMOV which has 8
155 templates. */
156
157 static inline void
158 clear_error (void)
159 {
160 inst.parsing_error.kind = AARCH64_OPDE_NIL;
161 inst.parsing_error.error = NULL;
162 }
163
164 static inline bfd_boolean
165 error_p (void)
166 {
167 return inst.parsing_error.kind != AARCH64_OPDE_NIL;
168 }
169
170 static inline const char *
171 get_error_message (void)
172 {
173 return inst.parsing_error.error;
174 }
175
176 static inline void
177 set_error_message (const char *error)
178 {
179 inst.parsing_error.error = error;
180 }
181
182 static inline enum aarch64_operand_error_kind
183 get_error_kind (void)
184 {
185 return inst.parsing_error.kind;
186 }
187
188 static inline void
189 set_error_kind (enum aarch64_operand_error_kind kind)
190 {
191 inst.parsing_error.kind = kind;
192 }
193
194 static inline void
195 set_error (enum aarch64_operand_error_kind kind, const char *error)
196 {
197 inst.parsing_error.kind = kind;
198 inst.parsing_error.error = error;
199 }
200
201 static inline void
202 set_recoverable_error (const char *error)
203 {
204 set_error (AARCH64_OPDE_RECOVERABLE, error);
205 }
206
207 /* Use the DESC field of the corresponding aarch64_operand entry to compose
208 the error message. */
209 static inline void
210 set_default_error (void)
211 {
212 set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
213 }
214
215 static inline void
216 set_syntax_error (const char *error)
217 {
218 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
219 }
220
221 static inline void
222 set_first_syntax_error (const char *error)
223 {
224 if (! error_p ())
225 set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
226 }
227
228 static inline void
229 set_fatal_syntax_error (const char *error)
230 {
231 set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
232 }
233 \f
234 /* Number of littlenums required to hold an extended precision number. */
235 #define MAX_LITTLENUMS 6
236
237 /* Return value for certain parsers when the parsing fails; those parsers
238 return the information of the parsed result, e.g. register number, on
239 success. */
240 #define PARSE_FAIL -1
241
242 /* This is an invalid condition code that means no conditional field is
243 present. */
244 #define COND_ALWAYS 0x10
245
246 typedef struct
247 {
248 const char *template;
249 unsigned long value;
250 } asm_barrier_opt;
251
252 typedef struct
253 {
254 const char *template;
255 uint32_t value;
256 } asm_nzcv;
257
258 struct reloc_entry
259 {
260 char *name;
261 bfd_reloc_code_real_type reloc;
262 };
263
264 /* Structure for a hash table entry for a register. */
265 typedef struct
266 {
267 const char *name;
268 unsigned char number;
269 unsigned char type;
270 unsigned char builtin;
271 } reg_entry;
272
273 /* Macros to define the register types and masks for the purpose
274 of parsing. */
275
276 #undef AARCH64_REG_TYPES
277 #define AARCH64_REG_TYPES \
278 BASIC_REG_TYPE(R_32) /* w[0-30] */ \
279 BASIC_REG_TYPE(R_64) /* x[0-30] */ \
280 BASIC_REG_TYPE(SP_32) /* wsp */ \
281 BASIC_REG_TYPE(SP_64) /* sp */ \
282 BASIC_REG_TYPE(Z_32) /* wzr */ \
283 BASIC_REG_TYPE(Z_64) /* xzr */ \
284 BASIC_REG_TYPE(FP_B) /* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
285 BASIC_REG_TYPE(FP_H) /* h[0-31] */ \
286 BASIC_REG_TYPE(FP_S) /* s[0-31] */ \
287 BASIC_REG_TYPE(FP_D) /* d[0-31] */ \
288 BASIC_REG_TYPE(FP_Q) /* q[0-31] */ \
289 BASIC_REG_TYPE(CN) /* c[0-7] */ \
290 BASIC_REG_TYPE(VN) /* v[0-31] */ \
291 /* Typecheck: any 64-bit int reg (inc SP exc XZR) */ \
292 MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64)) \
293 /* Typecheck: any int (inc {W}SP inc [WX]ZR) */ \
294 MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64) \
295 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
296 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
297 /* Typecheck: any [BHSDQ]P FP. */ \
298 MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H) \
299 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
300 /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR) */ \
301 MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64) \
302 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN) \
303 | REG_TYPE(FP_B) | REG_TYPE(FP_H) \
304 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)) \
305 /* Any integer register; used for error messages only. */ \
306 MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64) \
307 | REG_TYPE(SP_32) | REG_TYPE(SP_64) \
308 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) \
309 /* Pseudo type to mark the end of the enumerator sequence. */ \
310 BASIC_REG_TYPE(MAX)
311
312 #undef BASIC_REG_TYPE
313 #define BASIC_REG_TYPE(T) REG_TYPE_##T,
314 #undef MULTI_REG_TYPE
315 #define MULTI_REG_TYPE(T,V) BASIC_REG_TYPE(T)
316
317 /* Register type enumerators. */
318 typedef enum
319 {
320 /* A list of REG_TYPE_*. */
321 AARCH64_REG_TYPES
322 } aarch64_reg_type;
323
324 #undef BASIC_REG_TYPE
325 #define BASIC_REG_TYPE(T) 1 << REG_TYPE_##T,
326 #undef REG_TYPE
327 #define REG_TYPE(T) (1 << REG_TYPE_##T)
328 #undef MULTI_REG_TYPE
329 #define MULTI_REG_TYPE(T,V) V,
330
331 /* Values indexed by aarch64_reg_type to assist the type checking. */
332 static const unsigned reg_type_masks[] =
333 {
334 AARCH64_REG_TYPES
335 };
336
337 #undef BASIC_REG_TYPE
338 #undef REG_TYPE
339 #undef MULTI_REG_TYPE
340 #undef AARCH64_REG_TYPES
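
/* For illustration: with the definitions above, AARCH64_REG_TYPES expands
   once into the enumerators REG_TYPE_R_32, REG_TYPE_R_64, ..., REG_TYPE_MAX,
   and once into the bit-mask table, so that e.g.

     reg_type_masks[REG_TYPE_R_32]   == (1 << REG_TYPE_R_32)
     reg_type_masks[REG_TYPE_R64_SP] == (1 << REG_TYPE_R_64)
                                        | (1 << REG_TYPE_SP_64)

   This is the property that aarch64_check_reg_type relies on below.  */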
341
342 /* Diagnostics used when we don't get a register of the expected type.
343 Note: this has to be synchronized with aarch64_reg_type definitions
344 above. */
345 static const char *
346 get_reg_expected_msg (aarch64_reg_type reg_type)
347 {
348 const char *msg;
349
350 switch (reg_type)
351 {
352 case REG_TYPE_R_32:
353 msg = N_("integer 32-bit register expected");
354 break;
355 case REG_TYPE_R_64:
356 msg = N_("integer 64-bit register expected");
357 break;
358 case REG_TYPE_R_N:
359 msg = N_("integer register expected");
360 break;
361 case REG_TYPE_R_Z_SP:
362 msg = N_("integer, zero or SP register expected");
363 break;
364 case REG_TYPE_FP_B:
365 msg = N_("8-bit SIMD scalar register expected");
366 break;
367 case REG_TYPE_FP_H:
368 msg = N_("16-bit SIMD scalar or floating-point half precision "
369 "register expected");
370 break;
371 case REG_TYPE_FP_S:
372 msg = N_("32-bit SIMD scalar or floating-point single precision "
373 "register expected");
374 break;
375 case REG_TYPE_FP_D:
376 msg = N_("64-bit SIMD scalar or floating-point double precision "
377 "register expected");
378 break;
379 case REG_TYPE_FP_Q:
380 msg = N_("128-bit SIMD scalar or floating-point quad precision "
381 "register expected");
382 break;
383 case REG_TYPE_CN:
384 msg = N_("C0 - C15 expected");
385 break;
386 case REG_TYPE_R_Z_BHSDQ_V:
387 msg = N_("register expected");
388 break;
389 case REG_TYPE_BHSDQ: /* any [BHSDQ]P FP */
390 msg = N_("SIMD scalar or floating-point register expected");
391 break;
392 case REG_TYPE_VN: /* any V reg */
393 msg = N_("vector register expected");
394 break;
395 default:
396 as_fatal (_("invalid register type %d"), reg_type);
397 }
398 return msg;
399 }
400
401 /* Some well known registers that we refer to directly elsewhere. */
402 #define REG_SP 31
403
404 /* Instructions take 4 bytes in the object file. */
405 #define INSN_SIZE 4
406
407 /* Define some common error messages. */
408 #define BAD_SP _("SP not allowed here")
409
410 static struct hash_control *aarch64_ops_hsh;
411 static struct hash_control *aarch64_cond_hsh;
412 static struct hash_control *aarch64_shift_hsh;
413 static struct hash_control *aarch64_sys_regs_hsh;
414 static struct hash_control *aarch64_pstatefield_hsh;
415 static struct hash_control *aarch64_sys_regs_ic_hsh;
416 static struct hash_control *aarch64_sys_regs_dc_hsh;
417 static struct hash_control *aarch64_sys_regs_at_hsh;
418 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
419 static struct hash_control *aarch64_reg_hsh;
420 static struct hash_control *aarch64_barrier_opt_hsh;
421 static struct hash_control *aarch64_nzcv_hsh;
422 static struct hash_control *aarch64_pldop_hsh;
423
424 /* Stuff needed to resolve the label ambiguity
425 As:
426 ...
427 label: <insn>
428 may differ from:
429 ...
430 label:
431 <insn> */
432
433 static symbolS *last_label_seen;
434
435 /* Literal pool structure. Held on a per-section
436 and per-sub-section basis. */
437
438 #define MAX_LITERAL_POOL_SIZE 1024
439 typedef struct literal_pool
440 {
441 expressionS literals[MAX_LITERAL_POOL_SIZE];
442 unsigned int next_free_entry;
443 unsigned int id;
444 symbolS *symbol;
445 segT section;
446 subsegT sub_section;
447 int size;
448 struct literal_pool *next;
449 } literal_pool;
450
451 /* Pointer to a linked list of literal pools. */
452 static literal_pool *list_of_pools = NULL;
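
/* Literal pools hold constants that cannot be encoded directly in an
   instruction, e.g. the value of a "ldr x0, =imm" pseudo instruction;
   they are emitted into the current section by the .ltorg (or .pool)
   directive handled below.  */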
453 \f
454 /* Pure syntax. */
455
456 /* This array holds the chars that always start a comment. If the
457 pre-processor is disabled, these aren't very useful. */
458 const char comment_chars[] = "";
459
460 /* This array holds the chars that only start a comment at the beginning of
461 a line. If the line seems to have the form '# 123 filename',
462 .line and .file directives will appear in the pre-processed output. */
463 /* Note that input_file.c hand checks for '#' at the beginning of the
464 first line of the input file. This is because the compiler outputs
465 #NO_APP at the beginning of its output. */
466 /* Also note that comments like this one will always work. */
467 const char line_comment_chars[] = "#";
468
469 const char line_separator_chars[] = ";";
470
471 /* Chars that can be used to separate the mantissa
472 from the exponent in floating point numbers. */
473 const char EXP_CHARS[] = "eE";
474
475 /* Chars that mean this number is a floating point constant. */
476 /* As in 0f12.456 */
477 /* or 0d1.2345e12 */
478
479 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
480
481 /* Prefix character that indicates the start of an immediate value. */
482 #define is_immediate_prefix(C) ((C) == '#')
483
484 /* Separator character handling. */
485
486 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
487
488 static inline bfd_boolean
489 skip_past_char (char **str, char c)
490 {
491 if (**str == c)
492 {
493 (*str)++;
494 return TRUE;
495 }
496 else
497 return FALSE;
498 }
499
500 #define skip_past_comma(str) skip_past_char (str, ',')
501
502 /* Arithmetic expressions (possibly involving symbols). */
503
504 static bfd_boolean in_my_get_expression_p = FALSE;
505
506 /* Third argument to my_get_expression. */
507 #define GE_NO_PREFIX 0
508 #define GE_OPT_PREFIX 1
509
510 /* Return TRUE if the string pointed to by *STR is successfully parsed
511 as a valid expression; *EP will be filled with the information of
512 such an expression. Otherwise return FALSE. */
513
514 static bfd_boolean
515 my_get_expression (expressionS * ep, char **str, int prefix_mode,
516 int reject_absent)
517 {
518 char *save_in;
519 segT seg;
520 int prefix_present_p = 0;
521
522 switch (prefix_mode)
523 {
524 case GE_NO_PREFIX:
525 break;
526 case GE_OPT_PREFIX:
527 if (is_immediate_prefix (**str))
528 {
529 (*str)++;
530 prefix_present_p = 1;
531 }
532 break;
533 default:
534 abort ();
535 }
536
537 memset (ep, 0, sizeof (expressionS));
538
539 save_in = input_line_pointer;
540 input_line_pointer = *str;
541 in_my_get_expression_p = TRUE;
542 seg = expression (ep);
543 in_my_get_expression_p = FALSE;
544
545 if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
546 {
547 /* We found a bad expression in md_operand(). */
548 *str = input_line_pointer;
549 input_line_pointer = save_in;
550 if (prefix_present_p && ! error_p ())
551 set_fatal_syntax_error (_("bad expression"));
552 else
553 set_first_syntax_error (_("bad expression"));
554 return FALSE;
555 }
556
557 #ifdef OBJ_AOUT
558 if (seg != absolute_section
559 && seg != text_section
560 && seg != data_section
561 && seg != bss_section && seg != undefined_section)
562 {
563 set_syntax_error (_("bad segment"));
564 *str = input_line_pointer;
565 input_line_pointer = save_in;
566 return FALSE;
567 }
568 #else
569 (void) seg;
570 #endif
571
572 *str = input_line_pointer;
573 input_line_pointer = save_in;
574 return TRUE;
575 }
576
577 /* Turn a string in input_line_pointer into a floating point constant
578 of type TYPE, and store the appropriate bytes in *LITP. The number
579 of LITTLENUMS emitted is stored in *SIZEP. An error message is
580 returned, or NULL on OK. */
581
582 char *
583 md_atof (int type, char *litP, int *sizeP)
584 {
585 return ieee_md_atof (type, litP, sizeP, target_big_endian);
586 }
587
588 /* We handle all bad expressions here, so that we can report the faulty
589 instruction in the error message. */
590 void
591 md_operand (expressionS * exp)
592 {
593 if (in_my_get_expression_p)
594 exp->X_op = O_illegal;
595 }
596
597 /* Immediate values. */
598
599 /* Errors may be set multiple times during parsing or bit encoding
600 (particularly in the Neon bits), but usually the earliest error which is set
601 will be the most meaningful. Avoid overwriting it with later (cascading)
602 errors by calling this function. */
603
604 static void
605 first_error (const char *error)
606 {
607 if (! error_p ())
608 set_syntax_error (error);
609 }
610
611 /* Similar to first_error, but this function accepts a formatted error
612 message. */
613 static void
614 first_error_fmt (const char *format, ...)
615 {
616 va_list args;
617 enum
618 { size = 100 };
619 /* N.B. this single buffer will not cause error messages for different
620 instructions to pollute each other; this is because at the end of
621 processing of each assembly line, the error message, if any, will be
622 collected by as_bad. */
623 static char buffer[size];
624
625 if (! error_p ())
626 {
627 int ret ATTRIBUTE_UNUSED;
628 va_start (args, format);
629 ret = vsnprintf (buffer, size, format, args);
630 know (ret <= size - 1 && ret >= 0);
631 va_end (args);
632 set_syntax_error (buffer);
633 }
634 }
635
636 /* Register parsing. */
637
638 /* Generic register parser which is called by other specialized
639 register parsers.
640 CCP points to what should be the beginning of a register name.
641 If it is indeed a valid register name, advance CCP over it and
642 return the reg_entry structure; otherwise return NULL.
643 It does not issue diagnostics. */
644
645 static reg_entry *
646 parse_reg (char **ccp)
647 {
648 char *start = *ccp;
649 char *p;
650 reg_entry *reg;
651
652 #ifdef REGISTER_PREFIX
653 if (*start != REGISTER_PREFIX)
654 return NULL;
655 start++;
656 #endif
657
658 p = start;
659 if (!ISALPHA (*p) || !is_name_beginner (*p))
660 return NULL;
661
662 do
663 p++;
664 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
665
666 reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
667
668 if (!reg)
669 return NULL;
670
671 *ccp = p;
672 return reg;
673 }
674
675 /* Return TRUE if REG->TYPE is compatible with the required type TYPE; otherwise
676 return FALSE. */
677 static bfd_boolean
678 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
679 {
680 if (reg->type == type)
681 return TRUE;
682
683 switch (type)
684 {
685 case REG_TYPE_R64_SP: /* 64-bit integer reg (inc SP exc XZR). */
686 case REG_TYPE_R_Z_SP: /* Integer reg (inc {X}SP inc [WX]ZR). */
687 case REG_TYPE_R_Z_BHSDQ_V: /* Any register apart from Cn. */
688 case REG_TYPE_BHSDQ: /* Any [BHSDQ]P FP or SIMD scalar register. */
689 case REG_TYPE_VN: /* Vector register. */
690 gas_assert (reg->type < REG_TYPE_MAX && type < REG_TYPE_MAX);
691 return ((reg_type_masks[reg->type] & reg_type_masks[type])
692 == reg_type_masks[reg->type]);
693 default:
694 as_fatal ("unhandled type %d", type);
695 abort ();
696 }
697 }
698
699 /* Parse a register and return PARSE_FAIL if the register is not of type R_Z_SP.
700 Return the register number otherwise. *ISREG32 is set to one if the
701 register is 32-bit wide; *ISREGZERO is set to one if the register is
702 of type Z_32 or Z_64.
703 Note that this function does not issue any diagnostics. */
704
705 static int
706 aarch64_reg_parse_32_64 (char **ccp, int reject_sp, int reject_rz,
707 int *isreg32, int *isregzero)
708 {
709 char *str = *ccp;
710 const reg_entry *reg = parse_reg (&str);
711
712 if (reg == NULL)
713 return PARSE_FAIL;
714
715 if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
716 return PARSE_FAIL;
717
718 switch (reg->type)
719 {
720 case REG_TYPE_SP_32:
721 case REG_TYPE_SP_64:
722 if (reject_sp)
723 return PARSE_FAIL;
724 *isreg32 = reg->type == REG_TYPE_SP_32;
725 *isregzero = 0;
726 break;
727 case REG_TYPE_R_32:
728 case REG_TYPE_R_64:
729 *isreg32 = reg->type == REG_TYPE_R_32;
730 *isregzero = 0;
731 break;
732 case REG_TYPE_Z_32:
733 case REG_TYPE_Z_64:
734 if (reject_rz)
735 return PARSE_FAIL;
736 *isreg32 = reg->type == REG_TYPE_Z_32;
737 *isregzero = 1;
738 break;
739 default:
740 return PARSE_FAIL;
741 }
742
743 *ccp = str;
744
745 return reg->number;
746 }
747
748 /* Parse the qualifier of a SIMD vector register or a SIMD vector element.
749 Fill in *PARSED_TYPE and return TRUE if the parsing succeeds;
750 otherwise return FALSE.
751
752 Accept only one occurrence of:
753 8b 16b 4h 8h 2s 4s 1d 2d
754 b h s d q */
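/* For example, ".4s" parses as width 4 with 32-bit elements (4 * 32 == 128
   bits, a Q-form vector) and ".1d" as width 1 with 64-bit elements, while a
   bare ".s" parses as width 0, an element-size-only qualifier that
   parse_typed_reg below expects to be followed by an index, e.g. v1.s[2].  */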
755 static bfd_boolean
756 parse_neon_type_for_operand (struct neon_type_el *parsed_type, char **str)
757 {
758 char *ptr = *str;
759 unsigned width;
760 unsigned element_size;
761 enum neon_el_type type;
762
763 /* skip '.' */
764 ptr++;
765
766 if (!ISDIGIT (*ptr))
767 {
768 width = 0;
769 goto elt_size;
770 }
771 width = strtoul (ptr, &ptr, 10);
772 if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
773 {
774 first_error_fmt (_("bad size %d in vector width specifier"), width);
775 return FALSE;
776 }
777
778 elt_size:
779 switch (TOLOWER (*ptr))
780 {
781 case 'b':
782 type = NT_b;
783 element_size = 8;
784 break;
785 case 'h':
786 type = NT_h;
787 element_size = 16;
788 break;
789 case 's':
790 type = NT_s;
791 element_size = 32;
792 break;
793 case 'd':
794 type = NT_d;
795 element_size = 64;
796 break;
797 case 'q':
798 if (width == 1)
799 {
800 type = NT_q;
801 element_size = 128;
802 break;
803 }
804 /* fall through. */
805 default:
806 if (*ptr != '\0')
807 first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
808 else
809 first_error (_("missing element size"));
810 return FALSE;
811 }
812 if (width != 0 && width * element_size != 64 && width * element_size != 128)
813 {
814 first_error_fmt (_
815 ("invalid element size %d and vector size combination %c"),
816 width, *ptr);
817 return FALSE;
818 }
819 ptr++;
820
821 parsed_type->type = type;
822 parsed_type->width = width;
823
824 *str = ptr;
825
826 return TRUE;
827 }
828
829 /* Parse a single type, e.g. ".8b", leading period included.
830 Only applicable to Vn registers.
831
832 Return TRUE on success; otherwise return FALSE. */
833 static bfd_boolean
834 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
835 {
836 char *str = *ccp;
837
838 if (*str == '.')
839 {
840 if (! parse_neon_type_for_operand (vectype, &str))
841 {
842 first_error (_("vector type expected"));
843 return FALSE;
844 }
845 }
846 else
847 return FALSE;
848
849 *ccp = str;
850
851 return TRUE;
852 }
853
854 /* Parse a register of the type TYPE.
855
856 Return PARSE_FAIL if the string pointed by *CCP is not a valid register
857 name or the parsed register is not of TYPE.
858
859 Otherwise return the register number, and optionally fill in the actual
860 type of the register in *RTYPE when multiple alternatives were given, and
861 return the register shape and element index information in *TYPEINFO.
862
863 IN_REG_LIST should be set to TRUE if the caller is parsing a register
864 list. */
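/* For example, parsing "v7.2d" returns 7 with *TYPEINFO recording NT_d
   elements and width 2 (NTA_HASTYPE), while "v3.d[1]" returns 3 with
   NTA_HASINDEX set and index 1.  */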
865
866 static int
867 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
868 struct neon_type_el *typeinfo, bfd_boolean in_reg_list)
869 {
870 char *str = *ccp;
871 const reg_entry *reg = parse_reg (&str);
872 struct neon_type_el atype;
873 struct neon_type_el parsetype;
874 bfd_boolean is_typed_vecreg = FALSE;
875
876 atype.defined = 0;
877 atype.type = NT_invtype;
878 atype.width = -1;
879 atype.index = 0;
880
881 if (reg == NULL)
882 {
883 if (typeinfo)
884 *typeinfo = atype;
885 set_default_error ();
886 return PARSE_FAIL;
887 }
888
889 if (! aarch64_check_reg_type (reg, type))
890 {
891 DEBUG_TRACE ("reg type check failed");
892 set_default_error ();
893 return PARSE_FAIL;
894 }
895 type = reg->type;
896
897 if (type == REG_TYPE_VN
898 && parse_neon_operand_type (&parsetype, &str))
899 {
900 /* Register is of the form Vn.[bhsdq]. */
901 is_typed_vecreg = TRUE;
902
903 if (parsetype.width == 0)
904 /* Expect index. In the new scheme we cannot have
905 Vn.[bhsdq] represent a scalar. Therefore any
906 Vn.[bhsdq] should have an index following it.
907 Except in register lists, of course. */
908 atype.defined |= NTA_HASINDEX;
909 else
910 atype.defined |= NTA_HASTYPE;
911
912 atype.type = parsetype.type;
913 atype.width = parsetype.width;
914 }
915
916 if (skip_past_char (&str, '['))
917 {
918 expressionS exp;
919
920 /* Reject Sn[index] syntax. */
921 if (!is_typed_vecreg)
922 {
923 first_error (_("this type of register can't be indexed"));
924 return PARSE_FAIL;
925 }
926
927 if (in_reg_list == TRUE)
928 {
929 first_error (_("index not allowed inside register list"));
930 return PARSE_FAIL;
931 }
932
933 atype.defined |= NTA_HASINDEX;
934
935 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
936
937 if (exp.X_op != O_constant)
938 {
939 first_error (_("constant expression required"));
940 return PARSE_FAIL;
941 }
942
943 if (! skip_past_char (&str, ']'))
944 return PARSE_FAIL;
945
946 atype.index = exp.X_add_number;
947 }
948 else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
949 {
950 /* Indexed vector register expected. */
951 first_error (_("indexed vector register expected"));
952 return PARSE_FAIL;
953 }
954
955 /* A vector reg Vn should be typed or indexed. */
956 if (type == REG_TYPE_VN && atype.defined == 0)
957 {
958 first_error (_("invalid use of vector register"));
959 }
960
961 if (typeinfo)
962 *typeinfo = atype;
963
964 if (rtype)
965 *rtype = type;
966
967 *ccp = str;
968
969 return reg->number;
970 }
971
972 /* Parse register.
973
974 Return the register number on success; return PARSE_FAIL otherwise.
975
976 If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
977 the register (e.g. NEON double or quad reg when either has been requested).
978
979 If this is a NEON vector register with additional type information, fill
980 in the struct pointed to by VECTYPE (if non-NULL).
981
982 This parser does not handle register lists. */
983
984 static int
985 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
986 aarch64_reg_type *rtype, struct neon_type_el *vectype)
987 {
988 struct neon_type_el atype;
989 char *str = *ccp;
990 int reg = parse_typed_reg (&str, type, rtype, &atype,
991 /*in_reg_list= */ FALSE);
992
993 if (reg == PARSE_FAIL)
994 return PARSE_FAIL;
995
996 if (vectype)
997 *vectype = atype;
998
999 *ccp = str;
1000
1001 return reg;
1002 }
1003
1004 static inline bfd_boolean
1005 eq_neon_type_el (struct neon_type_el e1, struct neon_type_el e2)
1006 {
1007 return
1008 e1.type == e2.type
1009 && e1.defined == e2.defined
1010 && e1.width == e2.width && e1.index == e2.index;
1011 }
1012
1013 /* This function parses the NEON register list. On success, it returns
1014 the parsed register list information in the following encoded format:
1015
1016 bit 18-22 | 13-17 | 7-11 | 2-6 | 0-1
1017 4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1018
1019 The information of the register shape and/or index is returned in
1020 *VECTYPE.
1021
1022 It returns PARSE_FAIL if the register list is invalid.
1023
1024 The list contains one to four registers.
1025 Each register can be one of:
1026 <Vt>.<T>[<index>]
1027 <Vt>.<T>
1028 All <T> should be identical.
1029 All <index> should be identical.
1030 There are restrictions on <Vt> numbers which are checked later
1031 (by reg_list_valid_p). */
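/* A worked example of the encoding above: parsing "{v2.4s, v3.4s}" gives
   nb_regs == 2 with register numbers 2 and 3, so the function returns
   ((2 | (3 << 5)) << 2) | (2 - 1) == 0x189; the two low bits hold the
   register count minus one and each following 5-bit field holds one
   register number.  */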
1032
1033 static int
1034 parse_neon_reg_list (char **ccp, struct neon_type_el *vectype)
1035 {
1036 char *str = *ccp;
1037 int nb_regs;
1038 struct neon_type_el typeinfo, typeinfo_first;
1039 int val, val_range;
1040 int in_range;
1041 int ret_val;
1042 int i;
1043 bfd_boolean error = FALSE;
1044 bfd_boolean expect_index = FALSE;
1045
1046 if (*str != '{')
1047 {
1048 set_syntax_error (_("expecting {"));
1049 return PARSE_FAIL;
1050 }
1051 str++;
1052
1053 nb_regs = 0;
1054 typeinfo_first.defined = 0;
1055 typeinfo_first.type = NT_invtype;
1056 typeinfo_first.width = -1;
1057 typeinfo_first.index = 0;
1058 ret_val = 0;
1059 val = -1;
1060 val_range = -1;
1061 in_range = 0;
1062 do
1063 {
1064 if (in_range)
1065 {
1066 str++; /* skip over '-' */
1067 val_range = val;
1068 }
1069 val = parse_typed_reg (&str, REG_TYPE_VN, NULL, &typeinfo,
1070 /*in_reg_list= */ TRUE);
1071 if (val == PARSE_FAIL)
1072 {
1073 set_first_syntax_error (_("invalid vector register in list"));
1074 error = TRUE;
1075 continue;
1076 }
1077 /* reject [bhsd]n */
1078 if (typeinfo.defined == 0)
1079 {
1080 set_first_syntax_error (_("invalid scalar register in list"));
1081 error = TRUE;
1082 continue;
1083 }
1084
1085 if (typeinfo.defined & NTA_HASINDEX)
1086 expect_index = TRUE;
1087
1088 if (in_range)
1089 {
1090 if (val < val_range)
1091 {
1092 set_first_syntax_error
1093 (_("invalid range in vector register list"));
1094 error = TRUE;
1095 }
1096 val_range++;
1097 }
1098 else
1099 {
1100 val_range = val;
1101 if (nb_regs == 0)
1102 typeinfo_first = typeinfo;
1103 else if (! eq_neon_type_el (typeinfo_first, typeinfo))
1104 {
1105 set_first_syntax_error
1106 (_("type mismatch in vector register list"));
1107 error = TRUE;
1108 }
1109 }
1110 if (! error)
1111 for (i = val_range; i <= val; i++)
1112 {
1113 ret_val |= i << (5 * nb_regs);
1114 nb_regs++;
1115 }
1116 in_range = 0;
1117 }
1118 while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1119
1120 skip_whitespace (str);
1121 if (*str != '}')
1122 {
1123 set_first_syntax_error (_("end of vector register list not found"));
1124 error = TRUE;
1125 }
1126 str++;
1127
1128 skip_whitespace (str);
1129
1130 if (expect_index)
1131 {
1132 if (skip_past_char (&str, '['))
1133 {
1134 expressionS exp;
1135
1136 my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1137 if (exp.X_op != O_constant)
1138 {
1139 set_first_syntax_error (_("constant expression required."));
1140 error = TRUE;
1141 }
1142 if (! skip_past_char (&str, ']'))
1143 error = TRUE;
1144 else
1145 typeinfo_first.index = exp.X_add_number;
1146 }
1147 else
1148 {
1149 set_first_syntax_error (_("expected index"));
1150 error = TRUE;
1151 }
1152 }
1153
1154 if (nb_regs > 4)
1155 {
1156 set_first_syntax_error (_("too many registers in vector register list"));
1157 error = TRUE;
1158 }
1159 else if (nb_regs == 0)
1160 {
1161 set_first_syntax_error (_("empty vector register list"));
1162 error = TRUE;
1163 }
1164
1165 *ccp = str;
1166 if (! error)
1167 *vectype = typeinfo_first;
1168
1169 return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1170 }
1171
1172 /* Directives: register aliases. */
1173
1174 static reg_entry *
1175 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1176 {
1177 reg_entry *new;
1178 const char *name;
1179
1180 if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1181 {
1182 if (new->builtin)
1183 as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1184 str);
1185
1186 /* Only warn about a redefinition if it's not defined as the
1187 same register. */
1188 else if (new->number != number || new->type != type)
1189 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1190
1191 return NULL;
1192 }
1193
1194 name = xstrdup (str);
1195 new = xmalloc (sizeof (reg_entry));
1196
1197 new->name = name;
1198 new->number = number;
1199 new->type = type;
1200 new->builtin = FALSE;
1201
1202 if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1203 abort ();
1204
1205 return new;
1206 }
1207
1208 /* Look for the .req directive. This is of the form:
1209
1210 new_register_name .req existing_register_name
1211
1212 If we find one, or if it looks sufficiently like one that we want to
1213 handle any error here, return TRUE. Otherwise return FALSE. */
1214
1215 static bfd_boolean
1216 create_register_alias (char *newname, char *p)
1217 {
1218 const reg_entry *old;
1219 char *oldname, *nbuf;
1220 size_t nlen;
1221
1222 /* The input scrubber ensures that whitespace after the mnemonic is
1223 collapsed to single spaces. */
1224 oldname = p;
1225 if (strncmp (oldname, " .req ", 6) != 0)
1226 return FALSE;
1227
1228 oldname += 6;
1229 if (*oldname == '\0')
1230 return FALSE;
1231
1232 old = hash_find (aarch64_reg_hsh, oldname);
1233 if (!old)
1234 {
1235 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1236 return TRUE;
1237 }
1238
1239 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1240 the desired alias name, and p points to its end. If not, then
1241 the desired alias name is in the global original_case_string. */
1242 #ifdef TC_CASE_SENSITIVE
1243 nlen = p - newname;
1244 #else
1245 newname = original_case_string;
1246 nlen = strlen (newname);
1247 #endif
1248
1249 nbuf = alloca (nlen + 1);
1250 memcpy (nbuf, newname, nlen);
1251 nbuf[nlen] = '\0';
1252
1253 /* Create aliases under the new name as stated; an all-lowercase
1254 version of the new name; and an all-uppercase version of the new
1255 name. */
1256 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1257 {
1258 for (p = nbuf; *p; p++)
1259 *p = TOUPPER (*p);
1260
1261 if (strncmp (nbuf, newname, nlen))
1262 {
1263 /* If this attempt to create an additional alias fails, do not bother
1264 trying to create the all-lower case alias. We will fail and issue
1265 a second, duplicate error message. This situation arises when the
1266 programmer does something like:
1267 foo .req x0
1268 Foo .req x1
1269 The second .req creates the "Foo" alias but then fails to create
1270 the artificial FOO alias because it has already been created by the
1271 first .req. */
1272 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1273 return TRUE;
1274 }
1275
1276 for (p = nbuf; *p; p++)
1277 *p = TOLOWER (*p);
1278
1279 if (strncmp (nbuf, newname, nlen))
1280 insert_reg_alias (nbuf, old->number, old->type);
1281 }
1282
1283 return TRUE;
1284 }
1285
1286 /* Should never be called, as .req goes between the alias and the
1287 register name, not at the beginning of the line. */
1288 static void
1289 s_req (int a ATTRIBUTE_UNUSED)
1290 {
1291 as_bad (_("invalid syntax for .req directive"));
1292 }
1293
1294 /* The .unreq directive deletes an alias which was previously defined
1295 by .req. For example:
1296
1297 my_alias .req x11
1298 .unreq my_alias */
1299
1300 static void
1301 s_unreq (int a ATTRIBUTE_UNUSED)
1302 {
1303 char *name;
1304 char saved_char;
1305
1306 name = input_line_pointer;
1307
1308 while (*input_line_pointer != 0
1309 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1310 ++input_line_pointer;
1311
1312 saved_char = *input_line_pointer;
1313 *input_line_pointer = 0;
1314
1315 if (!*name)
1316 as_bad (_("invalid syntax for .unreq directive"));
1317 else
1318 {
1319 reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1320
1321 if (!reg)
1322 as_bad (_("unknown register alias '%s'"), name);
1323 else if (reg->builtin)
1324 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1325 name);
1326 else
1327 {
1328 char *p;
1329 char *nbuf;
1330
1331 hash_delete (aarch64_reg_hsh, name, FALSE);
1332 free ((char *) reg->name);
1333 free (reg);
1334
1335 /* Also locate the all upper case and all lower case versions.
1336 Do not complain if we cannot find one or the other as it
1337 was probably deleted above. */
1338
1339 nbuf = strdup (name);
1340 for (p = nbuf; *p; p++)
1341 *p = TOUPPER (*p);
1342 reg = hash_find (aarch64_reg_hsh, nbuf);
1343 if (reg)
1344 {
1345 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1346 free ((char *) reg->name);
1347 free (reg);
1348 }
1349
1350 for (p = nbuf; *p; p++)
1351 *p = TOLOWER (*p);
1352 reg = hash_find (aarch64_reg_hsh, nbuf);
1353 if (reg)
1354 {
1355 hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1356 free ((char *) reg->name);
1357 free (reg);
1358 }
1359
1360 free (nbuf);
1361 }
1362 }
1363
1364 *input_line_pointer = saved_char;
1365 demand_empty_rest_of_line ();
1366 }
1367
1368 /* Directives: Instruction set selection. */
1369
1370 #ifdef OBJ_ELF
1371 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1372 spec. (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1373 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1374 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
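/* For instance, a section that begins with instructions gets an "$x" mapping
   symbol where the first instruction is emitted; if ".word" data follows,
   a "$d" symbol marks the transition, and another "$x" marks the point
   where code resumes.  */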
1375
1376 /* Create a new mapping symbol for the transition to STATE. */
1377
1378 static void
1379 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1380 {
1381 symbolS *symbolP;
1382 const char *symname;
1383 int type;
1384
1385 switch (state)
1386 {
1387 case MAP_DATA:
1388 symname = "$d";
1389 type = BSF_NO_FLAGS;
1390 break;
1391 case MAP_INSN:
1392 symname = "$x";
1393 type = BSF_NO_FLAGS;
1394 break;
1395 default:
1396 abort ();
1397 }
1398
1399 symbolP = symbol_new (symname, now_seg, value, frag);
1400 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1401
1402 /* Save the mapping symbols for future reference. Also check that
1403 we do not place two mapping symbols at the same offset within a
1404 frag. We'll handle overlap between frags in
1405 check_mapping_symbols.
1406
1407 If .fill or other data filling directive generates zero sized data,
1408 the mapping symbol for the following code will have the same value
1409 as the one generated for the data filling directive. In this case,
1410 we replace the old symbol with the new one at the same address. */
1411 if (value == 0)
1412 {
1413 if (frag->tc_frag_data.first_map != NULL)
1414 {
1415 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1416 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1417 &symbol_lastP);
1418 }
1419 frag->tc_frag_data.first_map = symbolP;
1420 }
1421 if (frag->tc_frag_data.last_map != NULL)
1422 {
1423 know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1424 S_GET_VALUE (symbolP));
1425 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1426 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1427 &symbol_lastP);
1428 }
1429 frag->tc_frag_data.last_map = symbolP;
1430 }
1431
1432 /* We must sometimes convert a region marked as code to data during
1433 code alignment, if an odd number of bytes have to be padded. The
1434 code mapping symbol is pushed to an aligned address. */
1435
1436 static void
1437 insert_data_mapping_symbol (enum mstate state,
1438 valueT value, fragS * frag, offsetT bytes)
1439 {
1440 /* If there was already a mapping symbol, remove it. */
1441 if (frag->tc_frag_data.last_map != NULL
1442 && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1443 frag->fr_address + value)
1444 {
1445 symbolS *symp = frag->tc_frag_data.last_map;
1446
1447 if (value == 0)
1448 {
1449 know (frag->tc_frag_data.first_map == symp);
1450 frag->tc_frag_data.first_map = NULL;
1451 }
1452 frag->tc_frag_data.last_map = NULL;
1453 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1454 }
1455
1456 make_mapping_symbol (MAP_DATA, value, frag);
1457 make_mapping_symbol (state, value + bytes, frag);
1458 }
1459
1460 static void mapping_state_2 (enum mstate state, int max_chars);
1461
1462 /* Set the mapping state to STATE. Only call this when about to
1463 emit some STATE bytes to the file. */
1464
1465 void
1466 mapping_state (enum mstate state)
1467 {
1468 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1469
1470 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1471
1472 if (mapstate == state)
1473 /* The mapping symbol has already been emitted.
1474 There is nothing else to do. */
1475 return;
1476 else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
1477 /* This case will be evaluated later in the next else. */
1478 return;
1479 else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1480 {
1481 /* Only add the symbol if the offset is > 0:
1482 if we're at the first frag, check its size > 0;
1483 if we're not at the first frag, then for sure
1484 the offset is > 0. */
1485 struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1486 const int add_symbol = (frag_now != frag_first)
1487 || (frag_now_fix () > 0);
1488
1489 if (add_symbol)
1490 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1491 }
1492
1493 mapping_state_2 (state, 0);
1494 #undef TRANSITION
1495 }
1496
1497 /* Same as mapping_state, but MAX_CHARS bytes have already been
1498 allocated. Put the mapping symbol that far back. */
1499
1500 static void
1501 mapping_state_2 (enum mstate state, int max_chars)
1502 {
1503 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1504
1505 if (!SEG_NORMAL (now_seg))
1506 return;
1507
1508 if (mapstate == state)
1509 /* The mapping symbol has already been emitted.
1510 There is nothing else to do. */
1511 return;
1512
1513 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1514 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1515 }
1516 #else
1517 #define mapping_state(x) /* nothing */
1518 #define mapping_state_2(x, y) /* nothing */
1519 #endif
1520
1521 /* Directives: sectioning and alignment. */
1522
1523 static void
1524 s_bss (int ignore ATTRIBUTE_UNUSED)
1525 {
1526 /* We don't support putting frags in the BSS segment; we fake it by
1527 marking in_bss, then looking at s_skip for clues. */
1528 subseg_set (bss_section, 0);
1529 demand_empty_rest_of_line ();
1530 mapping_state (MAP_DATA);
1531 }
1532
1533 static void
1534 s_even (int ignore ATTRIBUTE_UNUSED)
1535 {
1536 /* Never make a frag if we expect an extra pass. */
1537 if (!need_pass_2)
1538 frag_align (1, 0, 0);
1539
1540 record_alignment (now_seg, 1);
1541
1542 demand_empty_rest_of_line ();
1543 }
1544
1545 /* Directives: Literal pools. */
1546
1547 static literal_pool *
1548 find_literal_pool (int size)
1549 {
1550 literal_pool *pool;
1551
1552 for (pool = list_of_pools; pool != NULL; pool = pool->next)
1553 {
1554 if (pool->section == now_seg
1555 && pool->sub_section == now_subseg && pool->size == size)
1556 break;
1557 }
1558
1559 return pool;
1560 }
1561
1562 static literal_pool *
1563 find_or_make_literal_pool (int size)
1564 {
1565 /* Next literal pool ID number. */
1566 static unsigned int latest_pool_num = 1;
1567 literal_pool *pool;
1568
1569 pool = find_literal_pool (size);
1570
1571 if (pool == NULL)
1572 {
1573 /* Create a new pool. */
1574 pool = xmalloc (sizeof (*pool));
1575 if (!pool)
1576 return NULL;
1577
1578 /* Currently we always put the literal pool in the current text
1579 section. If we were generating "small" model code where we
1580 knew that all code and initialised data was within 1MB then
1581 we could output literals to mergeable, read-only data
1582 sections. */
1583
1584 pool->next_free_entry = 0;
1585 pool->section = now_seg;
1586 pool->sub_section = now_subseg;
1587 pool->size = size;
1588 pool->next = list_of_pools;
1589 pool->symbol = NULL;
1590
1591 /* Add it to the list. */
1592 list_of_pools = pool;
1593 }
1594
1595 /* New pools, and emptied pools, will have a NULL symbol. */
1596 if (pool->symbol == NULL)
1597 {
1598 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1599 (valueT) 0, &zero_address_frag);
1600 pool->id = latest_pool_num++;
1601 }
1602
1603 /* Done. */
1604 return pool;
1605 }
1606
1607 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1608 Return TRUE on success, otherwise return FALSE. */
1609 static bfd_boolean
1610 add_to_lit_pool (expressionS *exp, int size)
1611 {
1612 literal_pool *pool;
1613 unsigned int entry;
1614
1615 pool = find_or_make_literal_pool (size);
1616
1617 /* Check if this literal value is already in the pool. */
1618 for (entry = 0; entry < pool->next_free_entry; entry++)
1619 {
1620 if ((pool->literals[entry].X_op == exp->X_op)
1621 && (exp->X_op == O_constant)
1622 && (pool->literals[entry].X_add_number == exp->X_add_number)
1623 && (pool->literals[entry].X_unsigned == exp->X_unsigned))
1624 break;
1625
1626 if ((pool->literals[entry].X_op == exp->X_op)
1627 && (exp->X_op == O_symbol)
1628 && (pool->literals[entry].X_add_number == exp->X_add_number)
1629 && (pool->literals[entry].X_add_symbol == exp->X_add_symbol)
1630 && (pool->literals[entry].X_op_symbol == exp->X_op_symbol))
1631 break;
1632 }
1633
1634 /* Do we need to create a new entry? */
1635 if (entry == pool->next_free_entry)
1636 {
1637 if (entry >= MAX_LITERAL_POOL_SIZE)
1638 {
1639 set_syntax_error (_("literal pool overflow"));
1640 return FALSE;
1641 }
1642
1643 pool->literals[entry] = *exp;
1644 pool->next_free_entry += 1;
1645 }
1646
1647 exp->X_op = O_symbol;
1648 exp->X_add_number = ((int) entry) * size;
1649 exp->X_add_symbol = pool->symbol;
1650
1651 return TRUE;
1652 }
1653
1654 /* Can't use symbol_new here, so have to create a symbol and then at
1655 a later date assign it a value. That's what these functions do. */
1656
1657 static void
1658 symbol_locate (symbolS * symbolP,
1659 const char *name,/* It is copied, the caller can modify. */
1660 segT segment, /* Segment identifier (SEG_<something>). */
1661 valueT valu, /* Symbol value. */
1662 fragS * frag) /* Associated fragment. */
1663 {
1664 unsigned int name_length;
1665 char *preserved_copy_of_name;
1666
1667 name_length = strlen (name) + 1; /* +1 for \0. */
1668 obstack_grow (&notes, name, name_length);
1669 preserved_copy_of_name = obstack_finish (&notes);
1670
1671 #ifdef tc_canonicalize_symbol_name
1672 preserved_copy_of_name =
1673 tc_canonicalize_symbol_name (preserved_copy_of_name);
1674 #endif
1675
1676 S_SET_NAME (symbolP, preserved_copy_of_name);
1677
1678 S_SET_SEGMENT (symbolP, segment);
1679 S_SET_VALUE (symbolP, valu);
1680 symbol_clear_list_pointers (symbolP);
1681
1682 symbol_set_frag (symbolP, frag);
1683
1684 /* Link to end of symbol chain. */
1685 {
1686 extern int symbol_table_frozen;
1687
1688 if (symbol_table_frozen)
1689 abort ();
1690 }
1691
1692 symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1693
1694 obj_symbol_new_hook (symbolP);
1695
1696 #ifdef tc_symbol_new_hook
1697 tc_symbol_new_hook (symbolP);
1698 #endif
1699
1700 #ifdef DEBUG_SYMS
1701 verify_symbol_chain (symbol_rootP, symbol_lastP);
1702 #endif /* DEBUG_SYMS */
1703 }
1704
1705
1706 static void
1707 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1708 {
1709 unsigned int entry;
1710 literal_pool *pool;
1711 char sym_name[20];
1712 int align;
1713
1714 for (align = 2; align <= 4; align++)
1715 {
1716 int size = 1 << align;
1717
1718 pool = find_literal_pool (size);
1719 if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1720 continue;
1721
1722 mapping_state (MAP_DATA);
1723
1724 /* Align the pool, as we have word accesses.
1725 Only make a frag if we have to. */
1726 if (!need_pass_2)
1727 frag_align (align, 0, 0);
1728
1729 record_alignment (now_seg, align);
1730
1731 sprintf (sym_name, "$$lit_\002%x", pool->id);
1732
1733 symbol_locate (pool->symbol, sym_name, now_seg,
1734 (valueT) frag_now_fix (), frag_now);
1735 symbol_table_insert (pool->symbol);
1736
1737 for (entry = 0; entry < pool->next_free_entry; entry++)
1738 /* First output the expression in the instruction to the pool. */
1739 emit_expr (&(pool->literals[entry]), size); /* .word|.xword */
1740
1741 /* Mark the pool as empty. */
1742 pool->next_free_entry = 0;
1743 pool->symbol = NULL;
1744 }
1745 }
1746
1747 #ifdef OBJ_ELF
1748 /* Forward declarations for functions below, in the MD interface
1749 section. */
1750 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1751 static struct reloc_table_entry * find_reloc_table_entry (char **);
1752
1753 /* Directives: Data. */
1754 /* N.B. the support for relocation suffix in this directive needs to be
1755 implemented properly. */
1756
1757 static void
1758 s_aarch64_elf_cons (int nbytes)
1759 {
1760 expressionS exp;
1761
1762 #ifdef md_flush_pending_output
1763 md_flush_pending_output ();
1764 #endif
1765
1766 if (is_it_end_of_statement ())
1767 {
1768 demand_empty_rest_of_line ();
1769 return;
1770 }
1771
1772 #ifdef md_cons_align
1773 md_cons_align (nbytes);
1774 #endif
1775
1776 mapping_state (MAP_DATA);
1777 do
1778 {
1779 struct reloc_table_entry *reloc;
1780
1781 expression (&exp);
1782
1783 if (exp.X_op != O_symbol)
1784 emit_expr (&exp, (unsigned int) nbytes);
1785 else
1786 {
1787 skip_past_char (&input_line_pointer, '#');
1788 if (skip_past_char (&input_line_pointer, ':'))
1789 {
1790 reloc = find_reloc_table_entry (&input_line_pointer);
1791 if (reloc == NULL)
1792 as_bad (_("unrecognized relocation suffix"));
1793 else
1794 as_bad (_("unimplemented relocation suffix"));
1795 ignore_rest_of_line ();
1796 return;
1797 }
1798 else
1799 emit_expr (&exp, (unsigned int) nbytes);
1800 }
1801 }
1802 while (*input_line_pointer++ == ',');
1803
1804 /* Put terminator back into stream. */
1805 input_line_pointer--;
1806 demand_empty_rest_of_line ();
1807 }
1808
1809 #endif /* OBJ_ELF */
1810
1811 /* Output a 32-bit word, but mark as an instruction. */
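/* For example, ".inst 0xd503201f" emits the 32-bit encoding of a NOP and,
   unlike ".word", covers it with an "$x" mapping symbol (on ELF targets) so
   disassemblers treat it as code; the value is byte-swapped for big-endian
   targets.  */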
1812
1813 static void
1814 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1815 {
1816 expressionS exp;
1817
1818 #ifdef md_flush_pending_output
1819 md_flush_pending_output ();
1820 #endif
1821
1822 if (is_it_end_of_statement ())
1823 {
1824 demand_empty_rest_of_line ();
1825 return;
1826 }
1827
1828 if (!need_pass_2)
1829 frag_align_code (2, 0);
1830 #ifdef OBJ_ELF
1831 mapping_state (MAP_INSN);
1832 #endif
1833
1834 do
1835 {
1836 expression (&exp);
1837 if (exp.X_op != O_constant)
1838 {
1839 as_bad (_("constant expression required"));
1840 ignore_rest_of_line ();
1841 return;
1842 }
1843
1844 if (target_big_endian)
1845 {
1846 unsigned int val = exp.X_add_number;
1847 exp.X_add_number = SWAP_32 (val);
1848 }
1849 emit_expr (&exp, 4);
1850 }
1851 while (*input_line_pointer++ == ',');
1852
1853 /* Put terminator back into stream. */
1854 input_line_pointer--;
1855 demand_empty_rest_of_line ();
1856 }
1857
1858 #ifdef OBJ_ELF
1859 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction. */
1860
1861 static void
1862 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1863 {
1864 expressionS exp;
1865
1866 /* Since we're just labelling the code, there's no need to define a
1867 mapping symbol. */
1868 expression (&exp);
1869 /* Make sure there is enough room in this frag for the following
1870 blr. This trick only works if the blr follows immediately after
1871 the .tlsdesc directive. */
1872 frag_grow (4);
1873 fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1874 BFD_RELOC_AARCH64_TLSDESC_CALL);
1875
1876 demand_empty_rest_of_line ();
1877 }
1878 #endif /* OBJ_ELF */
1879
1880 static void s_aarch64_arch (int);
1881 static void s_aarch64_cpu (int);
1882
1883 /* This table describes all the machine specific pseudo-ops the assembler
1884 has to support. The fields are:
1885 pseudo-op name without dot
1886 function to call to execute this pseudo-op
1887 Integer arg to pass to the function. */
1888
1889 const pseudo_typeS md_pseudo_table[] = {
1890 /* Never called because '.req' does not start a line. */
1891 {"req", s_req, 0},
1892 {"unreq", s_unreq, 0},
1893 {"bss", s_bss, 0},
1894 {"even", s_even, 0},
1895 {"ltorg", s_ltorg, 0},
1896 {"pool", s_ltorg, 0},
1897 {"cpu", s_aarch64_cpu, 0},
1898 {"arch", s_aarch64_arch, 0},
1899 {"inst", s_aarch64_inst, 0},
1900 #ifdef OBJ_ELF
1901 {"tlsdesccall", s_tlsdesccall, 0},
1902 {"word", s_aarch64_elf_cons, 4},
1903 {"long", s_aarch64_elf_cons, 4},
1904 {"xword", s_aarch64_elf_cons, 8},
1905 {"dword", s_aarch64_elf_cons, 8},
1906 #endif
1907 {0, 0, 0}
1908 };
1909 \f
1910
1911 /* Check whether STR points to a register name followed by a comma or the
1912 end of line; REG_TYPE indicates which register types are checked
1913 against. Return TRUE if STR is such a register name; otherwise return
1914 FALSE. The function does not intend to produce any diagnostics, but since
1915 the register parser aarch64_reg_parse, which is called by this function,
1916 does produce diagnostics, so we call clear_error to clear any diagnostics
1917 that may be generated by aarch64_reg_parse.
1918 Also, the function returns FALSE directly if there is any user error
1919 present at the function entry. This prevents the existing diagnostics
1920 state from being spoiled.
1921 The function currently serves parse_constant_immediate and
1922 parse_big_immediate only. */
1923 static bfd_boolean
1924 reg_name_p (char *str, aarch64_reg_type reg_type)
1925 {
1926 int reg;
1927
1928 /* Prevent the diagnostics state from being spoiled. */
1929 if (error_p ())
1930 return FALSE;
1931
1932 reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
1933
1934 /* Clear the parsing error that may be set by the reg parser. */
1935 clear_error ();
1936
1937 if (reg == PARSE_FAIL)
1938 return FALSE;
1939
1940 skip_whitespace (str);
1941 if (*str == ',' || is_end_of_line[(unsigned int) *str])
1942 return TRUE;
1943
1944 return FALSE;
1945 }
1946
1947 /* Parser functions used exclusively in instruction operands. */
1948
1949 /* Parse an immediate expression which may not be constant.
1950
1951 To prevent the expression parser from pushing a register name
1952 into the symbol table as an undefined symbol, firstly a check is
1953 done to find out whether STR is a valid register name followed
1954 by a comma or the end of line. Return FALSE if STR is such a
1955 string. */
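/* For example, when a template that takes an immediate operand is tried
   against "mov x0, x1", this check recognizes "x1" as a register name, so
   the template fails cleanly instead of the expression parser entering an
   undefined symbol "x1" into the symbol table.  */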
1956
1957 static bfd_boolean
1958 parse_immediate_expression (char **str, expressionS *exp)
1959 {
1960 if (reg_name_p (*str, REG_TYPE_R_Z_BHSDQ_V))
1961 {
1962 set_recoverable_error (_("immediate operand required"));
1963 return FALSE;
1964 }
1965
1966 my_get_expression (exp, str, GE_OPT_PREFIX, 1);
1967
1968 if (exp->X_op == O_absent)
1969 {
1970 set_fatal_syntax_error (_("missing immediate expression"));
1971 return FALSE;
1972 }
1973
1974 return TRUE;
1975 }
1976
1977 /* Constant immediate-value read function for use in insn parsing.
1978 STR points to the beginning of the immediate (with the optional
1979 leading #); *VAL receives the value.
1980
1981 Return TRUE on success; otherwise return FALSE. */
1982
1983 static bfd_boolean
1984 parse_constant_immediate (char **str, int64_t * val)
1985 {
1986 expressionS exp;
1987
1988 if (! parse_immediate_expression (str, &exp))
1989 return FALSE;
1990
1991 if (exp.X_op != O_constant)
1992 {
1993 set_syntax_error (_("constant expression required"));
1994 return FALSE;
1995 }
1996
1997 *val = exp.X_add_number;
1998 return TRUE;
1999 }
2000
2001 static uint32_t
2002 encode_imm_float_bits (uint32_t imm)
2003 {
2004 return ((imm >> 19) & 0x7f) /* b[25:19] -> b[6:0] */
2005 | ((imm >> (31 - 7)) & 0x80); /* b[31] -> b[7] */
2006 }
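
/* For example, 1.0f is 0x3f800000 in IEEE754 single precision, which the
   expression above maps to the 8-bit immediate 0x70 (the FMOV imm8 encoding
   of 1.0); likewise 2.0f (0x40000000) maps to 0x00.  */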
2007
2008 /* Return TRUE if the single-precision floating-point value encoded in IMM
2009 can be expressed in the AArch64 8-bit signed floating-point format with
2010 3-bit exponent and normalized 4 bits of precision; in other words, the
2011 floating-point value must be expressible as
2012 (+/-) n / 16 * power (2, r)
2013 where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4. */
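/* For example, 2.0 (16/16 * 2^1), 0.125 (16/16 * 2^-3) and 31.0
   (31/16 * 2^4) are representable in this format, while 32.0 and 0.1
   are not.  */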
2014
2015 static bfd_boolean
2016 aarch64_imm_float_p (uint32_t imm)
2017 {
2018 /* If a single-precision floating-point value has the following bit
2019 pattern, it can be expressed in the AArch64 8-bit floating-point
2020 format:
2021
2022 3 32222222 2221111111111
2023 1 09876543 21098765432109876543210
2024 n Eeeeeexx xxxx0000000000000000000
2025
2026 where n, e and each x are either 0 or 1 independently, with
2027 E == ~ e. */
2028
2029 uint32_t pattern;
2030
2031 /* Prepare the pattern for 'Eeeeee'. */
2032 if (((imm >> 30) & 0x1) == 0)
2033 pattern = 0x3e000000;
2034 else
2035 pattern = 0x40000000;
2036
2037 return (imm & 0x7ffff) == 0 /* lower 19 bits are 0. */
2038 && ((imm & 0x7e000000) == pattern); /* bits 25 - 29 == ~ bit 30. */
2039 }
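/* Illustrative examples for the check above: 3.5f is 0x40600000 -- the low
   19 bits are zero and bits [30:25] equal the expected pattern (bit 30 set,
   bits 29-25 clear), so it is accepted (3.5 == 28/16 * 2^1).  By contrast,
   0.1f (0x3dcccccd) has non-zero low 19 bits, so it is rejected.  */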
2040
2041 /* Like aarch64_imm_float_p but for a double-precision floating-point value.
2042
2043 Return TRUE if the value encoded in IMM can be expressed in the AArch64
2044 8-bit signed floating-point format with 3-bit exponent and normalized 4
2045 bits of precision (i.e. can be used in an FMOV instruction); return the
2046 equivalent single-precision encoding in *FPWORD.
2047
2048 Otherwise return FALSE. */
2049
2050 static bfd_boolean
2051 aarch64_double_precision_fmovable (uint64_t imm, uint32_t *fpword)
2052 {
2053 /* If a double-precision floating-point value has the following bit
2054 pattern, it can be expressed in the AArch64 8-bit floating-point
2055 format:
2056
2057 6 66655555555 554444444...21111111111
2058 3 21098765432 109876543...098765432109876543210
2059 n Eeeeeeeeexx xxxx00000...000000000000000000000
2060
2061 where n, e and each x are either 0 or 1 independently, with
2062 E == ~ e. */
2063
2064 uint32_t pattern;
2065 uint32_t high32 = imm >> 32;
2066
2067 /* Lower 32 bits need to be 0s. */
2068 if ((imm & 0xffffffff) != 0)
2069 return FALSE;
2070
2071 /* Prepare the pattern for 'Eeeeeeeee'. */
2072 if (((high32 >> 30) & 0x1) == 0)
2073 pattern = 0x3fc00000;
2074 else
2075 pattern = 0x40000000;
2076
2077 if ((high32 & 0xffff) == 0 /* bits 32 - 47 are 0. */
2078 && (high32 & 0x7fc00000) == pattern) /* bits 54 - 61 == ~ bit 62. */
2079 {
2080 /* Convert to the single-precision encoding.
2081 i.e. convert
2082 n Eeeeeeeeexx xxxx00000...000000000000000000000
2083 to
2084 n Eeeeeexx xxxx0000000000000000000. */
2085 *fpword = ((high32 & 0xfe000000) /* nEeeeee. */
2086 | (((high32 >> 16) & 0x3f) << 19)); /* xxxxxx. */
2087 return TRUE;
2088 }
2089 else
2090 return FALSE;
2091 }
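/* A worked example (illustrative): 1.0 in double precision is
   0x3ff0000000000000.  The low 32 bits are zero, bits [47:32] are zero
   and bits [61:54] are the complement of bit 62, so the value is
   accepted; the conversion yields *FPWORD == 0x3f800000, i.e. 1.0f.  */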
2092
2093 /* Parse a floating-point immediate. Return TRUE on success and return the
2094 value in *IMMED in the format of IEEE754 single-precision encoding.
2095 *CCP points to the start of the string; DP_P is TRUE when the immediate
2096 is expected to be in double-precision (N.B. this only matters when
2097 hexadecimal representation is involved).
2098
2099 N.B. 0.0 is accepted by this function. */
2100
2101 static bfd_boolean
2102 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p)
2103 {
2104 char *str = *ccp;
2105 char *fpnum;
2106 LITTLENUM_TYPE words[MAX_LITTLENUMS];
2107 int found_fpchar = 0;
2108 int64_t val = 0;
2109 unsigned fpword = 0;
2110 bfd_boolean hex_p = FALSE;
2111
2112 skip_past_char (&str, '#');
2113
2114 fpnum = str;
2115 skip_whitespace (fpnum);
2116
2117 if (strncmp (fpnum, "0x", 2) == 0)
2118 {
2119 /* Support the hexadecimal representation of the IEEE754 encoding.
2120 Double-precision is expected when DP_P is TRUE, otherwise the
2121 representation should be in single-precision. */
2122 if (! parse_constant_immediate (&str, &val))
2123 goto invalid_fp;
2124
2125 if (dp_p)
2126 {
2127 if (! aarch64_double_precision_fmovable (val, &fpword))
2128 goto invalid_fp;
2129 }
2130 else if ((uint64_t) val > 0xffffffff)
2131 goto invalid_fp;
2132 else
2133 fpword = val;
2134
2135 hex_p = TRUE;
2136 }
2137 else
2138 {
2139 /* We must not accidentally parse an integer as a floating-point number.
2140 Make sure that the value we parse is not an integer by checking for
2141 special characters '.', 'e' or 'E'. */
2142 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2143 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2144 {
2145 found_fpchar = 1;
2146 break;
2147 }
2148
2149 if (!found_fpchar)
2150 return FALSE;
2151 }
2152
2153 if (! hex_p)
2154 {
2155 int i;
2156
2157 if ((str = atof_ieee (str, 's', words)) == NULL)
2158 goto invalid_fp;
2159
2160 /* Our FP word must be 32 bits (single-precision FP). */
2161 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2162 {
2163 fpword <<= LITTLENUM_NUMBER_OF_BITS;
2164 fpword |= words[i];
2165 }
2166 }
2167
2168 if (aarch64_imm_float_p (fpword) || (fpword & 0x7fffffff) == 0)
2169 {
2170 *immed = fpword;
2171 *ccp = str;
2172 return TRUE;
2173 }
2174
2175 invalid_fp:
2176 set_fatal_syntax_error (_("invalid floating-point constant"));
2177 return FALSE;
2178 }
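/* Illustrative behaviour of the parser above: "#2.0" and, when DP_P is
   FALSE, "#0x40000000" both yield *IMMED == 0x40000000; an input such as
   "#0x1" is rejected, since 0x00000001 is neither FMOV-encodable nor a
   signed zero.  */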
2179
2180 /* Less-generic immediate-value read function with the possibility of loading
2181 a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2182 instructions.
2183
2184 To prevent the expression parser from pushing a register name into the
2185 symbol table as an undefined symbol, a check is first done to find
2186 out whether STR is a valid register name followed by a comma or the end
2187 of line. Return FALSE if STR is such a register name. */
2188
2189 static bfd_boolean
2190 parse_big_immediate (char **str, int64_t *imm)
2191 {
2192 char *ptr = *str;
2193
2194 if (reg_name_p (ptr, REG_TYPE_R_Z_BHSDQ_V))
2195 {
2196 set_syntax_error (_("immediate operand required"));
2197 return FALSE;
2198 }
2199
2200 my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2201
2202 if (inst.reloc.exp.X_op == O_constant)
2203 *imm = inst.reloc.exp.X_add_number;
2204
2205 *str = ptr;
2206
2207 return TRUE;
2208 }
2209
2210 /* Record in *RELOC that the operand described by *OPERAND needs a GAS
2211 internal fixup. If NEED_LIBOPCODES_P is non-zero, the fixup will need
2212 assistance from libopcodes. */
2213
2214 static inline void
2215 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2216 const aarch64_opnd_info *operand,
2217 int need_libopcodes_p)
2218 {
2219 reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2220 reloc->opnd = operand->type;
2221 if (need_libopcodes_p)
2222 reloc->need_libopcodes_p = 1;
2223 }
2224
2225 /* Return TRUE if the instruction needs to be fixed up later internally by
2226 the GAS; otherwise return FALSE. */
2227
2228 static inline bfd_boolean
2229 aarch64_gas_internal_fixup_p (void)
2230 {
2231 return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2232 }
2233
2234 /* Assign the immediate value to the relevant field in *OPERAND if
2235 RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2236 needs an internal fixup in a later stage.
2237 ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2238 IMM.VALUE that may get assigned with the constant. */
2239 static inline void
2240 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2241 aarch64_opnd_info *operand,
2242 int addr_off_p,
2243 int need_libopcodes_p,
2244 int skip_p)
2245 {
2246 if (reloc->exp.X_op == O_constant)
2247 {
2248 if (addr_off_p)
2249 operand->addr.offset.imm = reloc->exp.X_add_number;
2250 else
2251 operand->imm.value = reloc->exp.X_add_number;
2252 reloc->type = BFD_RELOC_UNUSED;
2253 }
2254 else
2255 {
2256 aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2257 /* Tell libopcodes to ignore this operand or not. This is helpful
2258 when one of the operands needs to be fixed up later but we need
2259 libopcodes to check the other operands. */
2260 operand->skip = skip_p;
2261 }
2262 }
2263
2264 /* Relocation modifiers. Each entry in the table contains the textual
2265 name for the relocation which may be placed before a symbol used as
2266 a load/store offset, or add immediate. It must be surrounded by a
2267 leading and trailing colon, for example:
2268
2269 ldr x0, [x1, #:rello:varsym]
2270 add x0, x1, #:rello:varsym */
2271
2272 struct reloc_table_entry
2273 {
2274 const char *name;
2275 int pc_rel;
2276 bfd_reloc_code_real_type adrp_type;
2277 bfd_reloc_code_real_type movw_type;
2278 bfd_reloc_code_real_type add_type;
2279 bfd_reloc_code_real_type ldst_type;
2280 };
2281
2282 static struct reloc_table_entry reloc_table[] = {
2283 /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2284 {"lo12", 0,
2285 0,
2286 0,
2287 BFD_RELOC_AARCH64_ADD_LO12,
2288 BFD_RELOC_AARCH64_LDST_LO12},
2289
2290 /* Higher 21 bits of pc-relative page offset: ADRP */
2291 {"pg_hi21", 1,
2292 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2293 0,
2294 0,
2295 0},
2296
2297 /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2298 {"pg_hi21_nc", 1,
2299 BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2300 0,
2301 0,
2302 0},
2303
2304 /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2305 {"abs_g0", 0,
2306 0,
2307 BFD_RELOC_AARCH64_MOVW_G0,
2308 0,
2309 0},
2310
2311 /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2312 {"abs_g0_s", 0,
2313 0,
2314 BFD_RELOC_AARCH64_MOVW_G0_S,
2315 0,
2316 0},
2317
2318 /* Less significant bits 0-15 of address/value: MOVK, no check */
2319 {"abs_g0_nc", 0,
2320 0,
2321 BFD_RELOC_AARCH64_MOVW_G0_NC,
2322 0,
2323 0},
2324
2325 /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2326 {"abs_g1", 0,
2327 0,
2328 BFD_RELOC_AARCH64_MOVW_G1,
2329 0,
2330 0},
2331
2332 /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2333 {"abs_g1_s", 0,
2334 0,
2335 BFD_RELOC_AARCH64_MOVW_G1_S,
2336 0,
2337 0},
2338
2339 /* Less significant bits 16-31 of address/value: MOVK, no check */
2340 {"abs_g1_nc", 0,
2341 0,
2342 BFD_RELOC_AARCH64_MOVW_G1_NC,
2343 0,
2344 0},
2345
2346 /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2347 {"abs_g2", 0,
2348 0,
2349 BFD_RELOC_AARCH64_MOVW_G2,
2350 0,
2351 0},
2352
2353 /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2354 {"abs_g2_s", 0,
2355 0,
2356 BFD_RELOC_AARCH64_MOVW_G2_S,
2357 0,
2358 0},
2359
2360 /* Less significant bits 32-47 of address/value: MOVK, no check */
2361 {"abs_g2_nc", 0,
2362 0,
2363 BFD_RELOC_AARCH64_MOVW_G2_NC,
2364 0,
2365 0},
2366
2367 /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2368 {"abs_g3", 0,
2369 0,
2370 BFD_RELOC_AARCH64_MOVW_G3,
2371 0,
2372 0},
2373
2374 /* Get to the page containing GOT entry for a symbol. */
2375 {"got", 1,
2376 BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2377 0,
2378 0,
2379 BFD_RELOC_AARCH64_GOT_LD_PREL19},
2380
2381 /* 12 bit offset into the page containing GOT entry for that symbol. */
2382 {"got_lo12", 0,
2383 0,
2384 0,
2385 0,
2386 BFD_RELOC_AARCH64_LD_GOT_LO12_NC},
2387
2388 /* Get to the page containing GOT TLS entry for a symbol */
2389 {"tlsgd", 0,
2390 BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2391 0,
2392 0,
2393 0},
2394
2395 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2396 {"tlsgd_lo12", 0,
2397 0,
2398 0,
2399 BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2400 0},
2401
2402 /* Get to the page containing GOT TLS entry for a symbol */
2403 {"tlsdesc", 0,
2404 BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2405 0,
2406 0,
2407 0},
2408
2409 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2410 {"tlsdesc_lo12", 0,
2411 0,
2412 0,
2413 BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2414 BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC},
2415
2416 /* Get to the page containing GOT TLS entry for a symbol */
2417 {"gottprel", 0,
2418 BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2419 0,
2420 0,
2421 0},
2422
2423 /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2424 {"gottprel_lo12", 0,
2425 0,
2426 0,
2427 0,
2428 BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC},
2429
2430 /* Get tp offset for a symbol. */
2431 {"tprel", 0,
2432 0,
2433 0,
2434 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2435 0},
2436
2437 /* Get tp offset for a symbol. */
2438 {"tprel_lo12", 0,
2439 0,
2440 0,
2441 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2442 0},
2443
2444 /* Get tp offset for a symbol. */
2445 {"tprel_hi12", 0,
2446 0,
2447 0,
2448 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2449 0},
2450
2451 /* Get tp offset for a symbol. */
2452 {"tprel_lo12_nc", 0,
2453 0,
2454 0,
2455 BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2456 0},
2457
2458 /* Most significant bits 32-47 of address/value: MOVZ. */
2459 {"tprel_g2", 0,
2460 0,
2461 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2462 0,
2463 0},
2464
2465 /* Most significant bits 16-31 of address/value: MOVZ. */
2466 {"tprel_g1", 0,
2467 0,
2468 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2469 0,
2470 0},
2471
2472 /* Most significant bits 16-31 of address/value: MOVZ, no check. */
2473 {"tprel_g1_nc", 0,
2474 0,
2475 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2476 0,
2477 0},
2478
2479 /* Most significant bits 0-15 of address/value: MOVZ. */
2480 {"tprel_g0", 0,
2481 0,
2482 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2483 0,
2484 0},
2485
2486 /* Most significant bits 0-15 of address/value: MOVZ, no check. */
2487 {"tprel_g0_nc", 0,
2488 0,
2489 BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2490 0,
2491 0},
2492 };
2493
2494 /* Given the address of a pointer pointing to the textual name of a
2495 relocation as may appear in assembler source, attempt to find its
2496 details in reloc_table. The pointer will be updated to the character
2497 after the trailing colon. On failure, NULL will be returned;
2498 otherwise return the reloc_table_entry. */
2499
2500 static struct reloc_table_entry *
2501 find_reloc_table_entry (char **str)
2502 {
2503 unsigned int i;
2504 for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2505 {
2506 int length = strlen (reloc_table[i].name);
2507
2508 if (strncasecmp (reloc_table[i].name, *str, length) == 0
2509 && (*str)[length] == ':')
2510 {
2511 *str += (length + 1);
2512 return &reloc_table[i];
2513 }
2514 }
2515
2516 return NULL;
2517 }
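/* For example, with *STR pointing at "lo12:sym", the "lo12" entry is
   returned and *STR is advanced past the trailing colon to point at
   "sym".  Note that requiring the trailing colon prevents a shorter
   name such as "got" from matching an input like "got_lo12:sym".  */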
2518
2519 /* Mode argument to parse_shift and parser_shifter_operand. */
2520 enum parse_shift_mode
2521 {
2522 SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2523 "#imm{,lsl #n}" */
2524 SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or
2525 "#imm" */
2526 SHIFTED_LSL, /* bare "lsl #n" */
2527 SHIFTED_LSL_MSL, /* "lsl|msl #n" */
2528 SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */
2529 };
2530
2531 /* Parse a <shift> operator on an AArch64 data processing instruction.
2532 Return TRUE on success; otherwise return FALSE. */
2533 static bfd_boolean
2534 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2535 {
2536 const struct aarch64_name_value_pair *shift_op;
2537 enum aarch64_modifier_kind kind;
2538 expressionS exp;
2539 int exp_has_prefix;
2540 char *s = *str;
2541 char *p = s;
2542
2543 for (p = *str; ISALPHA (*p); p++)
2544 ;
2545
2546 if (p == *str)
2547 {
2548 set_syntax_error (_("shift expression expected"));
2549 return FALSE;
2550 }
2551
2552 shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2553
2554 if (shift_op == NULL)
2555 {
2556 set_syntax_error (_("shift operator expected"));
2557 return FALSE;
2558 }
2559
2560 kind = aarch64_get_operand_modifier (shift_op);
2561
2562 if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2563 {
2564 set_syntax_error (_("invalid use of 'MSL'"));
2565 return FALSE;
2566 }
2567
2568 switch (mode)
2569 {
2570 case SHIFTED_LOGIC_IMM:
2571 if (aarch64_extend_operator_p (kind) == TRUE)
2572 {
2573 set_syntax_error (_("extending shift is not permitted"));
2574 return FALSE;
2575 }
2576 break;
2577
2578 case SHIFTED_ARITH_IMM:
2579 if (kind == AARCH64_MOD_ROR)
2580 {
2581 set_syntax_error (_("'ROR' shift is not permitted"));
2582 return FALSE;
2583 }
2584 break;
2585
2586 case SHIFTED_LSL:
2587 if (kind != AARCH64_MOD_LSL)
2588 {
2589 set_syntax_error (_("only 'LSL' shift is permitted"));
2590 return FALSE;
2591 }
2592 break;
2593
2594 case SHIFTED_REG_OFFSET:
2595 if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
2596 && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
2597 {
2598 set_fatal_syntax_error
2599 (_("invalid shift for the register offset addressing mode"));
2600 return FALSE;
2601 }
2602 break;
2603
2604 case SHIFTED_LSL_MSL:
2605 if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
2606 {
2607 set_syntax_error (_("invalid shift operator"));
2608 return FALSE;
2609 }
2610 break;
2611
2612 default:
2613 abort ();
2614 }
2615
2616 /* Whitespace can appear here if the next thing is a bare digit. */
2617 skip_whitespace (p);
2618
2619 /* Parse shift amount. */
2620 exp_has_prefix = 0;
2621 if (mode == SHIFTED_REG_OFFSET && *p == ']')
2622 exp.X_op = O_absent;
2623 else
2624 {
2625 if (is_immediate_prefix (*p))
2626 {
2627 p++;
2628 exp_has_prefix = 1;
2629 }
2630 my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
2631 }
2632 if (exp.X_op == O_absent)
2633 {
2634 if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix)
2635 {
2636 set_syntax_error (_("missing shift amount"));
2637 return FALSE;
2638 }
2639 operand->shifter.amount = 0;
2640 }
2641 else if (exp.X_op != O_constant)
2642 {
2643 set_syntax_error (_("constant shift amount required"));
2644 return FALSE;
2645 }
2646 else if (exp.X_add_number < 0 || exp.X_add_number > 63)
2647 {
2648 set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
2649 return FALSE;
2650 }
2651 else
2652 {
2653 operand->shifter.amount = exp.X_add_number;
2654 operand->shifter.amount_present = 1;
2655 }
2656
2657 operand->shifter.operator_present = 1;
2658 operand->shifter.kind = kind;
2659
2660 *str = p;
2661 return TRUE;
2662 }
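/* Illustrative examples for SHIFTED_REG_OFFSET mode: parsing "sxtw #2"
   records kind AARCH64_MOD_SXTW with amount 2 (amount_present set);
   parsing "sxtw" immediately followed by ']' records the same kind with
   an implicit amount of 0, since extend operators may omit the amount.  */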
2663
2664 /* Parse a <shifter_operand> for a data processing instruction:
2665
2666 #<immediate>
2667 #<immediate>, LSL #imm
2668
2669 Validation of immediate operands is deferred to md_apply_fix.
2670
2671 Return TRUE on success; otherwise return FALSE. */
2672
2673 static bfd_boolean
2674 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
2675 enum parse_shift_mode mode)
2676 {
2677 char *p;
2678
2679 if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
2680 return FALSE;
2681
2682 p = *str;
2683
2684 /* Accept an immediate expression. */
2685 if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
2686 return FALSE;
2687
2688 /* Accept optional LSL for arithmetic immediate values. */
2689 if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
2690 if (! parse_shift (&p, operand, SHIFTED_LSL))
2691 return FALSE;
2692
2693 /* Do not accept any shifter for logical immediate values. */
2694 if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
2695 && parse_shift (&p, operand, mode))
2696 {
2697 set_syntax_error (_("unexpected shift operator"));
2698 return FALSE;
2699 }
2700
2701 *str = p;
2702 return TRUE;
2703 }
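/* For example (illustrative), in SHIFTED_ARITH_IMM mode "#20, lsl #12"
   leaves the constant 20 in inst.reloc.exp and records an LSL shifter
   with amount 12 in *OPERAND; in SHIFTED_LOGIC_IMM mode the same
   trailing ", lsl #12" is rejected with "unexpected shift operator".  */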
2704
2705 /* Parse a <shifter_operand> for a data processing instruction:
2706
2707 <Rm>
2708 <Rm>, <shift>
2709 #<immediate>
2710 #<immediate>, LSL #imm
2711
2712 where <shift> is handled by parse_shift above, and the last two
2713 cases are handled by the function above.
2714
2715 Validation of immediate operands is deferred to md_apply_fix.
2716
2717 Return TRUE on success; otherwise return FALSE. */
2718
2719 static bfd_boolean
2720 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
2721 enum parse_shift_mode mode)
2722 {
2723 int reg;
2724 int isreg32, isregzero;
2725 enum aarch64_operand_class opd_class
2726 = aarch64_get_operand_class (operand->type);
2727
2728 if ((reg =
2729 aarch64_reg_parse_32_64 (str, 0, 0, &isreg32, &isregzero)) != PARSE_FAIL)
2730 {
2731 if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
2732 {
2733 set_syntax_error (_("unexpected register in the immediate operand"));
2734 return FALSE;
2735 }
2736
2737 if (!isregzero && reg == REG_SP)
2738 {
2739 set_syntax_error (BAD_SP);
2740 return FALSE;
2741 }
2742
2743 operand->reg.regno = reg;
2744 operand->qualifier = isreg32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2745
2746 /* Accept optional shift operation on register. */
2747 if (! skip_past_comma (str))
2748 return TRUE;
2749
2750 if (! parse_shift (str, operand, mode))
2751 return FALSE;
2752
2753 return TRUE;
2754 }
2755 else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
2756 {
2757 set_syntax_error
2758 (_("integer register expected in the extended/shifted operand "
2759 "register"));
2760 return FALSE;
2761 }
2762
2763 /* We have a shifted immediate variable. */
2764 return parse_shifter_operand_imm (str, operand, mode);
2765 }
2766
2767 /* Return TRUE on success; return FALSE otherwise. */
2768
2769 static bfd_boolean
2770 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
2771 enum parse_shift_mode mode)
2772 {
2773 char *p = *str;
2774
2775 /* Determine if we have the sequence of characters #: or just :
2776 coming next. If we do, then we check for a :rello: relocation
2777 modifier. If we don't, punt the whole lot to
2778 parse_shifter_operand. */
2779
2780 if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
2781 {
2782 struct reloc_table_entry *entry;
2783
2784 if (p[0] == '#')
2785 p += 2;
2786 else
2787 p++;
2788 *str = p;
2789
2790 /* Try to parse a relocation. Anything else is an error. */
2791 if (!(entry = find_reloc_table_entry (str)))
2792 {
2793 set_syntax_error (_("unknown relocation modifier"));
2794 return FALSE;
2795 }
2796
2797 if (entry->add_type == 0)
2798 {
2799 set_syntax_error
2800 (_("this relocation modifier is not allowed on this instruction"));
2801 return FALSE;
2802 }
2803
2804 /* Save str before we decompose it. */
2805 p = *str;
2806
2807 /* Next, we parse the expression. */
2808 if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
2809 return FALSE;
2810
2811 /* Record the relocation type (use the ADD variant here). */
2812 inst.reloc.type = entry->add_type;
2813 inst.reloc.pc_rel = entry->pc_rel;
2814
2815 /* If str is empty, we've reached the end, stop here. */
2816 if (**str == '\0')
2817 return TRUE;
2818
2819 /* Otherwise, we have a shifted reloc modifier, so rewind to
2820 recover the variable name and continue parsing for the shifter. */
2821 *str = p;
2822 return parse_shifter_operand_imm (str, operand, mode);
2823 }
2824
2825 return parse_shifter_operand (str, operand, mode);
2826 }
2827
2828 /* Parse all forms of an address expression. Information is written
2829 to *OPERAND and/or inst.reloc.
2830
2831 The A64 instruction set has the following addressing modes:
2832
2833 Offset
2834 [base] // in SIMD ld/st structure
2835 [base{,#0}] // in ld/st exclusive
2836 [base{,#imm}]
2837 [base,Xm{,LSL #imm}]
2838 [base,Xm,SXTX {#imm}]
2839 [base,Wm,(S|U)XTW {#imm}]
2840 Pre-indexed
2841 [base,#imm]!
2842 Post-indexed
2843 [base],#imm
2844 [base],Xm // in SIMD ld/st structure
2845 PC-relative (literal)
2846 label
2847 =immediate
2848
2849 (As a convenience, the notation "=immediate" is permitted in conjunction
2850 with the pc-relative literal load instructions to automatically place an
2851 immediate value or symbolic address in a nearby literal pool and generate
2852 a hidden label which references it.)
2853
2854 Upon a successful parsing, the address structure in *OPERAND will be
2855 filled in the following way:
2856
2857 .base_regno = <base>
2858 .offset.is_reg // 1 if the offset is a register
2859 .offset.imm = <imm>
2860 .offset.regno = <Rm>
2861
2862 For different addressing modes defined in the A64 ISA:
2863
2864 Offset
2865 .pcrel=0; .preind=1; .postind=0; .writeback=0
2866 Pre-indexed
2867 .pcrel=0; .preind=1; .postind=0; .writeback=1
2868 Post-indexed
2869 .pcrel=0; .preind=0; .postind=1; .writeback=1
2870 PC-relative (literal)
2871 .pcrel=1; .preind=1; .postind=0; .writeback=0
2872
2873 The shift/extension information, if any, will be stored in .shifter.
2874
2875 It is the caller's responsibility to check for addressing modes not
2876 supported by the instruction, and to set inst.reloc.type. */
2877
2878 static bfd_boolean
2879 parse_address_main (char **str, aarch64_opnd_info *operand, int reloc,
2880 int accept_reg_post_index)
2881 {
2882 char *p = *str;
2883 int reg;
2884 int isreg32, isregzero;
2885 expressionS *exp = &inst.reloc.exp;
2886
2887 if (! skip_past_char (&p, '['))
2888 {
2889 /* =immediate or label. */
2890 operand->addr.pcrel = 1;
2891 operand->addr.preind = 1;
2892
2893 /* #:<reloc_op>:<symbol> */
2894 skip_past_char (&p, '#');
2895 if (reloc && skip_past_char (&p, ':'))
2896 {
2897 struct reloc_table_entry *entry;
2898
2899 /* Try to parse a relocation modifier. Anything else is
2900 an error. */
2901 entry = find_reloc_table_entry (&p);
2902 if (! entry)
2903 {
2904 set_syntax_error (_("unknown relocation modifier"));
2905 return FALSE;
2906 }
2907
2908 if (entry->ldst_type == 0)
2909 {
2910 set_syntax_error
2911 (_("this relocation modifier is not allowed on this "
2912 "instruction"));
2913 return FALSE;
2914 }
2915
2916 /* #:<reloc_op>: */
2917 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2918 {
2919 set_syntax_error (_("invalid relocation expression"));
2920 return FALSE;
2921 }
2922
2923 /* #:<reloc_op>:<expr> */
2924 /* Record the load/store relocation type. */
2925 inst.reloc.type = entry->ldst_type;
2926 inst.reloc.pc_rel = entry->pc_rel;
2927 }
2928 else
2929 {
2930
2931 if (skip_past_char (&p, '='))
2932 /* =immediate; need to generate the literal in the literal pool. */
2933 inst.gen_lit_pool = 1;
2934
2935 if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
2936 {
2937 set_syntax_error (_("invalid address"));
2938 return FALSE;
2939 }
2940 }
2941
2942 *str = p;
2943 return TRUE;
2944 }
2945
2946 /* [ */
2947
2948 /* Accept SP and reject ZR */
2949 reg = aarch64_reg_parse_32_64 (&p, 0, 1, &isreg32, &isregzero);
2950 if (reg == PARSE_FAIL || isreg32)
2951 {
2952 set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
2953 return FALSE;
2954 }
2955 operand->addr.base_regno = reg;
2956
2957 /* [Xn */
2958 if (skip_past_comma (&p))
2959 {
2960 /* [Xn, */
2961 operand->addr.preind = 1;
2962
2963 /* Reject SP and accept ZR */
2964 reg = aarch64_reg_parse_32_64 (&p, 1, 0, &isreg32, &isregzero);
2965 if (reg != PARSE_FAIL)
2966 {
2967 /* [Xn,Rm */
2968 operand->addr.offset.regno = reg;
2969 operand->addr.offset.is_reg = 1;
2970 /* Shifted index. */
2971 if (skip_past_comma (&p))
2972 {
2973 /* [Xn,Rm, */
2974 if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
2975 /* Use the diagnostics set in parse_shift, so do not set a new
2976 error message here. */
2977 return FALSE;
2978 }
2979 /* We only accept:
2980 [base,Xm{,LSL #imm}]
2981 [base,Xm,SXTX {#imm}]
2982 [base,Wm,(S|U)XTW {#imm}] */
2983 if (operand->shifter.kind == AARCH64_MOD_NONE
2984 || operand->shifter.kind == AARCH64_MOD_LSL
2985 || operand->shifter.kind == AARCH64_MOD_SXTX)
2986 {
2987 if (isreg32)
2988 {
2989 set_syntax_error (_("invalid use of 32-bit register offset"));
2990 return FALSE;
2991 }
2992 }
2993 else if (!isreg32)
2994 {
2995 set_syntax_error (_("invalid use of 64-bit register offset"));
2996 return FALSE;
2997 }
2998 }
2999 else
3000 {
3001 /* [Xn,#:<reloc_op>:<symbol> */
3002 skip_past_char (&p, '#');
3003 if (reloc && skip_past_char (&p, ':'))
3004 {
3005 struct reloc_table_entry *entry;
3006
3007 /* Try to parse a relocation modifier. Anything else is
3008 an error. */
3009 if (!(entry = find_reloc_table_entry (&p)))
3010 {
3011 set_syntax_error (_("unknown relocation modifier"));
3012 return FALSE;
3013 }
3014
3015 if (entry->ldst_type == 0)
3016 {
3017 set_syntax_error
3018 (_("this relocation modifier is not allowed on this "
3019 "instruction"));
3020 return FALSE;
3021 }
3022
3023 /* [Xn,#:<reloc_op>: */
3024 /* We now have the group relocation table entry corresponding to
3025 the name in the assembler source. Next, we parse the
3026 expression. */
3027 if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3028 {
3029 set_syntax_error (_("invalid relocation expression"));
3030 return FALSE;
3031 }
3032
3033 /* [Xn,#:<reloc_op>:<expr> */
3034 /* Record the load/store relocation type. */
3035 inst.reloc.type = entry->ldst_type;
3036 inst.reloc.pc_rel = entry->pc_rel;
3037 }
3038 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3039 {
3040 set_syntax_error (_("invalid expression in the address"));
3041 return FALSE;
3042 }
3043 /* [Xn,<expr> */
3044 }
3045 }
3046
3047 if (! skip_past_char (&p, ']'))
3048 {
3049 set_syntax_error (_("']' expected"));
3050 return FALSE;
3051 }
3052
3053 if (skip_past_char (&p, '!'))
3054 {
3055 if (operand->addr.preind && operand->addr.offset.is_reg)
3056 {
3057 set_syntax_error (_("register offset not allowed in pre-indexed "
3058 "addressing mode"));
3059 return FALSE;
3060 }
3061 /* [Xn]! */
3062 operand->addr.writeback = 1;
3063 }
3064 else if (skip_past_comma (&p))
3065 {
3066 /* [Xn], */
3067 operand->addr.postind = 1;
3068 operand->addr.writeback = 1;
3069
3070 if (operand->addr.preind)
3071 {
3072 set_syntax_error (_("cannot combine pre- and post-indexing"));
3073 return FALSE;
3074 }
3075
3076 if (accept_reg_post_index
3077 && (reg = aarch64_reg_parse_32_64 (&p, 1, 1, &isreg32,
3078 &isregzero)) != PARSE_FAIL)
3079 {
3080 /* [Xn],Xm */
3081 if (isreg32)
3082 {
3083 set_syntax_error (_("invalid 32-bit register offset"));
3084 return FALSE;
3085 }
3086 operand->addr.offset.regno = reg;
3087 operand->addr.offset.is_reg = 1;
3088 }
3089 else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3090 {
3091 /* [Xn],#expr */
3092 set_syntax_error (_("invalid expression in the address"));
3093 return FALSE;
3094 }
3095 }
3096
3097 /* If at this point neither .preind nor .postind is set, we have a
3098 bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0]. */
3099 if (operand->addr.preind == 0 && operand->addr.postind == 0)
3100 {
3101 if (operand->addr.writeback)
3102 {
3103 /* Reject [Rn]! */
3104 set_syntax_error (_("missing offset in the pre-indexed address"));
3105 return FALSE;
3106 }
3107 operand->addr.preind = 1;
3108 inst.reloc.exp.X_op = O_constant;
3109 inst.reloc.exp.X_add_number = 0;
3110 }
3111
3112 *str = p;
3113 return TRUE;
3114 }
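/* For example (illustrative), "[x1,#8]!" sets .base_regno to 1,
   .preind and .writeback to 1, and leaves the constant 8 in
   inst.reloc.exp for the caller to assign (typically via
   assign_imm_if_const_or_fixup_later).  */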
3115
3116 /* Return TRUE on success; otherwise return FALSE. */
3117 static bfd_boolean
3118 parse_address (char **str, aarch64_opnd_info *operand,
3119 int accept_reg_post_index)
3120 {
3121 return parse_address_main (str, operand, 0, accept_reg_post_index);
3122 }
3123
3124 /* Return TRUE on success; otherwise return FALSE. */
3125 static bfd_boolean
3126 parse_address_reloc (char **str, aarch64_opnd_info *operand)
3127 {
3128 return parse_address_main (str, operand, 1, 0);
3129 }
3130
3131 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3132 Return TRUE on success; otherwise return FALSE. */
3133 static bfd_boolean
3134 parse_half (char **str, int *internal_fixup_p)
3135 {
3136 char *p, *saved;
3137 int dummy;
3138
3139 p = *str;
3140 skip_past_char (&p, '#');
3141
3142 gas_assert (internal_fixup_p);
3143 *internal_fixup_p = 0;
3144
3145 if (*p == ':')
3146 {
3147 struct reloc_table_entry *entry;
3148
3149 /* Try to parse a relocation. Anything else is an error. */
3150 ++p;
3151 if (!(entry = find_reloc_table_entry (&p)))
3152 {
3153 set_syntax_error (_("unknown relocation modifier"));
3154 return FALSE;
3155 }
3156
3157 if (entry->movw_type == 0)
3158 {
3159 set_syntax_error
3160 (_("this relocation modifier is not allowed on this instruction"));
3161 return FALSE;
3162 }
3163
3164 inst.reloc.type = entry->movw_type;
3165 }
3166 else
3167 *internal_fixup_p = 1;
3168
3169 /* Avoid parsing a register as a general symbol. */
3170 saved = p;
3171 if (aarch64_reg_parse_32_64 (&p, 0, 0, &dummy, &dummy) != PARSE_FAIL)
3172 return FALSE;
3173 p = saved;
3174
3175 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3176 return FALSE;
3177
3178 *str = p;
3179 return TRUE;
3180 }
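/* Illustrative examples: "#:abs_g1_nc:sym" records
   BFD_RELOC_AARCH64_MOVW_G1_NC in inst.reloc.type and parses "sym" into
   inst.reloc.exp; a plain "#1234" has no modifier, so *INTERNAL_FIXUP_P
   is set to 1 and the constant is left in inst.reloc.exp.  */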
3181
3182 /* Parse an operand for an ADRP instruction:
3183 ADRP <Xd>, <label>
3184 Return TRUE on success; otherwise return FALSE. */
3185
3186 static bfd_boolean
3187 parse_adrp (char **str)
3188 {
3189 char *p;
3190
3191 p = *str;
3192 if (*p == ':')
3193 {
3194 struct reloc_table_entry *entry;
3195
3196 /* Try to parse a relocation. Anything else is an error. */
3197 ++p;
3198 if (!(entry = find_reloc_table_entry (&p)))
3199 {
3200 set_syntax_error (_("unknown relocation modifier"));
3201 return FALSE;
3202 }
3203
3204 if (entry->adrp_type == 0)
3205 {
3206 set_syntax_error
3207 (_("this relocation modifier is not allowed on this instruction"));
3208 return FALSE;
3209 }
3210
3211 inst.reloc.type = entry->adrp_type;
3212 }
3213 else
3214 inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3215
3216 inst.reloc.pc_rel = 1;
3217
3218 if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3219 return FALSE;
3220
3221 *str = p;
3222 return TRUE;
3223 }
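/* Illustrative examples: ":got:sym" records BFD_RELOC_AARCH64_ADR_GOT_PAGE,
   while a bare label defaults to BFD_RELOC_AARCH64_ADR_HI21_PCREL; in
   both cases inst.reloc.pc_rel is set.  */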
3224
3225 /* Miscellaneous. */
3226
3227 /* Parse an option for a preload instruction. Returns the encoding for the
3228 option, or PARSE_FAIL. */
3229
3230 static int
3231 parse_pldop (char **str)
3232 {
3233 char *p, *q;
3234 const struct aarch64_name_value_pair *o;
3235
3236 p = q = *str;
3237 while (ISALNUM (*q))
3238 q++;
3239
3240 o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3241 if (!o)
3242 return PARSE_FAIL;
3243
3244 *str = q;
3245 return o->value;
3246 }
3247
3248 /* Parse an option for a barrier instruction. Returns the encoding for the
3249 option, or PARSE_FAIL. */
3250
3251 static int
3252 parse_barrier (char **str)
3253 {
3254 char *p, *q;
3255 const asm_barrier_opt *o;
3256
3257 p = q = *str;
3258 while (ISALPHA (*q))
3259 q++;
3260
3261 o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3262 if (!o)
3263 return PARSE_FAIL;
3264
3265 *str = q;
3266 return o->value;
3267 }
3268
3269 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3270 Returns the encoding for the option, or PARSE_FAIL.
3271
3272 If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3273 implementation defined system register name S3_<op1>_<Cn>_<Cm>_<op2>. */
3274
3275 static int
3276 parse_sys_reg (char **str, struct hash_control *sys_regs, int imple_defined_p)
3277 {
3278 char *p, *q;
3279 char buf[32];
3280 const struct aarch64_name_value_pair *o;
3281 int value;
3282
3283 p = buf;
3284 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3285 if (p < buf + 31)
3286 *p++ = TOLOWER (*q);
3287 *p = '\0';
3288 /* Assert that BUF is large enough. */
3289 gas_assert (p - buf == q - *str);
3290
3291 o = hash_find (sys_regs, buf);
3292 if (!o)
3293 {
3294 if (!imple_defined_p)
3295 return PARSE_FAIL;
3296 else
3297 {
3298 /* Parse S3_<op1>_<Cn>_<Cm>_<op2>, the implementation defined
3299 registers. */
3300 unsigned int op0, op1, cn, cm, op2;
3301 if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2) != 5)
3302 return PARSE_FAIL;
3303 /* The architecture specifies the encoding space for implementation
3304 defined registers as:
3305 op0 op1 CRn CRm op2
3306 11 xxx 1x11 xxxx xxx
3307 For convenience GAS accepts a wider encoding space, as follows:
3308 op0 op1 CRn CRm op2
3309 11 xxx xxxx xxxx xxx */
3310 if (op0 != 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3311 return PARSE_FAIL;
3312 value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
3313 }
3314 }
3315 else
3316 value = o->value;
3317
3318 *str = q;
3319 return value;
3320 }
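/* A worked example (illustrative): with IMPLE_DEFINED_P non-zero, the
   input "s3_0_c15_c7_2" is decomposed into op0=3, op1=0, CRn=15, CRm=7,
   op2=2 and encoded as (3 << 14) | (0 << 11) | (15 << 7) | (7 << 3) | 2,
   i.e. 0xc7ba.  */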
3321
3322 /* Parse a system reg for ic/dc/at/tlbi instructions. Returns the table entry
3323 for the option, or NULL. */
3324
3325 static const aarch64_sys_ins_reg *
3326 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3327 {
3328 char *p, *q;
3329 char buf[32];
3330 const aarch64_sys_ins_reg *o;
3331
3332 p = buf;
3333 for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3334 if (p < buf + 31)
3335 *p++ = TOLOWER (*q);
3336 *p = '\0';
3337
3338 o = hash_find (sys_ins_regs, buf);
3339 if (!o)
3340 return NULL;
3341
3342 *str = q;
3343 return o;
3344 }
3345 \f
3346 #define po_char_or_fail(chr) do { \
3347 if (! skip_past_char (&str, chr)) \
3348 goto failure; \
3349 } while (0)
3350
3351 #define po_reg_or_fail(regtype) do { \
3352 val = aarch64_reg_parse (&str, regtype, &rtype, NULL); \
3353 if (val == PARSE_FAIL) \
3354 { \
3355 set_default_error (); \
3356 goto failure; \
3357 } \
3358 } while (0)
3359
3360 #define po_int_reg_or_fail(reject_sp, reject_rz) do { \
3361 val = aarch64_reg_parse_32_64 (&str, reject_sp, reject_rz, \
3362 &isreg32, &isregzero); \
3363 if (val == PARSE_FAIL) \
3364 { \
3365 set_default_error (); \
3366 goto failure; \
3367 } \
3368 info->reg.regno = val; \
3369 if (isreg32) \
3370 info->qualifier = AARCH64_OPND_QLF_W; \
3371 else \
3372 info->qualifier = AARCH64_OPND_QLF_X; \
3373 } while (0)
3374
3375 #define po_imm_nc_or_fail() do { \
3376 if (! parse_constant_immediate (&str, &val)) \
3377 goto failure; \
3378 } while (0)
3379
3380 #define po_imm_or_fail(min, max) do { \
3381 if (! parse_constant_immediate (&str, &val)) \
3382 goto failure; \
3383 if (val < min || val > max) \
3384 { \
3385 set_fatal_syntax_error (_("immediate value out of range "\
3386 #min " to "#max)); \
3387 goto failure; \
3388 } \
3389 } while (0)
3390
3391 #define po_misc_or_fail(expr) do { \
3392 if (!expr) \
3393 goto failure; \
3394 } while (0)
3395 \f
3396 /* Encode the 12-bit imm field of Add/sub immediate. */
3397 static inline uint32_t
3398 encode_addsub_imm (uint32_t imm)
3399 {
3400 return imm << 10;
3401 }
3402
3403 /* Encode the shift amount field of Add/sub immediate. */
3404 static inline uint32_t
3405 encode_addsub_imm_shift_amount (uint32_t cnt)
3406 {
3407 return cnt << 22;
3408 }
3409
3410
3411 /* Encode the imm field of the Adr instruction. */
3412 static inline uint32_t
3413 encode_adr_imm (uint32_t imm)
3414 {
3415 return (((imm & 0x3) << 29) /* [1:0] -> [30:29] */
3416 | ((imm & (0x7ffff << 2)) << 3)); /* [20:2] -> [23:5] */
3417 }
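/* A worked example (illustrative): encode_adr_imm (0x5) places the low
   two bits 0b01 at [30:29] and bit 2 at bit 5, giving 0x20000020.  */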
3418
3419 /* Encode the immediate field of Move wide immediate. */
3420 static inline uint32_t
3421 encode_movw_imm (uint32_t imm)
3422 {
3423 return imm << 5;
3424 }
3425
3426 /* Encode the 26-bit offset of unconditional branch. */
3427 static inline uint32_t
3428 encode_branch_ofs_26 (uint32_t ofs)
3429 {
3430 return ofs & ((1 << 26) - 1);
3431 }
3432
3433 /* Encode the 19-bit offset of conditional branch and compare & branch. */
3434 static inline uint32_t
3435 encode_cond_branch_ofs_19 (uint32_t ofs)
3436 {
3437 return (ofs & ((1 << 19) - 1)) << 5;
3438 }
3439
3440 /* Encode the 19-bit offset of ld literal. */
3441 static inline uint32_t
3442 encode_ld_lit_ofs_19 (uint32_t ofs)
3443 {
3444 return (ofs & ((1 << 19) - 1)) << 5;
3445 }
3446
3447 /* Encode the 14-bit offset of test & branch. */
3448 static inline uint32_t
3449 encode_tst_branch_ofs_14 (uint32_t ofs)
3450 {
3451 return (ofs & ((1 << 14) - 1)) << 5;
3452 }
3453
3454 /* Encode the 16-bit imm field of svc/hvc/smc. */
3455 static inline uint32_t
3456 encode_svc_imm (uint32_t imm)
3457 {
3458 return imm << 5;
3459 }
3460
3461 /* Reencode add(s) to sub(s), or sub(s) to add(s). */
3462 static inline uint32_t
3463 reencode_addsub_switch_add_sub (uint32_t opcode)
3464 {
3465 return opcode ^ (1 << 30);
3466 }
3467
3468 static inline uint32_t
3469 reencode_movzn_to_movz (uint32_t opcode)
3470 {
3471 return opcode | (1 << 30);
3472 }
3473
3474 static inline uint32_t
3475 reencode_movzn_to_movn (uint32_t opcode)
3476 {
3477 return opcode & ~(1 << 30);
3478 }
3479
3480 /* Overall per-instruction processing. */
3481
3482 /* We need to be able to fix up arbitrary expressions in some statements.
3483 This is so that we can handle symbols that are an arbitrary distance from
3484 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
3485 which returns part of an address in a form which will be valid for
3486 a data instruction. We do this by pushing the expression into a symbol
3487 in the expr_section, and creating a fix for that. */
3488
3489 static fixS *
3490 fix_new_aarch64 (fragS * frag,
3491 int where,
3492 short int size, expressionS * exp, int pc_rel, int reloc)
3493 {
3494 fixS *new_fix;
3495
3496 switch (exp->X_op)
3497 {
3498 case O_constant:
3499 case O_symbol:
3500 case O_add:
3501 case O_subtract:
3502 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
3503 break;
3504
3505 default:
3506 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
3507 pc_rel, reloc);
3508 break;
3509 }
3510 return new_fix;
3511 }
3512 \f
3513 /* Diagnostics on operands errors. */
3514
3515 /* By default, output one-line error message only.
3516 Enable the verbose error message by -merror-verbose. */
3517 static int verbose_error_p = 0;
3518
3519 #ifdef DEBUG_AARCH64
3520 /* N.B. this is only for the purpose of debugging. */
3521 const char* operand_mismatch_kind_names[] =
3522 {
3523 "AARCH64_OPDE_NIL",
3524 "AARCH64_OPDE_RECOVERABLE",
3525 "AARCH64_OPDE_SYNTAX_ERROR",
3526 "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
3527 "AARCH64_OPDE_INVALID_VARIANT",
3528 "AARCH64_OPDE_OUT_OF_RANGE",
3529 "AARCH64_OPDE_UNALIGNED",
3530 "AARCH64_OPDE_REG_LIST",
3531 "AARCH64_OPDE_OTHER_ERROR",
3532 };
3533 #endif /* DEBUG_AARCH64 */
3534
3535 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
3536
3537 When multiple errors of different kinds are found in the same assembly
3538 line, only the error of the highest severity will be picked up for
3539 issuing the diagnostics. */
3540
3541 static inline bfd_boolean
3542 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
3543 enum aarch64_operand_error_kind rhs)
3544 {
3545 gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
3546 gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
3547 gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
3548 gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
3549 gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
3550 gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
3551 gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
3552 gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
3553 return lhs > rhs;
3554 }
3555
3556 /* Helper routine to get the mnemonic name from the assembly instruction
3557 line; should only be called for diagnostic purposes, as a string copy
3558 operation is involved, which may affect the runtime performance if
3559 used elsewhere. */
3560
3561 static const char*
3562 get_mnemonic_name (const char *str)
3563 {
3564 static char mnemonic[32];
3565 char *ptr;
3566
3567 /* Get the first 31 bytes and assume that the full name is included. */
3568 strncpy (mnemonic, str, 31);
3569 mnemonic[31] = '\0';
3570
3571 /* Scan up to the end of the mnemonic, which must end in white space,
3572 '.', or end of string. */
3573 for (ptr = mnemonic; is_part_of_name (*ptr); ++ptr)
3574 ;
3575
3576 *ptr = '\0';
3577
3578 /* Append '...' to the truncated long name. */
3579 if (ptr - mnemonic == 31)
3580 mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
3581
3582 return mnemonic;
3583 }
3584
3585 static void
3586 reset_aarch64_instruction (aarch64_instruction *instruction)
3587 {
3588 memset (instruction, '\0', sizeof (aarch64_instruction));
3589 instruction->reloc.type = BFD_RELOC_UNUSED;
3590 }
3591
3592 /* Data structures storing one user error in the assembly code related to
3593 operands. */
3594
3595 struct operand_error_record
3596 {
3597 const aarch64_opcode *opcode;
3598 aarch64_operand_error detail;
3599 struct operand_error_record *next;
3600 };
3601
3602 typedef struct operand_error_record operand_error_record;
3603
3604 struct operand_errors
3605 {
3606 operand_error_record *head;
3607 operand_error_record *tail;
3608 };
3609
3610 typedef struct operand_errors operand_errors;
3611
3612 /* Top-level data structure reporting user errors for the current line of
3613 the assembly code.
3614 The way md_assemble works is that all opcodes sharing the same mnemonic
3615 name are iterated to find a match to the assembly line. In this data
3616 structure, each such opcode will have one operand_error_record
3617 allocated and inserted. In other words, excess errors related to
3618 a single opcode are disregarded. */
3619 operand_errors operand_error_report;
3620
3621 /* Free record nodes. */
3622 static operand_error_record *free_opnd_error_record_nodes = NULL;
3623
3624 /* Initialize the data structure that stores the operand mismatch
3625 information on assembling one line of the assembly code. */
3626 static void
3627 init_operand_error_report (void)
3628 {
3629 if (operand_error_report.head != NULL)
3630 {
3631 gas_assert (operand_error_report.tail != NULL);
3632 operand_error_report.tail->next = free_opnd_error_record_nodes;
3633 free_opnd_error_record_nodes = operand_error_report.head;
3634 operand_error_report.head = NULL;
3635 operand_error_report.tail = NULL;
3636 return;
3637 }
3638 gas_assert (operand_error_report.tail == NULL);
3639 }
3640
3641 /* Return TRUE if some operand error has been recorded during the
3642 parsing of the current assembly line using the opcode *OPCODE;
3643 otherwise return FALSE. */
3644 static inline bfd_boolean
3645 opcode_has_operand_error_p (const aarch64_opcode *opcode)
3646 {
3647 operand_error_record *record = operand_error_report.head;
3648 return record && record->opcode == opcode;
3649 }
3650
3651 /* Add the error record *NEW_RECORD to operand_error_report. The record's
3652 OPCODE field is initialized with OPCODE.
3653 N.B. only one record for each opcode, i.e. at most one error is
3654 recorded for each instruction template. */
3655
3656 static void
3657 add_operand_error_record (const operand_error_record* new_record)
3658 {
3659 const aarch64_opcode *opcode = new_record->opcode;
3660 operand_error_record* record = operand_error_report.head;
3661
3662 /* The record may have been created for this opcode. If not, we need
3663 to prepare one. */
3664 if (! opcode_has_operand_error_p (opcode))
3665 {
3666 /* Get one empty record. */
3667 if (free_opnd_error_record_nodes == NULL)
3668 {
3669 record = xmalloc (sizeof (operand_error_record));
3670 if (record == NULL)
3671 abort ();
3672 }
3673 else
3674 {
3675 record = free_opnd_error_record_nodes;
3676 free_opnd_error_record_nodes = record->next;
3677 }
3678 record->opcode = opcode;
3679 /* Insert at the head. */
3680 record->next = operand_error_report.head;
3681 operand_error_report.head = record;
3682 if (operand_error_report.tail == NULL)
3683 operand_error_report.tail = record;
3684 }
3685 else if (record->detail.kind != AARCH64_OPDE_NIL
3686 && record->detail.index <= new_record->detail.index
3687 && operand_error_higher_severity_p (record->detail.kind,
3688 new_record->detail.kind))
3689 {
3690 /* In the case of multiple errors found on operands related with a
3691 single opcode, only record the error of the leftmost operand and
3692 only if the error is of higher severity. */
3693 DEBUG_TRACE ("error %s on operand %d not added to the report due to"
3694 " the existing error %s on operand %d",
3695 operand_mismatch_kind_names[new_record->detail.kind],
3696 new_record->detail.index,
3697 operand_mismatch_kind_names[record->detail.kind],
3698 record->detail.index);
3699 return;
3700 }
3701
3702 record->detail = new_record->detail;
3703 }
3704
3705 static inline void
3706 record_operand_error_info (const aarch64_opcode *opcode,
3707 aarch64_operand_error *error_info)
3708 {
3709 operand_error_record record;
3710 record.opcode = opcode;
3711 record.detail = *error_info;
3712 add_operand_error_record (&record);
3713 }
3714
3715 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
3716 error message *ERROR, for operand IDX (count from 0). */
3717
3718 static void
3719 record_operand_error (const aarch64_opcode *opcode, int idx,
3720 enum aarch64_operand_error_kind kind,
3721 const char* error)
3722 {
3723 aarch64_operand_error info;
3724 memset (&info, 0, sizeof (info));
3725 info.index = idx;
3726 info.kind = kind;
3727 info.error = error;
3728 record_operand_error_info (opcode, &info);
3729 }
3730
3731 static void
3732 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
3733 enum aarch64_operand_error_kind kind,
3734 const char* error, const int *extra_data)
3735 {
3736 aarch64_operand_error info;
3737 info.index = idx;
3738 info.kind = kind;
3739 info.error = error;
3740 info.data[0] = extra_data[0];
3741 info.data[1] = extra_data[1];
3742 info.data[2] = extra_data[2];
3743 record_operand_error_info (opcode, &info);
3744 }
3745
3746 static void
3747 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
3748 const char* error, int lower_bound,
3749 int upper_bound)
3750 {
3751 int data[3] = {lower_bound, upper_bound, 0};
3752 record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
3753 error, data);
3754 }
3755
3756 /* Remove the operand error record for *OPCODE. */
3757 static void ATTRIBUTE_UNUSED
3758 remove_operand_error_record (const aarch64_opcode *opcode)
3759 {
3760 if (opcode_has_operand_error_p (opcode))
3761 {
3762 operand_error_record* record = operand_error_report.head;
3763 gas_assert (record != NULL && operand_error_report.tail != NULL);
3764 operand_error_report.head = record->next;
3765 record->next = free_opnd_error_record_nodes;
3766 free_opnd_error_record_nodes = record;
3767 if (operand_error_report.head == NULL)
3768 {
3769 gas_assert (operand_error_report.tail == record);
3770 operand_error_report.tail = NULL;
3771 }
3772 }
3773 }
3774
3775 /* Given the instruction in *INSTR, return the index of the best matched
3776 qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
3777
3778 Return -1 if there is no qualifier sequence; return the index of the
3779 first match if multiple matches are found. */
3780
3781 static int
3782 find_best_match (const aarch64_inst *instr,
3783 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
3784 {
3785 int i, num_opnds, max_num_matched, idx;
3786
3787 num_opnds = aarch64_num_of_operands (instr->opcode);
3788 if (num_opnds == 0)
3789 {
3790 DEBUG_TRACE ("no operand");
3791 return -1;
3792 }
3793
3794 max_num_matched = 0;
3795 idx = -1;
3796
3797 /* For each pattern. */
3798 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
3799 {
3800 int j, num_matched;
3801 const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
3802
3803 /* Most opcodes have far fewer patterns in the list. */
3804 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
3805 {
3806 DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
3807 if (i != 0 && idx == -1)
3808 /* If nothing has been matched, return the 1st sequence. */
3809 idx = 0;
3810 break;
3811 }
3812
3813 for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
3814 if (*qualifiers == instr->operands[j].qualifier)
3815 ++num_matched;
3816
3817 if (num_matched > max_num_matched)
3818 {
3819 max_num_matched = num_matched;
3820 idx = i;
3821 }
3822 }
3823
3824 DEBUG_TRACE ("return with %d", idx);
3825 return idx;
3826 }
3827
3828 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
3829 corresponding operands in *INSTR. */
3830
3831 static inline void
3832 assign_qualifier_sequence (aarch64_inst *instr,
3833 const aarch64_opnd_qualifier_t *qualifiers)
3834 {
3835 int i = 0;
3836 int num_opnds = aarch64_num_of_operands (instr->opcode);
3837 gas_assert (num_opnds);
3838 for (i = 0; i < num_opnds; ++i, ++qualifiers)
3839 instr->operands[i].qualifier = *qualifiers;
3840 }
3841
3842 /* Print operands for diagnostic purposes. */
3843
3844 static void
3845 print_operands (char *buf, const aarch64_opcode *opcode,
3846 const aarch64_opnd_info *opnds)
3847 {
3848 int i;
3849
3850 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3851 {
3852 const size_t size = 128;
3853 char str[size];
3854
3855 /* We rely primarily on the opcode's operand info; however, we also
3856 look into inst->operands to support the printing of the optional
3857 operand.
3858 The two operand codes should be the same in all cases, apart from
3859 when the operand can be optional. */
3860 if (opcode->operands[i] == AARCH64_OPND_NIL
3861 || opnds[i].type == AARCH64_OPND_NIL)
3862 break;
3863
3864 /* Generate the operand string in STR. */
3865 aarch64_print_operand (str, size, 0, opcode, opnds, i, NULL, NULL);
3866
3867 /* Delimiter. */
3868 if (str[0] != '\0')
3869 strcat (buf, i == 0 ? " " : ",");
3870
3871 /* Append the operand string. */
3872 strcat (buf, str);
3873 }
3874 }
3875
3876 /* Send a string to stderr as information. */
3877
3878 static void
3879 output_info (const char *format, ...)
3880 {
3881 char *file;
3882 unsigned int line;
3883 va_list args;
3884
3885 as_where (&file, &line);
3886 if (file)
3887 {
3888 if (line != 0)
3889 fprintf (stderr, "%s:%u: ", file, line);
3890 else
3891 fprintf (stderr, "%s: ", file);
3892 }
3893 fprintf (stderr, _("Info: "));
3894 va_start (args, format);
3895 vfprintf (stderr, format, args);
3896 va_end (args);
3897 (void) putc ('\n', stderr);
3898 }
3899
3900 /* Output one operand error record. */
3901
3902 static void
3903 output_operand_error_record (const operand_error_record *record, char *str)
3904 {
3905 int idx = record->detail.index;
3906 const aarch64_opcode *opcode = record->opcode;
3907 enum aarch64_opnd opd_code = (idx != -1 ? opcode->operands[idx]
3908 : AARCH64_OPND_NIL);
3909 const aarch64_operand_error *detail = &record->detail;
3910
3911 switch (detail->kind)
3912 {
3913 case AARCH64_OPDE_NIL:
3914 gas_assert (0);
3915 break;
3916
3917 case AARCH64_OPDE_SYNTAX_ERROR:
3918 case AARCH64_OPDE_RECOVERABLE:
3919 case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
3920 case AARCH64_OPDE_OTHER_ERROR:
3921 gas_assert (idx >= 0);
3922 /* Use the prepared error message if there is one, otherwise use the
3923 operand description string to describe the error. */
3924 if (detail->error != NULL)
3925 {
3926 if (detail->index == -1)
3927 as_bad (_("%s -- `%s'"), detail->error, str);
3928 else
3929 as_bad (_("%s at operand %d -- `%s'"),
3930 detail->error, detail->index + 1, str);
3931 }
3932 else
3933 as_bad (_("operand %d should be %s -- `%s'"), idx + 1,
3934 aarch64_get_operand_desc (opd_code), str);
3935 break;
3936
3937 case AARCH64_OPDE_INVALID_VARIANT:
3938 as_bad (_("operand mismatch -- `%s'"), str);
3939 if (verbose_error_p)
3940 {
3941 /* We will try to correct the erroneous instruction and also provide
3942 more information e.g. all other valid variants.
3943
3944 The string representation of the corrected instruction and other
3945 valid variants are generated by
3946
3947 1) obtaining the intermediate representation of the erroneous
3948 instruction;
3949 2) manipulating the IR, e.g. replacing the operand qualifier;
3950 3) printing out the instruction by calling the printer functions
3951 shared with the disassembler.
3952
3953 The limitation of this method is that the exact input assembly
3954 line cannot be accurately reproduced in some cases, for example an
3955 optional operand present in the actual assembly line will be
3956 omitted in the output; likewise for the optional syntax rules,
3957 e.g. the # before the immediate. Another limitation is that the
3958 assembly symbols and relocation operations in the assembly line
3959 currently cannot be printed out in the error report. Last but not
3960 least, when other error(s) co-exist with this error, the
3961 'corrected' instruction may still be incorrect, e.g. given
3962 'ldnp h0,h1,[x0,#6]!'
3963 this diagnosis will provide the version:
3964 'ldnp s0,s1,[x0,#6]!'
3965 which is still not right. */
3966 size_t len = strlen (get_mnemonic_name (str));
3967 int i, qlf_idx;
3968 bfd_boolean result;
3969 const size_t size = 2048;
3970 char buf[size];
3971 aarch64_inst *inst_base = &inst.base;
3972 const aarch64_opnd_qualifier_seq_t *qualifiers_list;
3973
3974 /* Init inst. */
3975 reset_aarch64_instruction (&inst);
3976 inst_base->opcode = opcode;
3977
3978 /* Reset the error report so that there is no side effect on the
3979 following operand parsing. */
3980 init_operand_error_report ();
3981
3982 /* Fill inst. */
3983 result = parse_operands (str + len, opcode)
3984 && programmer_friendly_fixup (&inst);
3985 gas_assert (result);
3986 result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
3987 NULL, NULL);
3988 gas_assert (!result);
3989
3990 /* Find the most matched qualifier sequence. */
3991 qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
3992 gas_assert (qlf_idx > -1);
3993
3994 /* Assign the qualifiers. */
3995 assign_qualifier_sequence (inst_base,
3996 opcode->qualifiers_list[qlf_idx]);
3997
3998 /* Print the hint. */
3999 output_info (_(" did you mean this?"));
4000 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4001 print_operands (buf, opcode, inst_base->operands);
4002 output_info (_(" %s"), buf);
4003
4004 /* Print out other variant(s) if there is any. */
4005 if (qlf_idx != 0 ||
4006 !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4007 output_info (_(" other valid variant(s):"));
4008
4009 /* For each pattern. */
4010 qualifiers_list = opcode->qualifiers_list;
4011 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4012 {
4013 /* Most opcodes have far fewer patterns in the list.
4014 The first NIL qualifier indicates the end of the list. */
4015 if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4016 break;
4017
4018 if (i != qlf_idx)
4019 {
4020 /* Mnemonics name. */
4021 snprintf (buf, size, "\t%s", get_mnemonic_name (str));
4022
4023 /* Assign the qualifiers. */
4024 assign_qualifier_sequence (inst_base, *qualifiers_list);
4025
4026 /* Print instruction. */
4027 print_operands (buf, opcode, inst_base->operands);
4028
4029 output_info (_(" %s"), buf);
4030 }
4031 }
4032 }
4033 break;
4034
4035 case AARCH64_OPDE_OUT_OF_RANGE:
4036 if (detail->data[0] != detail->data[1])
4037 as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4038 detail->error ? detail->error : _("immediate value"),
4039 detail->data[0], detail->data[1], detail->index + 1, str);
4040 else
4041 as_bad (_("%s expected to be %d at operand %d -- `%s'"),
4042 detail->error ? detail->error : _("immediate value"),
4043 detail->data[0], detail->index + 1, str);
4044 break;
4045
4046 case AARCH64_OPDE_REG_LIST:
4047 if (detail->data[0] == 1)
4048 as_bad (_("invalid number of registers in the list; "
4049 "only 1 register is expected at operand %d -- `%s'"),
4050 detail->index + 1, str);
4051 else
4052 as_bad (_("invalid number of registers in the list; "
4053 "%d registers are expected at operand %d -- `%s'"),
4054 detail->data[0], detail->index + 1, str);
4055 break;
4056
4057 case AARCH64_OPDE_UNALIGNED:
4058 as_bad (_("immediate value should be a multiple of "
4059 "%d at operand %d -- `%s'"),
4060 detail->data[0], detail->index + 1, str);
4061 break;
4062
4063 default:
4064 gas_assert (0);
4065 break;
4066 }
4067 }
4068
4069 /* Process and output the error message about the operand mismatching.
4070
4071 When this function is called, the operand error information has
4072 been collected for an assembly line and there will be multiple
4073 errors in the case of multiple instruction templates; output the
4074 error message that most closely describes the problem. */
4075
4076 static void
4077 output_operand_error_report (char *str)
4078 {
4079 int largest_error_pos;
4080 const char *msg = NULL;
4081 enum aarch64_operand_error_kind kind;
4082 operand_error_record *curr;
4083 operand_error_record *head = operand_error_report.head;
4084 operand_error_record *record = NULL;
4085
4086 /* No error to report. */
4087 if (head == NULL)
4088 return;
4089
4090 gas_assert (head != NULL && operand_error_report.tail != NULL);
4091
4092 /* Only one error. */
4093 if (head == operand_error_report.tail)
4094 {
4095 DEBUG_TRACE ("single opcode entry with error kind: %s",
4096 operand_mismatch_kind_names[head->detail.kind]);
4097 output_operand_error_record (head, str);
4098 return;
4099 }
4100
4101 /* Find the error kind of the highest severity. */
4102 DEBUG_TRACE ("multiple opcode entries with error kind");
4103 kind = AARCH64_OPDE_NIL;
4104 for (curr = head; curr != NULL; curr = curr->next)
4105 {
4106 gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4107 DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4108 if (operand_error_higher_severity_p (curr->detail.kind, kind))
4109 kind = curr->detail.kind;
4110 }
4111 gas_assert (kind != AARCH64_OPDE_NIL);
4112
4113 /* Pick one of the errors of KIND to report. */
4114 largest_error_pos = -2; /* Index can be -1 which means unknown index. */
4115 for (curr = head; curr != NULL; curr = curr->next)
4116 {
4117 if (curr->detail.kind != kind)
4118 continue;
4119 /* If there are multiple errors, pick the one with the highest
4120 mismatching operand index. In the case of multiple errors with
4121 the same highest operand index, pick the first one or the
4122 first one with a non-NULL error message. */
4123 if (curr->detail.index > largest_error_pos
4124 || (curr->detail.index == largest_error_pos && msg == NULL
4125 && curr->detail.error != NULL))
4126 {
4127 largest_error_pos = curr->detail.index;
4128 record = curr;
4129 msg = record->detail.error;
4130 }
4131 }
4132
4133 gas_assert (largest_error_pos != -2 && record != NULL);
4134 DEBUG_TRACE ("Pick up error kind %s to report",
4135 operand_mismatch_kind_names[record->detail.kind]);
4136
4137 /* Output. */
4138 output_operand_error_record (record, str);
4139 }
4140 \f
4141 /* Write an AARCH64 instruction to buf - always little-endian. */
4142 static void
4143 put_aarch64_insn (char *buf, uint32_t insn)
4144 {
4145 unsigned char *where = (unsigned char *) buf;
4146 where[0] = insn;
4147 where[1] = insn >> 8;
4148 where[2] = insn >> 16;
4149 where[3] = insn >> 24;
4150 }
4151
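/* Read a 32-bit AArch64 instruction from BUF; the encoding is always
   little-endian.  */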
4152 static uint32_t
4153 get_aarch64_insn (char *buf)
4154 {
4155 unsigned char *where = (unsigned char *) buf;
4156 uint32_t result;
4157 result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4158 return result;
4159 }
4160
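/* Emit the encoded instruction held in inst.base.value into the current
   frag; if a relocation is pending, create the corresponding fix-up and,
   when NEW_INST is non-NULL, attach it to that fix-up for later use.  */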
4161 static void
4162 output_inst (struct aarch64_inst *new_inst)
4163 {
4164 char *to = NULL;
4165
4166 to = frag_more (INSN_SIZE);
4167
4168 frag_now->tc_frag_data.recorded = 1;
4169
4170 put_aarch64_insn (to, inst.base.value);
4171
4172 if (inst.reloc.type != BFD_RELOC_UNUSED)
4173 {
4174 fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4175 INSN_SIZE, &inst.reloc.exp,
4176 inst.reloc.pc_rel,
4177 inst.reloc.type);
4178 DEBUG_TRACE ("Prepared relocation fix up");
4179 /* Don't check the addend value against the instruction size,
4180 that's the job of our code in md_apply_fix(). */
4181 fixp->fx_no_overflow = 1;
4182 if (new_inst != NULL)
4183 fixp->tc_fix_data.inst = new_inst;
4184 if (aarch64_gas_internal_fixup_p ())
4185 {
4186 gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4187 fixp->tc_fix_data.opnd = inst.reloc.opnd;
4188 fixp->fx_addnumber = inst.reloc.flags;
4189 }
4190 }
4191
4192 dwarf2_emit_insn (INSN_SIZE);
4193 }
4194
4195 /* Link together opcodes of the same name. */
4196
4197 struct templates
4198 {
4199 aarch64_opcode *opcode;
4200 struct templates *next;
4201 };
4202
4203 typedef struct templates templates;
4204
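/* Look up the LEN characters starting at START in the opcode hash table;
   return the chain of templates sharing that mnemonic, or NULL.  */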
4205 static templates *
4206 lookup_mnemonic (const char *start, int len)
4207 {
4208 templates *templ = NULL;
4209
4210 templ = hash_find_n (aarch64_ops_hsh, start, len);
4211 return templ;
4212 }
4213
4214 /* Subroutine of md_assemble, responsible for looking up the primary
4215 opcode from the mnemonic the user wrote. STR points to the
4216 beginning of the mnemonic. */
4217
4218 static templates *
4219 opcode_lookup (char **str)
4220 {
4221 char *end, *base;
4222 const aarch64_cond *cond;
4223 char condname[16];
4224 int len;
4225
4226 /* Scan up to the end of the mnemonic, which must end in white space,
4227 '.', or end of string. */
4228 for (base = end = *str; is_part_of_name (*end); end++)
4229 if (*end == '.')
4230 break;
4231
4232 if (end == base)
4233 return 0;
4234
4235 inst.cond = COND_ALWAYS;
4236
4237 /* Handle a possible condition. */
4238 if (end[0] == '.')
4239 {
4240 cond = hash_find_n (aarch64_cond_hsh, end + 1, 2);
4241 if (cond)
4242 {
4243 inst.cond = cond->value;
4244 *str = end + 3;
4245 }
4246 else
4247 {
4248 *str = end;
4249 return 0;
4250 }
4251 }
4252 else
4253 *str = end;
4254
4255 len = end - base;
4256
4257 if (inst.cond == COND_ALWAYS)
4258 {
4259 /* Look for unaffixed mnemonic. */
4260 return lookup_mnemonic (base, len);
4261 }
4262 else if (len <= 13)
4263 {
4264 /* Append ".c" to the mnemonic if conditional. */
4265 memcpy (condname, base, len);
4266 memcpy (condname + len, ".c", 2);
4267 base = condname;
4268 len += 2;
4269 return lookup_mnemonic (base, len);
4270 }
4271
4272 return NULL;
4273 }
4274
4275 /* Internal helper routine converting a vector neon_type_el structure
4276 *VECTYPE to a corresponding operand qualifier. */
4277
4278 static inline aarch64_opnd_qualifier_t
4279 vectype_to_qualifier (const struct neon_type_el *vectype)
4280 {
4281 /* Element size in bytes indexed by neon_el_type. */
4282 const unsigned char ele_size[5]
4283 = {1, 2, 4, 8, 16};
4284
4285 if (!vectype->defined || vectype->type == NT_invtype)
4286 goto vectype_conversion_fail;
4287
4288 gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4289
4290 if (vectype->defined & NTA_HASINDEX)
4291 /* Vector element register. */
4292 return AARCH64_OPND_QLF_S_B + vectype->type;
4293 else
4294 {
4295 /* Vector register. */
4296 int reg_size = ele_size[vectype->type] * vectype->width;
4297 unsigned offset;
4298 if (reg_size != 16 && reg_size != 8)
4299 goto vectype_conversion_fail;
4300 /* The conversion is calculated based on the relation of the order of
4301 qualifiers to the vector element size and vector register size. */
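/* I.e. an offset of 0..8 selects 8B, 16B, 4H, 8H, 2S, 4S, 1D, 2D or 1Q
   respectively, assuming that ordering of the vector qualifiers.  */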
4302 offset = (vectype->type == NT_q)
4303 ? 8 : (vectype->type << 1) + (reg_size >> 4);
4304 gas_assert (offset <= 8);
4305 return AARCH64_OPND_QLF_V_8B + offset;
4306 }
4307
4308 vectype_conversion_fail:
4309 first_error (_("bad vector arrangement type"));
4310 return AARCH64_OPND_QLF_NIL;
4311 }
4312
4313 /* Process an optional operand that has been omitted from the assembly line.
4314 Fill *OPERAND for such an operand of type TYPE. OPCODE points to the
4315 instruction's opcode entry while IDX is the index of this omitted operand.
4316 */
4317
4318 static void
4319 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4320 int idx, aarch64_opnd_info *operand)
4321 {
4322 aarch64_insn default_value = get_optional_operand_default_value (opcode);
4323 gas_assert (optional_operand_p (opcode, idx));
4324 gas_assert (!operand->present);
4325
4326 switch (type)
4327 {
4328 case AARCH64_OPND_Rd:
4329 case AARCH64_OPND_Rn:
4330 case AARCH64_OPND_Rm:
4331 case AARCH64_OPND_Rt:
4332 case AARCH64_OPND_Rt2:
4333 case AARCH64_OPND_Rs:
4334 case AARCH64_OPND_Ra:
4335 case AARCH64_OPND_Rt_SYS:
4336 case AARCH64_OPND_Rd_SP:
4337 case AARCH64_OPND_Rn_SP:
4338 case AARCH64_OPND_Fd:
4339 case AARCH64_OPND_Fn:
4340 case AARCH64_OPND_Fm:
4341 case AARCH64_OPND_Fa:
4342 case AARCH64_OPND_Ft:
4343 case AARCH64_OPND_Ft2:
4344 case AARCH64_OPND_Sd:
4345 case AARCH64_OPND_Sn:
4346 case AARCH64_OPND_Sm:
4347 case AARCH64_OPND_Vd:
4348 case AARCH64_OPND_Vn:
4349 case AARCH64_OPND_Vm:
4350 case AARCH64_OPND_VdD1:
4351 case AARCH64_OPND_VnD1:
4352 operand->reg.regno = default_value;
4353 break;
4354
4355 case AARCH64_OPND_Ed:
4356 case AARCH64_OPND_En:
4357 case AARCH64_OPND_Em:
4358 operand->reglane.regno = default_value;
4359 break;
4360
4361 case AARCH64_OPND_IDX:
4362 case AARCH64_OPND_BIT_NUM:
4363 case AARCH64_OPND_IMMR:
4364 case AARCH64_OPND_IMMS:
4365 case AARCH64_OPND_SHLL_IMM:
4366 case AARCH64_OPND_IMM_VLSL:
4367 case AARCH64_OPND_IMM_VLSR:
4368 case AARCH64_OPND_CCMP_IMM:
4369 case AARCH64_OPND_FBITS:
4370 case AARCH64_OPND_UIMM4:
4371 case AARCH64_OPND_UIMM3_OP1:
4372 case AARCH64_OPND_UIMM3_OP2:
4373 case AARCH64_OPND_IMM:
4374 case AARCH64_OPND_WIDTH:
4375 case AARCH64_OPND_UIMM7:
4376 case AARCH64_OPND_NZCV:
4377 operand->imm.value = default_value;
4378 break;
4379
4380 case AARCH64_OPND_EXCEPTION:
4381 inst.reloc.type = BFD_RELOC_UNUSED;
4382 break;
4383
4384 case AARCH64_OPND_BARRIER_ISB:
4385 operand->barrier = aarch64_barrier_options + default_value;
4386 break;
4387 default:
4388 break;
4389 }
4390 }
4391
4392 /* Process the relocation type for move wide instructions.
4393 Return TRUE on success; otherwise return FALSE. */
4394
4395 static bfd_boolean
4396 process_movw_reloc_info (void)
4397 {
4398 int is32;
4399 unsigned shift;
4400
4401 is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
4402
4403 if (inst.base.opcode->op == OP_MOVK)
4404 switch (inst.reloc.type)
4405 {
4406 case BFD_RELOC_AARCH64_MOVW_G0_S:
4407 case BFD_RELOC_AARCH64_MOVW_G1_S:
4408 case BFD_RELOC_AARCH64_MOVW_G2_S:
4409 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4410 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4411 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4412 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4413 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4414 set_syntax_error
4415 (_("the specified relocation type is not allowed for MOVK"));
4416 return FALSE;
4417 default:
4418 break;
4419 }
4420
4421 switch (inst.reloc.type)
4422 {
4423 case BFD_RELOC_AARCH64_MOVW_G0:
4424 case BFD_RELOC_AARCH64_MOVW_G0_S:
4425 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4426 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4427 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4428 shift = 0;
4429 break;
4430 case BFD_RELOC_AARCH64_MOVW_G1:
4431 case BFD_RELOC_AARCH64_MOVW_G1_S:
4432 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4433 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4434 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4435 shift = 16;
4436 break;
4437 case BFD_RELOC_AARCH64_MOVW_G2:
4438 case BFD_RELOC_AARCH64_MOVW_G2_S:
4439 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4440 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4441 if (is32)
4442 {
4443 set_fatal_syntax_error
4444 (_("the specified relocation type is not allowed for 32-bit "
4445 "register"));
4446 return FALSE;
4447 }
4448 shift = 32;
4449 break;
4450 case BFD_RELOC_AARCH64_MOVW_G3:
4451 if (is32)
4452 {
4453 set_fatal_syntax_error
4454 (_("the specified relocation type is not allowed for 32-bit "
4455 "register"));
4456 return FALSE;
4457 }
4458 shift = 48;
4459 break;
4460 default:
4461 /* More cases should be added when more MOVW-related relocation types
4462 are supported in GAS. */
4463 gas_assert (aarch64_gas_internal_fixup_p ());
4464 /* The shift amount should have already been set by the parser. */
4465 return TRUE;
4466 }
4467 inst.base.operands[1].shifter.amount = shift;
4468 return TRUE;
4469 }
4470
4471 /* A primitive base-2 log calculator; valid sizes are 1, 2, 4, 8 and 16. */
4472
4473 static inline unsigned int
4474 get_logsz (unsigned int size)
4475 {
4476 const unsigned char ls[16] =
4477 {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
4478 if (size > 16)
4479 {
4480 gas_assert (0);
4481 return -1;
4482 }
4483 gas_assert (ls[size - 1] != (unsigned char)-1);
4484 return ls[size - 1];
4485 }
4486
4487 /* Determine and return the real reloc type code for an instruction
4488 with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12. */
4489
4490 static inline bfd_reloc_code_real_type
4491 ldst_lo12_determine_real_reloc_type (void)
4492 {
4493 int logsz;
4494 enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
4495 enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
4496
4497 const bfd_reloc_code_real_type reloc_ldst_lo12[5] = {
4498 BFD_RELOC_AARCH64_LDST8_LO12, BFD_RELOC_AARCH64_LDST16_LO12,
4499 BFD_RELOC_AARCH64_LDST32_LO12, BFD_RELOC_AARCH64_LDST64_LO12,
4500 BFD_RELOC_AARCH64_LDST128_LO12
4501 };
4502
4503 gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12);
4504 gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
4505
4506 if (opd1_qlf == AARCH64_OPND_QLF_NIL)
4507 opd1_qlf =
4508 aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
4509 1, opd0_qlf, 0);
4510 gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
4511
4512 logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
4513 gas_assert (logsz >= 0 && logsz <= 4);
4514
4515 return reloc_ldst_lo12[logsz];
4516 }
4517
4518 /* Check whether a register list REGINFO is valid. The registers must be
4519 numbered in increasing order (modulo 32), in increments of one or two.
4520
4521 If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
4522 increments of two.
4523
4524 Return FALSE if such a register list is invalid, otherwise return TRUE. */
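/* The REGINFO encoding assumed here: bits [1:0] hold the number of
   registers minus one; each successive 5-bit field, starting at bit 2,
   holds a register number.  */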
4525
4526 static bfd_boolean
4527 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
4528 {
4529 uint32_t i, nb_regs, prev_regno, incr;
4530
4531 nb_regs = 1 + (reginfo & 0x3);
4532 reginfo >>= 2;
4533 prev_regno = reginfo & 0x1f;
4534 incr = accept_alternate ? 2 : 1;
4535
4536 for (i = 1; i < nb_regs; ++i)
4537 {
4538 uint32_t curr_regno;
4539 reginfo >>= 5;
4540 curr_regno = reginfo & 0x1f;
4541 if (curr_regno != ((prev_regno + incr) & 0x1f))
4542 return FALSE;
4543 prev_regno = curr_regno;
4544 }
4545
4546 return TRUE;
4547 }
4548
4549 /* Generic instruction operand parser. This does no encoding and no
4550 semantic validation; it merely squirrels values away in the inst
4551 structure. Returns TRUE or FALSE depending on whether the
4552 specified grammar matched. */
4553
4554 static bfd_boolean
4555 parse_operands (char *str, const aarch64_opcode *opcode)
4556 {
4557 int i;
4558 char *backtrack_pos = 0;
4559 const enum aarch64_opnd *operands = opcode->operands;
4560
4561 clear_error ();
4562 skip_whitespace (str);
4563
4564 for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
4565 {
4566 int64_t val;
4567 int isreg32, isregzero;
4568 int comma_skipped_p = 0;
4569 aarch64_reg_type rtype;
4570 struct neon_type_el vectype;
4571 aarch64_opnd_info *info = &inst.base.operands[i];
4572
4573 DEBUG_TRACE ("parse operand %d", i);
4574
4575 /* Assign the operand code. */
4576 info->type = operands[i];
4577
4578 if (optional_operand_p (opcode, i))
4579 {
4580 /* Remember where we are in case we need to backtrack. */
4581 gas_assert (!backtrack_pos);
4582 backtrack_pos = str;
4583 }
4584
4585 /* Expect a comma between operands; the backtrack mechanism will take
4586 care of cases where an optional operand has been omitted. */
4587 if (i > 0 && ! skip_past_char (&str, ','))
4588 {
4589 set_syntax_error (_("comma expected between operands"));
4590 goto failure;
4591 }
4592 else
4593 comma_skipped_p = 1;
4594
4595 switch (operands[i])
4596 {
4597 case AARCH64_OPND_Rd:
4598 case AARCH64_OPND_Rn:
4599 case AARCH64_OPND_Rm:
4600 case AARCH64_OPND_Rt:
4601 case AARCH64_OPND_Rt2:
4602 case AARCH64_OPND_Rs:
4603 case AARCH64_OPND_Ra:
4604 case AARCH64_OPND_Rt_SYS:
4605 po_int_reg_or_fail (1, 0);
4606 break;
4607
4608 case AARCH64_OPND_Rd_SP:
4609 case AARCH64_OPND_Rn_SP:
4610 po_int_reg_or_fail (0, 1);
4611 break;
4612
4613 case AARCH64_OPND_Rm_EXT:
4614 case AARCH64_OPND_Rm_SFT:
4615 po_misc_or_fail (parse_shifter_operand
4616 (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
4617 ? SHIFTED_ARITH_IMM
4618 : SHIFTED_LOGIC_IMM)));
4619 if (!info->shifter.operator_present)
4620 {
4621 /* Default to LSL if not present. Libopcodes prefers shifter
4622 kind to be explicit. */
4623 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4624 info->shifter.kind = AARCH64_MOD_LSL;
4625 /* For Rm_EXT, libopcodes will carry out further check on whether
4626 or not stack pointer is used in the instruction (Recall that
4627 "the extend operator is not optional unless at least one of
4628 "Rd" or "Rn" is '11111' (i.e. WSP)"). */
4629 }
4630 break;
4631
4632 case AARCH64_OPND_Fd:
4633 case AARCH64_OPND_Fn:
4634 case AARCH64_OPND_Fm:
4635 case AARCH64_OPND_Fa:
4636 case AARCH64_OPND_Ft:
4637 case AARCH64_OPND_Ft2:
4638 case AARCH64_OPND_Sd:
4639 case AARCH64_OPND_Sn:
4640 case AARCH64_OPND_Sm:
4641 val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
4642 if (val == PARSE_FAIL)
4643 {
4644 first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
4645 goto failure;
4646 }
4647 gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
4648
4649 info->reg.regno = val;
4650 info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
4651 break;
4652
4653 case AARCH64_OPND_Vd:
4654 case AARCH64_OPND_Vn:
4655 case AARCH64_OPND_Vm:
4656 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4657 if (val == PARSE_FAIL)
4658 {
4659 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4660 goto failure;
4661 }
4662 if (vectype.defined & NTA_HASINDEX)
4663 goto failure;
4664
4665 info->reg.regno = val;
4666 info->qualifier = vectype_to_qualifier (&vectype);
4667 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4668 goto failure;
4669 break;
4670
4671 case AARCH64_OPND_VdD1:
4672 case AARCH64_OPND_VnD1:
4673 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4674 if (val == PARSE_FAIL)
4675 {
4676 set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4677 goto failure;
4678 }
4679 if (vectype.type != NT_d || vectype.index != 1)
4680 {
4681 set_fatal_syntax_error
4682 (_("the top half of a 128-bit FP/SIMD register is expected"));
4683 goto failure;
4684 }
4685 info->reg.regno = val;
4686 /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
4687 here; it is correct for the purpose of encoding/decoding since
4688 only the register number is explicitly encoded in the related
4689 instructions, although this appears a bit hacky. */
4690 info->qualifier = AARCH64_OPND_QLF_S_D;
4691 break;
4692
4693 case AARCH64_OPND_Ed:
4694 case AARCH64_OPND_En:
4695 case AARCH64_OPND_Em:
4696 val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
4697 if (val == PARSE_FAIL)
4698 {
4699 first_error (_(get_reg_expected_msg (REG_TYPE_VN)));
4700 goto failure;
4701 }
4702 if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
4703 goto failure;
4704
4705 info->reglane.regno = val;
4706 info->reglane.index = vectype.index;
4707 info->qualifier = vectype_to_qualifier (&vectype);
4708 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4709 goto failure;
4710 break;
4711
4712 case AARCH64_OPND_LVn:
4713 case AARCH64_OPND_LVt:
4714 case AARCH64_OPND_LVt_AL:
4715 case AARCH64_OPND_LEt:
4716 if ((val = parse_neon_reg_list (&str, &vectype)) == PARSE_FAIL)
4717 goto failure;
4718 if (! reg_list_valid_p (val, /* accept_alternate */ 0))
4719 {
4720 set_fatal_syntax_error (_("invalid register list"));
4721 goto failure;
4722 }
4723 info->reglist.first_regno = (val >> 2) & 0x1f;
4724 info->reglist.num_regs = (val & 0x3) + 1;
4725 if (operands[i] == AARCH64_OPND_LEt)
4726 {
4727 if (!(vectype.defined & NTA_HASINDEX))
4728 goto failure;
4729 info->reglist.has_index = 1;
4730 info->reglist.index = vectype.index;
4731 }
4732 else if (!(vectype.defined & NTA_HASTYPE))
4733 goto failure;
4734 info->qualifier = vectype_to_qualifier (&vectype);
4735 if (info->qualifier == AARCH64_OPND_QLF_NIL)
4736 goto failure;
4737 break;
4738
4739 case AARCH64_OPND_Cn:
4740 case AARCH64_OPND_Cm:
4741 po_reg_or_fail (REG_TYPE_CN);
4742 if (val > 15)
4743 {
4744 set_fatal_syntax_error (_(get_reg_expected_msg (REG_TYPE_CN)));
4745 goto failure;
4746 }
4747 inst.base.operands[i].reg.regno = val;
4748 break;
4749
4750 case AARCH64_OPND_SHLL_IMM:
4751 case AARCH64_OPND_IMM_VLSR:
4752 po_imm_or_fail (1, 64);
4753 info->imm.value = val;
4754 break;
4755
4756 case AARCH64_OPND_CCMP_IMM:
4757 case AARCH64_OPND_FBITS:
4758 case AARCH64_OPND_UIMM4:
4759 case AARCH64_OPND_UIMM3_OP1:
4760 case AARCH64_OPND_UIMM3_OP2:
4761 case AARCH64_OPND_IMM_VLSL:
4762 case AARCH64_OPND_IMM:
4763 case AARCH64_OPND_WIDTH:
4764 po_imm_nc_or_fail ();
4765 info->imm.value = val;
4766 break;
4767
4768 case AARCH64_OPND_UIMM7:
4769 po_imm_or_fail (0, 127);
4770 info->imm.value = val;
4771 break;
4772
4773 case AARCH64_OPND_IDX:
4774 case AARCH64_OPND_BIT_NUM:
4775 case AARCH64_OPND_IMMR:
4776 case AARCH64_OPND_IMMS:
4777 po_imm_or_fail (0, 63);
4778 info->imm.value = val;
4779 break;
4780
4781 case AARCH64_OPND_IMM0:
4782 po_imm_nc_or_fail ();
4783 if (val != 0)
4784 {
4785 set_fatal_syntax_error (_("immediate zero expected"));
4786 goto failure;
4787 }
4788 info->imm.value = 0;
4789 break;
4790
4791 case AARCH64_OPND_FPIMM0:
4792 {
4793 int qfloat;
4794 bfd_boolean res1 = FALSE, res2 = FALSE;
4795 /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
4796 it is probably not worth the effort to support it. */
4797 if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE))
4798 && !(res2 = parse_constant_immediate (&str, &val)))
4799 goto failure;
4800 if ((res1 && qfloat == 0) || (res2 && val == 0))
4801 {
4802 info->imm.value = 0;
4803 info->imm.is_fp = 1;
4804 break;
4805 }
4806 set_fatal_syntax_error (_("immediate zero expected"));
4807 goto failure;
4808 }
4809
4810 case AARCH64_OPND_IMM_MOV:
4811 {
4812 char *saved = str;
4813 if (reg_name_p (str, REG_TYPE_R_Z_SP)
4814 || reg_name_p (str, REG_TYPE_VN))
4815 goto failure;
4816 str = saved;
4817 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4818 GE_OPT_PREFIX, 1));
4819 /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
4820 later. fix_mov_imm_insn will try to determine a machine
4821 instruction (MOVZ, MOVN or ORR) for it and will issue an error
4822 message if the immediate cannot be moved by a single
4823 instruction. */
4824 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
4825 inst.base.operands[i].skip = 1;
4826 }
4827 break;
4828
4829 case AARCH64_OPND_SIMD_IMM:
4830 case AARCH64_OPND_SIMD_IMM_SFT:
4831 if (! parse_big_immediate (&str, &val))
4832 goto failure;
4833 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4834 /* addr_off_p */ 0,
4835 /* need_libopcodes_p */ 1,
4836 /* skip_p */ 1);
4837 /* Parse shift.
4838 N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
4839 shift, we don't check it here; we leave the checking to
4840 the libopcodes (operand_general_constraint_met_p). By
4841 doing this, we achieve better diagnostics. */
4842 if (skip_past_comma (&str)
4843 && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
4844 goto failure;
4845 if (!info->shifter.operator_present
4846 && info->type == AARCH64_OPND_SIMD_IMM_SFT)
4847 {
4848 /* Default to LSL if not present. Libopcodes prefers shifter
4849 kind to be explicit. */
4850 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4851 info->shifter.kind = AARCH64_MOD_LSL;
4852 }
4853 break;
4854
4855 case AARCH64_OPND_FPIMM:
4856 case AARCH64_OPND_SIMD_FPIMM:
4857 {
4858 int qfloat;
4859 bfd_boolean dp_p
4860 = (aarch64_get_qualifier_esize (inst.base.operands[0].qualifier)
4861 == 8);
4862 if (! parse_aarch64_imm_float (&str, &qfloat, dp_p))
4863 goto failure;
4864 if (qfloat == 0)
4865 {
4866 set_fatal_syntax_error (_("invalid floating-point constant"));
4867 goto failure;
4868 }
4869 inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
4870 inst.base.operands[i].imm.is_fp = 1;
4871 }
4872 break;
4873
4874 case AARCH64_OPND_LIMM:
4875 po_misc_or_fail (parse_shifter_operand (&str, info,
4876 SHIFTED_LOGIC_IMM));
4877 if (info->shifter.operator_present)
4878 {
4879 set_fatal_syntax_error
4880 (_("shift not allowed for bitmask immediate"));
4881 goto failure;
4882 }
4883 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4884 /* addr_off_p */ 0,
4885 /* need_libopcodes_p */ 1,
4886 /* skip_p */ 1);
4887 break;
4888
4889 case AARCH64_OPND_AIMM:
4890 if (opcode->op == OP_ADD)
4891 /* ADD may have relocation types. */
4892 po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
4893 SHIFTED_ARITH_IMM));
4894 else
4895 po_misc_or_fail (parse_shifter_operand (&str, info,
4896 SHIFTED_ARITH_IMM));
4897 switch (inst.reloc.type)
4898 {
4899 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4900 info->shifter.amount = 12;
4901 break;
4902 case BFD_RELOC_UNUSED:
4903 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4904 if (info->shifter.kind != AARCH64_MOD_NONE)
4905 inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
4906 inst.reloc.pc_rel = 0;
4907 break;
4908 default:
4909 break;
4910 }
4911 info->imm.value = 0;
4912 if (!info->shifter.operator_present)
4913 {
4914 /* Default to LSL if not present. Libopcodes prefers shifter
4915 kind to be explicit. */
4916 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
4917 info->shifter.kind = AARCH64_MOD_LSL;
4918 }
4919 break;
4920
4921 case AARCH64_OPND_HALF:
4922 {
4923 /* #<imm16> or relocation. */
4924 int internal_fixup_p;
4925 po_misc_or_fail (parse_half (&str, &internal_fixup_p));
4926 if (internal_fixup_p)
4927 aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
4928 skip_whitespace (str);
4929 if (skip_past_comma (&str))
4930 {
4931 /* {, LSL #<shift>} */
4932 if (! aarch64_gas_internal_fixup_p ())
4933 {
4934 set_fatal_syntax_error (_("can't mix relocation modifier "
4935 "with explicit shift"));
4936 goto failure;
4937 }
4938 po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
4939 }
4940 else
4941 inst.base.operands[i].shifter.amount = 0;
4942 inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
4943 inst.base.operands[i].imm.value = 0;
4944 if (! process_movw_reloc_info ())
4945 goto failure;
4946 }
4947 break;
4948
4949 case AARCH64_OPND_EXCEPTION:
4950 po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp));
4951 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
4952 /* addr_off_p */ 0,
4953 /* need_libopcodes_p */ 0,
4954 /* skip_p */ 1);
4955 break;
4956
4957 case AARCH64_OPND_NZCV:
4958 {
4959 const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
4960 if (nzcv != NULL)
4961 {
4962 str += 4;
4963 info->imm.value = nzcv->value;
4964 break;
4965 }
4966 po_imm_or_fail (0, 15);
4967 info->imm.value = val;
4968 }
4969 break;
4970
4971 case AARCH64_OPND_COND:
4972 info->cond = hash_find_n (aarch64_cond_hsh, str, 2);
4973 str += 2;
4974 if (info->cond == NULL)
4975 {
4976 set_syntax_error (_("invalid condition"));
4977 goto failure;
4978 }
4979 break;
4980
4981 case AARCH64_OPND_ADDR_ADRP:
4982 po_misc_or_fail (parse_adrp (&str));
4983 /* Clear the value as operand needs to be relocated. */
4984 info->imm.value = 0;
4985 break;
4986
4987 case AARCH64_OPND_ADDR_PCREL14:
4988 case AARCH64_OPND_ADDR_PCREL19:
4989 case AARCH64_OPND_ADDR_PCREL21:
4990 case AARCH64_OPND_ADDR_PCREL26:
4991 po_misc_or_fail (parse_address_reloc (&str, info));
4992 if (!info->addr.pcrel)
4993 {
4994 set_syntax_error (_("invalid pc-relative address"));
4995 goto failure;
4996 }
4997 if (inst.gen_lit_pool
4998 && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
4999 {
5000 /* Only permit "=value" in the literal load instructions.
5001 The literal will be generated by programmer_friendly_fixup. */
5002 set_syntax_error (_("invalid use of \"=immediate\""));
5003 goto failure;
5004 }
5005 if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5006 {
5007 set_syntax_error (_("unrecognized relocation suffix"));
5008 goto failure;
5009 }
5010 if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5011 {
5012 info->imm.value = inst.reloc.exp.X_add_number;
5013 inst.reloc.type = BFD_RELOC_UNUSED;
5014 }
5015 else
5016 {
5017 info->imm.value = 0;
5018 if (inst.reloc.type == BFD_RELOC_UNUSED)
5019 switch (opcode->iclass)
5020 {
5021 case compbranch:
5022 case condbranch:
5023 /* e.g. CBZ or B.COND */
5024 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5025 inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5026 break;
5027 case testbranch:
5028 /* e.g. TBZ */
5029 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5030 inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5031 break;
5032 case branch_imm:
5033 /* e.g. B or BL */
5034 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5035 inst.reloc.type =
5036 (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5037 : BFD_RELOC_AARCH64_JUMP26;
5038 break;
5039 case loadlit:
5040 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5041 inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5042 break;
5043 case pcreladdr:
5044 gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5045 inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5046 break;
5047 default:
5048 gas_assert (0);
5049 abort ();
5050 }
5051 inst.reloc.pc_rel = 1;
5052 }
5053 break;
5054
5055 case AARCH64_OPND_ADDR_SIMPLE:
5056 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5057 /* [<Xn|SP>{, #<simm>}] */
5058 po_char_or_fail ('[');
5059 po_reg_or_fail (REG_TYPE_R64_SP);
5060 /* Accept optional ", #0". */
5061 if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5062 && skip_past_char (&str, ','))
5063 {
5064 skip_past_char (&str, '#');
5065 if (! skip_past_char (&str, '0'))
5066 {
5067 set_fatal_syntax_error
5068 (_("the optional immediate offset can only be 0"));
5069 goto failure;
5070 }
5071 }
5072 po_char_or_fail (']');
5073 info->addr.base_regno = val;
5074 break;
5075
5076 case AARCH64_OPND_ADDR_REGOFF:
5077 /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
5078 po_misc_or_fail (parse_address (&str, info, 0));
5079 if (info->addr.pcrel || !info->addr.offset.is_reg
5080 || !info->addr.preind || info->addr.postind
5081 || info->addr.writeback)
5082 {
5083 set_syntax_error (_("invalid addressing mode"));
5084 goto failure;
5085 }
5086 if (!info->shifter.operator_present)
5087 {
5088 /* Default to LSL if not present. Libopcodes prefers shifter
5089 kind to be explicit. */
5090 gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5091 info->shifter.kind = AARCH64_MOD_LSL;
5092 }
5093 /* Qualifier to be deduced by libopcodes. */
5094 break;
5095
5096 case AARCH64_OPND_ADDR_SIMM7:
5097 po_misc_or_fail (parse_address (&str, info, 0));
5098 if (info->addr.pcrel || info->addr.offset.is_reg
5099 || (!info->addr.preind && !info->addr.postind))
5100 {
5101 set_syntax_error (_("invalid addressing mode"));
5102 goto failure;
5103 }
5104 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5105 /* addr_off_p */ 1,
5106 /* need_libopcodes_p */ 1,
5107 /* skip_p */ 0);
5108 break;
5109
5110 case AARCH64_OPND_ADDR_SIMM9:
5111 case AARCH64_OPND_ADDR_SIMM9_2:
5112 po_misc_or_fail (parse_address_reloc (&str, info));
5113 if (info->addr.pcrel || info->addr.offset.is_reg
5114 || (!info->addr.preind && !info->addr.postind)
5115 || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
5116 && info->addr.writeback))
5117 {
5118 set_syntax_error (_("invalid addressing mode"));
5119 goto failure;
5120 }
5121 if (inst.reloc.type != BFD_RELOC_UNUSED)
5122 {
5123 set_syntax_error (_("relocation not allowed"));
5124 goto failure;
5125 }
5126 assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5127 /* addr_off_p */ 1,
5128 /* need_libopcodes_p */ 1,
5129 /* skip_p */ 0);
5130 break;
5131
5132 case AARCH64_OPND_ADDR_UIMM12:
5133 po_misc_or_fail (parse_address_reloc (&str, info));
5134 if (info->addr.pcrel || info->addr.offset.is_reg
5135 || !info->addr.preind || info->addr.writeback)
5136 {
5137 set_syntax_error (_("invalid addressing mode"));
5138 goto failure;
5139 }
5140 if (inst.reloc.type == BFD_RELOC_UNUSED)
5141 aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5142 else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12)
5143 inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
5144 /* Leave qualifier to be determined by libopcodes. */
5145 break;
5146
5147 case AARCH64_OPND_SIMD_ADDR_POST:
5148 /* [<Xn|SP>], <Xm|#<amount>> */
5149 po_misc_or_fail (parse_address (&str, info, 1));
5150 if (!info->addr.postind || !info->addr.writeback)
5151 {
5152 set_syntax_error (_("invalid addressing mode"));
5153 goto failure;
5154 }
5155 if (!info->addr.offset.is_reg)
5156 {
5157 if (inst.reloc.exp.X_op == O_constant)
5158 info->addr.offset.imm = inst.reloc.exp.X_add_number;
5159 else
5160 {
5161 set_fatal_syntax_error
5162 (_("writeback value should be an immediate constant"));
5163 goto failure;
5164 }
5165 }
5166 /* No qualifier. */
5167 break;
5168
5169 case AARCH64_OPND_SYSREG:
5170 if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1))
5171 == PARSE_FAIL)
5172 {
5173 set_syntax_error (_("unknown or missing system register name"));
5174 goto failure;
5175 }
5176 inst.base.operands[i].sysreg = val;
5177 break;
5178
5179 case AARCH64_OPND_PSTATEFIELD:
5180 if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0))
5181 == PARSE_FAIL)
5182 {
5183 set_syntax_error (_("unknown or missing PSTATE field name"));
5184 goto failure;
5185 }
5186 inst.base.operands[i].pstatefield = val;
5187 break;
5188
5189 case AARCH64_OPND_SYSREG_IC:
5190 inst.base.operands[i].sysins_op =
5191 parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
5192 goto sys_reg_ins;
5193 case AARCH64_OPND_SYSREG_DC:
5194 inst.base.operands[i].sysins_op =
5195 parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
5196 goto sys_reg_ins;
5197 case AARCH64_OPND_SYSREG_AT:
5198 inst.base.operands[i].sysins_op =
5199 parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
5200 goto sys_reg_ins;
5201 case AARCH64_OPND_SYSREG_TLBI:
5202 inst.base.operands[i].sysins_op =
5203 parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
5204 sys_reg_ins:
5205 if (inst.base.operands[i].sysins_op == NULL)
5206 {
5207 set_fatal_syntax_error ( _("unknown or missing operation name"));
5208 goto failure;
5209 }
5210 break;
5211
5212 case AARCH64_OPND_BARRIER:
5213 case AARCH64_OPND_BARRIER_ISB:
5214 val = parse_barrier (&str);
5215 if (val != PARSE_FAIL
5216 && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
5217 {
5218 /* ISB only accepts the option name 'sy'. */
5219 set_syntax_error
5220 (_("the specified option is not accepted in ISB"));
5221 /* Turn off backtrack as this optional operand is present. */
5222 backtrack_pos = 0;
5223 goto failure;
5224 }
5225 /* This is an extension to accept a 0..15 immediate. */
5226 if (val == PARSE_FAIL)
5227 po_imm_or_fail (0, 15);
5228 info->barrier = aarch64_barrier_options + val;
5229 break;
5230
5231 case AARCH64_OPND_PRFOP:
5232 val = parse_pldop (&str);
5233 /* This is an extension to accept a 0..31 immediate. */
5234 if (val == PARSE_FAIL)
5235 po_imm_or_fail (0, 31);
5236 inst.base.operands[i].prfop = aarch64_prfops + val;
5237 break;
5238
5239 default:
5240 as_fatal (_("unhandled operand code %d"), operands[i]);
5241 }
5242
5243 /* If we get here, this operand was successfully parsed. */
5244 inst.base.operands[i].present = 1;
5245 continue;
5246
5247 failure:
5248 /* The parse routine should already have set the error, but in case
5249 not, set a default one here. */
5250 if (! error_p ())
5251 set_default_error ();
5252
5253 if (! backtrack_pos)
5254 goto parse_operands_return;
5255
5256 /* Reaching here means we are dealing with an optional operand that is
5257 omitted from the assembly line. */
5258 gas_assert (optional_operand_p (opcode, i));
5259 info->present = 0;
5260 process_omitted_operand (operands[i], opcode, i, info);
5261
5262 /* Try again, skipping the optional operand at backtrack_pos. */
5263 str = backtrack_pos;
5264 backtrack_pos = 0;
5265
5266 /* If this is the last operand, which is optional and omitted, then the
5267 comma already consumed before it is spurious; report an error. */
5268 if (i && comma_skipped_p && i == aarch64_num_of_operands (opcode) - 1)
5269 {
5270 set_fatal_syntax_error
5271 (_("unexpected comma before the omitted optional operand"));
5272 goto parse_operands_return;
5273 }
5274
5275 /* Clear any error record after the omitted optional operand has been
5276 successfully handled. */
5277 clear_error ();
5278 }
5279
5280 /* Check if we have parsed all the operands. */
5281 if (*str != '\0' && ! error_p ())
5282 {
5283 /* Set I to the index of the last present operand; this is
5284 for the purpose of diagnostics. */
5285 for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
5286 ;
5287 set_fatal_syntax_error
5288 (_("unexpected characters following instruction"));
5289 }
5290
5291 parse_operands_return:
5292
5293 if (error_p ())
5294 {
5295 DEBUG_TRACE ("parsing FAIL: %s - %s",
5296 operand_mismatch_kind_names[get_error_kind ()],
5297 get_error_message ());
5298 /* Record the operand error properly; this is useful when there
5299 are multiple instruction templates for a mnemonic name, so that
5300 later on, we can select the error that most closely describes
5301 the problem. */
5302 record_operand_error (opcode, i, get_error_kind (),
5303 get_error_message ());
5304 return FALSE;
5305 }
5306 else
5307 {
5308 DEBUG_TRACE ("parsing SUCCESS");
5309 return TRUE;
5310 }
5311 }
5312
5313 /* Perform some fix-ups to provide programmer-friendly features while
5314 keeping libopcodes happy, i.e. libopcodes only accepts
5315 the preferred architectural syntax.
5316 Return FALSE if there is any failure; otherwise return TRUE. */
5317
5318 static bfd_boolean
5319 programmer_friendly_fixup (aarch64_instruction *instr)
5320 {
5321 aarch64_inst *base = &instr->base;
5322 const aarch64_opcode *opcode = base->opcode;
5323 enum aarch64_op op = opcode->op;
5324 aarch64_opnd_info *operands = base->operands;
5325
5326 DEBUG_TRACE ("enter");
5327
5328 switch (opcode->iclass)
5329 {
5330 case testbranch:
5331 /* TBNZ Xn|Wn, #uimm6, label
5332 Test and Branch Not Zero: conditionally jumps to label if bit number
5333 uimm6 in register Xn is not zero. The bit number implies the width of
5334 the register, which may be written and should be disassembled as Wn if
5335 uimm is less than 32. */
5336 if (operands[0].qualifier == AARCH64_OPND_QLF_W)
5337 {
5338 if (operands[1].imm.value >= 32)
5339 {
5340 record_operand_out_of_range_error (opcode, 1, _("immediate value"),
5341 0, 31);
5342 return FALSE;
5343 }
5344 operands[0].qualifier = AARCH64_OPND_QLF_X;
5345 }
5346 break;
5347 case loadlit:
5348 /* LDR Wt, label | =value
5349 As a convenience, assemblers will typically permit the notation
5350 "=value" in conjunction with the pc-relative literal load instructions
5351 to automatically place an immediate value or symbolic address in a
5352 nearby literal pool and generate a hidden label which references it.
5353 ISREG has been set to 0 in the case of =value. */
5354 if (instr->gen_lit_pool
5355 && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
5356 {
5357 int size = aarch64_get_qualifier_esize (operands[0].qualifier);
5358 if (op == OP_LDRSW_LIT)
5359 size = 4;
5360 if (instr->reloc.exp.X_op != O_constant
5361 && instr->reloc.exp.X_op != O_big
5362 && instr->reloc.exp.X_op != O_symbol)
5363 {
5364 record_operand_error (opcode, 1,
5365 AARCH64_OPDE_FATAL_SYNTAX_ERROR,
5366 _("constant expression expected"));
5367 return FALSE;
5368 }
5369 if (! add_to_lit_pool (&instr->reloc.exp, size))
5370 {
5371 record_operand_error (opcode, 1,
5372 AARCH64_OPDE_OTHER_ERROR,
5373 _("literal pool insertion failed"));
5374 return FALSE;
5375 }
5376 }
5377 break;
5378 case log_shift:
5379 case bitfield:
5380 /* UXT[BHW] Wd, Wn
5381 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
5382 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo-instruction which is
5383 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
5384 A programmer-friendly assembler should accept a destination Xd in
5385 place of Wd, however that is not the preferred form for disassembly.
5386 */
5387 if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
5388 && operands[1].qualifier == AARCH64_OPND_QLF_W
5389 && operands[0].qualifier == AARCH64_OPND_QLF_X)
5390 operands[0].qualifier = AARCH64_OPND_QLF_W;
5391 break;
5392
5393 case addsub_ext:
5394 {
5395 /* In the 64-bit form, the final register operand is written as Wm
5396 for all but the (possibly omitted) UXTX/LSL and SXTX
5397 operators.
5398 As a programmer-friendly assembler, we accept e.g.
5399 ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
5400 ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}. */
5401 int idx = aarch64_operand_index (opcode->operands,
5402 AARCH64_OPND_Rm_EXT);
5403 gas_assert (idx == 1 || idx == 2);
5404 if (operands[0].qualifier == AARCH64_OPND_QLF_X
5405 && operands[idx].qualifier == AARCH64_OPND_QLF_X
5406 && operands[idx].shifter.kind != AARCH64_MOD_LSL
5407 && operands[idx].shifter.kind != AARCH64_MOD_UXTX
5408 && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
5409 operands[idx].qualifier = AARCH64_OPND_QLF_W;
5410 }
5411 break;
5412
5413 default:
5414 break;
5415 }
5416
5417 DEBUG_TRACE ("exit with SUCCESS");
5418 return TRUE;
5419 }
5420
5421 /* A wrapper function to interface with libopcodes on encoding and
5422 record the error message if there is any.
5423
5424 Return TRUE on success; otherwise return FALSE. */
5425
5426 static bfd_boolean
5427 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
5428 aarch64_insn *code)
5429 {
5430 aarch64_operand_error error_info;
5431 error_info.kind = AARCH64_OPDE_NIL;
5432 if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
5433 return TRUE;
5434 else
5435 {
5436 gas_assert (error_info.kind != AARCH64_OPDE_NIL);
5437 record_operand_error_info (opcode, &error_info);
5438 return FALSE;
5439 }
5440 }
5441
5442 #ifdef DEBUG_AARCH64
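/* Dump the operand codes of OPCODE; a debugging aid.  */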
5443 static inline void
5444 dump_opcode_operands (const aarch64_opcode *opcode)
5445 {
5446 int i = 0;
5447 while (opcode->operands[i] != AARCH64_OPND_NIL)
5448 {
5449 aarch64_verbose ("\t\t opnd%d: %s", i,
5450 aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
5451 ? aarch64_get_operand_name (opcode->operands[i])
5452 : aarch64_get_operand_desc (opcode->operands[i]));
5453 ++i;
5454 }
5455 }
5456 #endif /* DEBUG_AARCH64 */
5457
5458 /* This is the guts of the machine-dependent assembler. STR points to a
5459 machine dependent instruction. This function is supposed to emit
5460 the frags/bytes it assembles to. */
5461
5462 void
5463 md_assemble (char *str)
5464 {
5465 char *p = str;
5466 templates *template;
5467 aarch64_opcode *opcode;
5468 aarch64_inst *inst_base;
5469 unsigned saved_cond;
5470
5471 /* Align the previous label if needed. */
5472 if (last_label_seen != NULL)
5473 {
5474 symbol_set_frag (last_label_seen, frag_now);
5475 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
5476 S_SET_SEGMENT (last_label_seen, now_seg);
5477 }
5478
5479 inst.reloc.type = BFD_RELOC_UNUSED;
5480
5481 DEBUG_TRACE ("\n\n");
5482 DEBUG_TRACE ("==============================");
5483 DEBUG_TRACE ("Enter md_assemble with %s", str);
5484
5485 template = opcode_lookup (&p);
5486 if (!template)
5487 {
5488 /* It wasn't an instruction, but it might be a register alias
5489 created by an "alias .req reg" directive. */
5490 if (!create_register_alias (str, p))
5491 as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
5492 str);
5493 return;
5494 }
5495
5496 skip_whitespace (p);
5497 if (*p == ',')
5498 {
5499 as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
5500 get_mnemonic_name (str), str);
5501 return;
5502 }
5503
5504 init_operand_error_report ();
5505
5506 saved_cond = inst.cond;
5507 reset_aarch64_instruction (&inst);
5508 inst.cond = saved_cond;
5509
5510 /* Iterate through all opcode entries with the same mnemonic name. */
5511 do
5512 {
5513 opcode = template->opcode;
5514
5515 DEBUG_TRACE ("opcode %s found", opcode->name);
5516 #ifdef DEBUG_AARCH64
5517 if (debug_dump)
5518 dump_opcode_operands (opcode);
5519 #endif /* DEBUG_AARCH64 */
5520
5521 /* Check that this instruction is supported for this CPU. */
5522 if (!opcode->avariant
5523 || !AARCH64_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
5524 {
5525 as_bad (_("selected processor does not support `%s'"), str);
5526 return;
5527 }
5528
5529 mapping_state (MAP_INSN);
5530
5531 inst_base = &inst.base;
5532 inst_base->opcode = opcode;
5533
5534 /* Truly conditionally executed instructions, e.g. b.cond. */
5535 if (opcode->flags & F_COND)
5536 {
5537 gas_assert (inst.cond != COND_ALWAYS);
5538 inst_base->cond = get_cond_from_value (inst.cond);
5539 DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
5540 }
5541 else if (inst.cond != COND_ALWAYS)
5542 {
5543 /* We shouldn't arrive here: the assembly looks like a
5544 conditional instruction but the opcode found is unconditional. */
5545 gas_assert (0);
5546 continue;
5547 }
5548
5549 if (parse_operands (p, opcode)
5550 && programmer_friendly_fixup (&inst)
5551 && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
5552 {
5553 if (inst.reloc.type == BFD_RELOC_UNUSED
5554 || !inst.reloc.need_libopcodes_p)
5555 output_inst (NULL);
5556 else
5557 {
5558 /* If a relocation is generated for the instruction,
5559 store the instruction information for the future fix-up. */
5560 struct aarch64_inst *copy;
5561 gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
5562 if ((copy = xmalloc (sizeof (struct aarch64_inst))) == NULL)
5563 abort ();
5564 memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
5565 output_inst (copy);
5566 }
5567 return;
5568 }
5569
5570 template = template->next;
5571 if (template != NULL)
5572 {
5573 reset_aarch64_instruction (&inst);
5574 inst.cond = saved_cond;
5575 }
5576 }
5577 while (template != NULL);
5578
5579 /* Issue the error messages if any. */
5580 output_operand_error_report (str);
5581 }
5582
5583 /* Various frobbings of labels and their addresses. */
5584
5585 void
5586 aarch64_start_line_hook (void)
5587 {
5588 last_label_seen = NULL;
5589 }
5590
5591 void
5592 aarch64_frob_label (symbolS * sym)
5593 {
5594 last_label_seen = sym;
5595
5596 dwarf2_emit_label (sym);
5597 }
5598
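/* If the text following the current character is "data:", this label marks
   data within a code section; terminate the symbol name before the ':' and
   return non-zero.  */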
5599 int
5600 aarch64_data_in_code (void)
5601 {
5602 if (!strncmp (input_line_pointer + 1, "data:", 5))
5603 {
5604 *input_line_pointer = '/';
5605 input_line_pointer += 5;
5606 *input_line_pointer = 0;
5607 return 1;
5608 }
5609
5610 return 0;
5611 }
5612
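/* Strip any trailing "/data" marker (see aarch64_data_in_code) from NAME.  */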
5613 char *
5614 aarch64_canonicalize_symbol_name (char *name)
5615 {
5616 int len;
5617
5618 if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
5619 *(name + len - 5) = 0;
5620
5621 return name;
5622 }
5623 \f
5624 /* Table of all register names defined by default. The user can
5625 define additional names with .req. Note that all register names
5626 should appear in both upper and lowercase variants. Some registers
5627 also have mixed-case names. */
5628
5629 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
5630 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
5631 #define REGSET31(p,t) \
5632 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
5633 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
5634 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
5635 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t), \
5636 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
5637 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
5638 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
5639 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
5640 #define REGSET(p,t) \
5641 REGSET31(p,t), REGNUM(p,31,t)
5642
5643 /* These go into aarch64_reg_hsh hash-table. */
5644 static const reg_entry reg_names[] = {
5645 /* Integer registers. */
5646 REGSET31 (x, R_64), REGSET31 (X, R_64),
5647 REGSET31 (w, R_32), REGSET31 (W, R_32),
5648
5649 REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
5650 REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
5651
5652 REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
5653 REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
5654
5655 /* Coprocessor register numbers. */
5656 REGSET (c, CN), REGSET (C, CN),
5657
5658 /* Floating-point single precision registers. */
5659 REGSET (s, FP_S), REGSET (S, FP_S),
5660
5661 /* Floating-point double precision registers. */
5662 REGSET (d, FP_D), REGSET (D, FP_D),
5663
5664 /* Floating-point half precision registers. */
5665 REGSET (h, FP_H), REGSET (H, FP_H),
5666
5667 /* Floating-point byte precision registers. */
5668 REGSET (b, FP_B), REGSET (B, FP_B),
5669
5670 /* Floating-point quad precision registers. */
5671 REGSET (q, FP_Q), REGSET (Q, FP_Q),
5672
5673 /* FP/SIMD registers. */
5674 REGSET (v, VN), REGSET (V, VN),
5675 };
5676
5677 #undef REGDEF
5678 #undef REGNUM
5679 #undef REGSET
5680
5681 #define N 1
5682 #define n 0
5683 #define Z 1
5684 #define z 0
5685 #define C 1
5686 #define c 0
5687 #define V 1
5688 #define v 0
5689 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
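/* Names of the NZCV condition-flag immediates; the case of each letter
   selects whether the corresponding flag bit (N, Z, C or V, from most to
   least significant) is set.  */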
5690 static const asm_nzcv nzcv_names[] = {
5691 {"nzcv", B (n, z, c, v)},
5692 {"nzcV", B (n, z, c, V)},
5693 {"nzCv", B (n, z, C, v)},
5694 {"nzCV", B (n, z, C, V)},
5695 {"nZcv", B (n, Z, c, v)},
5696 {"nZcV", B (n, Z, c, V)},
5697 {"nZCv", B (n, Z, C, v)},
5698 {"nZCV", B (n, Z, C, V)},
5699 {"Nzcv", B (N, z, c, v)},
5700 {"NzcV", B (N, z, c, V)},
5701 {"NzCv", B (N, z, C, v)},
5702 {"NzCV", B (N, z, C, V)},
5703 {"NZcv", B (N, Z, c, v)},
5704 {"NZcV", B (N, Z, c, V)},
5705 {"NZCv", B (N, Z, C, v)},
5706 {"NZCV", B (N, Z, C, V)}
5707 };
5708
5709 #undef N
5710 #undef n
5711 #undef Z
5712 #undef z
5713 #undef C
5714 #undef c
5715 #undef V
5716 #undef v
5717 #undef B
5718 \f
5719 /* MD interface: bits in the object file. */
5720
5721 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
5722 for use in the a.out file, and stores them in the array pointed to by buf.
5723 This knows about the endian-ness of the target machine and does
5724 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
5725 2 (short) and 4 (long). Floating numbers are put out as a series of
5726 LITTLENUMS (shorts, here at least). */
5727
5728 void
5729 md_number_to_chars (char *buf, valueT val, int n)
5730 {
5731 if (target_big_endian)
5732 number_to_chars_bigendian (buf, val, n);
5733 else
5734 number_to_chars_littleendian (buf, val, n);
5735 }
5736
5737 /* MD interface: Sections. */
5738
5739 /* Estimate the size of a frag before relaxing. Assume everything fits in
5740 4 bytes. */
5741
5742 int
5743 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
5744 {
5745 fragp->fr_var = 4;
5746 return 4;
5747 }
5748
5749 /* Round up a section size to the appropriate boundary. */
5750
5751 valueT
5752 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
5753 {
5754 return size;
5755 }
5756
5757 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
5758 of an rs_align_code fragment. */
5759
5760 void
5761 aarch64_handle_align (fragS * fragP)
5762 {
5763 /* NOP = d503201f */
5764 /* AArch64 instructions are always little-endian. */
5765 static char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
5766
5767 int bytes, fix, noop_size;
5768 char *p;
5769 const char *noop;
5770
5771 if (fragP->fr_type != rs_align_code)
5772 return;
5773
5774 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
5775 p = fragP->fr_literal + fragP->fr_fix;
5776 fix = 0;
5777
5778 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
5779 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
5780
5781 #ifdef OBJ_ELF
5782 gas_assert (fragP->tc_frag_data.recorded);
5783 #endif
5784
5785 noop = aarch64_noop;
5786 noop_size = sizeof (aarch64_noop);
5787 fragP->fr_var = noop_size;
5788
5789 if (bytes & (noop_size - 1))
5790 {
5791 fix = bytes & (noop_size - 1);
5792 #ifdef OBJ_ELF
5793 insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
5794 #endif
5795 memset (p, 0, fix);
5796 p += fix;
5797 bytes -= fix;
5798 }
5799
5800 while (bytes >= noop_size)
5801 {
5802 memcpy (p, noop, noop_size);
5803 p += noop_size;
5804 bytes -= noop_size;
5805 fix += noop_size;
5806 }
5807
5808 fragP->fr_fix += fix;
5809 }
5810
5811 /* Called from md_do_align. Used to create an alignment
5812 frag in a code section. */
5813
5814 void
5815 aarch64_frag_align_code (int n, int max)
5816 {
5817 char *p;
5818
5819 /* We assume that there will never be a requirement
5820 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
5821 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
5822 as_fatal (_
5823 ("alignments greater than %d bytes not supported in .text sections"),
5824 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
5825
5826 p = frag_var (rs_align_code,
5827 MAX_MEM_FOR_RS_ALIGN_CODE,
5828 1,
5829 (relax_substateT) max,
5830 (symbolS *) NULL, (offsetT) n, (char *) NULL);
5831 *p = 0;
5832 }
5833
5834 /* Perform target specific initialisation of a frag.
5835 Note - despite the name this initialisation is not done when the frag
5836 is created, but only when its type is assigned. A frag can be created
5837 and used a long time before its type is set, so beware of assuming that
5838 this initialisation is performed first. */
5839
5840 #ifndef OBJ_ELF
5841 void
5842 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
5843 int max_chars ATTRIBUTE_UNUSED)
5844 {
5845 }
5846
5847 #else /* OBJ_ELF is defined. */
5848 void
5849 aarch64_init_frag (fragS * fragP, int max_chars)
5850 {
5851 /* Record a mapping symbol for alignment frags. We will delete this
5852 later if the alignment ends up empty. */
5853 if (!fragP->tc_frag_data.recorded)
5854 {
5855 fragP->tc_frag_data.recorded = 1;
5856 switch (fragP->fr_type)
5857 {
5858 case rs_align:
5859 case rs_align_test:
5860 case rs_fill:
5861 mapping_state_2 (MAP_DATA, max_chars);
5862 break;
5863 case rs_align_code:
5864 mapping_state_2 (MAP_INSN, max_chars);
5865 break;
5866 default:
5867 break;
5868 }
5869 }
5870 }
5871 \f
5872 /* Initialize the DWARF-2 unwind information for this procedure. */
5873
5874 void
5875 tc_aarch64_frame_initial_instructions (void)
5876 {
5877 cfi_add_CFA_def_cfa (REG_SP, 0);
5878 }
5879 #endif /* OBJ_ELF */
5880
5881 /* Convert REGNAME to a DWARF-2 register number. */
5882
5883 int
5884 tc_aarch64_regname_to_dw2regnum (char *regname)
5885 {
5886 const reg_entry *reg = parse_reg (&regname);
5887 if (reg == NULL)
5888 return -1;
5889
5890 switch (reg->type)
5891 {
5892 case REG_TYPE_SP_32:
5893 case REG_TYPE_SP_64:
5894 case REG_TYPE_R_32:
5895 case REG_TYPE_R_64:
5896 case REG_TYPE_FP_B:
5897 case REG_TYPE_FP_H:
5898 case REG_TYPE_FP_S:
5899 case REG_TYPE_FP_D:
5900 case REG_TYPE_FP_Q:
5901 return reg->number;
5902 default:
5903 break;
5904 }
5905 return -1;
5906 }
5907
5908 /* Implement DWARF2_ADDR_SIZE. */
5909
5910 int
5911 aarch64_dwarf2_addr_size (void)
5912 {
5913 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
5914 if (ilp32_p)
5915 return 4;
5916 #endif
5917 return bfd_arch_bits_per_address (stdoutput) / 8;
5918 }
5919
5920 /* MD interface: Symbol and relocation handling. */
5921
5922 /* Return the address within the segment that a PC-relative fixup is
5923 relative to. For AArch64, PC-relative fixups applied to instructions
5924 are generally relative to the location plus AARCH64_PCREL_OFFSET bytes. */
5925
5926 long
5927 md_pcrel_from_section (fixS * fixP, segT seg)
5928 {
5929 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
5930
5931 /* If this is pc-relative and we are going to emit a relocation
5932 then we just want to put out any pipeline compensation that the linker
5933 will need. Otherwise we want to use the calculated base. */
5934 if (fixP->fx_pcrel
5935 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
5936 || aarch64_force_relocation (fixP)))
5937 base = 0;
5938
5939 /* AArch64 should be consistent for all pc-relative relocations. */
5940 return base + AARCH64_PCREL_OFFSET;
5941 }
5942
5943 /* Under ELF we need to provide a default for _GLOBAL_OFFSET_TABLE_.
5944 Otherwise we have no need to provide default values for symbols. */
5945
5946 symbolS *
5947 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
5948 {
5949 #ifdef OBJ_ELF
5950 if (name[0] == '_' && name[1] == 'G'
5951 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
5952 {
5953 if (!GOT_symbol)
5954 {
5955 if (symbol_find (name))
5956 as_bad (_("GOT already in the symbol table"));
5957
5958 GOT_symbol = symbol_new (name, undefined_section,
5959 (valueT) 0, &zero_address_frag);
5960 }
5961
5962 return GOT_symbol;
5963 }
5964 #endif
5965
5966 return 0;
5967 }
5968
5969 /* Return non-zero if the indicated VALUE has overflowed the maximum
5970 range expressible by an unsigned number with the indicated number of
5971 BITS. */
5972
5973 static bfd_boolean
5974 unsigned_overflow (valueT value, unsigned bits)
5975 {
5976 valueT lim;
5977 if (bits >= sizeof (valueT) * 8)
5978 return FALSE;
5979 lim = (valueT) 1 << bits;
5980 return (value >= lim);
5981 }
5982
5983
5984 /* Return non-zero if the indicated VALUE has overflowed the maximum
5985 range expressible by a signed number with the indicated number of
5986 BITS. */
5987
5988 static bfd_boolean
5989 signed_overflow (offsetT value, unsigned bits)
5990 {
5991 offsetT lim;
5992 if (bits >= sizeof (offsetT) * 8)
5993 return FALSE;
5994 lim = (offsetT) 1 << (bits - 1);
5995 return (value < -lim || value >= lim);
5996 }
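/* Worked examples (illustrative): unsigned_overflow (0x10000, 16) is TRUE and
   unsigned_overflow (0xffff, 16) is FALSE; signed_overflow (0x8000, 16) is
   TRUE while signed_overflow (-0x8000, 16) is FALSE.  */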
5997
5998 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
5999 unsigned immediate offset load/store instruction, try to encode it as
6000 an unscaled, 9-bit, signed immediate offset load/store instruction.
6001 Return TRUE if it is successful; otherwise return FALSE.
6002
6003 As a programmer-friendly assembler, LDUR/STUR instructions can be generated
6004 in response to the standard LDR/STR mnemonics when the immediate offset is
6005 unambiguous, i.e. when it is negative or unaligned. */
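/* For example (an illustration, not from the original source):
     ldr  x0, [x1, #-8]   // negative offset, not encodable as LDR (unsigned imm)
   is accepted and encoded as if it had been written
     ldur x0, [x1, #-8]
   and likewise for an offset that is not a multiple of the access size,
   such as "ldr x0, [x1, #1]".  */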
6006
6007 static bfd_boolean
6008 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
6009 {
6010 int idx;
6011 enum aarch64_op new_op;
6012 const aarch64_opcode *new_opcode;
6013
6014 gas_assert (instr->opcode->iclass == ldst_pos);
6015
6016 switch (instr->opcode->op)
6017 {
6018 case OP_LDRB_POS:new_op = OP_LDURB; break;
6019 case OP_STRB_POS: new_op = OP_STURB; break;
6020 case OP_LDRSB_POS: new_op = OP_LDURSB; break;
6021 case OP_LDRH_POS: new_op = OP_LDURH; break;
6022 case OP_STRH_POS: new_op = OP_STURH; break;
6023 case OP_LDRSH_POS: new_op = OP_LDURSH; break;
6024 case OP_LDR_POS: new_op = OP_LDUR; break;
6025 case OP_STR_POS: new_op = OP_STUR; break;
6026 case OP_LDRF_POS: new_op = OP_LDURV; break;
6027 case OP_STRF_POS: new_op = OP_STURV; break;
6028 case OP_LDRSW_POS: new_op = OP_LDURSW; break;
6029 case OP_PRFM_POS: new_op = OP_PRFUM; break;
6030 default: new_op = OP_NIL; break;
6031 }
6032
6033 if (new_op == OP_NIL)
6034 return FALSE;
6035
6036 new_opcode = aarch64_get_opcode (new_op);
6037 gas_assert (new_opcode != NULL);
6038
6039 DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
6040 instr->opcode->op, new_opcode->op);
6041
6042 aarch64_replace_opcode (instr, new_opcode);
6043
6044 /* Clear up the ADDR_SIMM9's qualifier; otherwise the
6045 qualifier matching may fail because the out-of-date qualifier will
6046 prevent the operand from being updated with a new and correct qualifier. */
6047 idx = aarch64_operand_index (instr->opcode->operands,
6048 AARCH64_OPND_ADDR_SIMM9);
6049 gas_assert (idx == 1);
6050 instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
6051
6052 DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
6053
6054 if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
6055 return FALSE;
6056
6057 return TRUE;
6058 }
6059
6060 /* Called by fix_insn to fix a MOV immediate alias instruction.
6061
6062 Operand for a generic move immediate instruction, which is an alias
6063 instruction that generates a single MOVZ, MOVN or ORR instruction to load
6064 a 32-bit/64-bit immediate value into a general register. An assembler error
6065 shall result if the immediate cannot be created by a single one of these
6066 instructions. If there is a choice, then to ensure reversibility an
6067 assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR. */
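/* Illustrative examples (assumed immediates, not from the original source):
     mov x0, #0x2a                -> movz x0, #0x2a
     mov x0, #-2                  -> movn x0, #0x1
     mov x0, #0xff00ff00ff00ff00  -> orr  x0, xzr, #0xff00ff00ff00ff00
   whereas an immediate such as 0x123456789 cannot be produced by a single
   MOVZ, MOVN or ORR and is rejected.  */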
6068
6069 static void
6070 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
6071 {
6072 const aarch64_opcode *opcode;
6073
6074 /* Need to check if the destination is SP/ZR. The check has to be done
6075 before any aarch64_replace_opcode. */
6076 int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
6077 int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
6078
6079 instr->operands[1].imm.value = value;
6080 instr->operands[1].skip = 0;
6081
6082 if (try_mov_wide_p)
6083 {
6084 /* Try the MOVZ alias. */
6085 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
6086 aarch64_replace_opcode (instr, opcode);
6087 if (aarch64_opcode_encode (instr->opcode, instr,
6088 &instr->value, NULL, NULL))
6089 {
6090 put_aarch64_insn (buf, instr->value);
6091 return;
6092 }
6093 /* Try the MOVN alias. */
6094 opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
6095 aarch64_replace_opcode (instr, opcode);
6096 if (aarch64_opcode_encode (instr->opcode, instr,
6097 &instr->value, NULL, NULL))
6098 {
6099 put_aarch64_insn (buf, instr->value);
6100 return;
6101 }
6102 }
6103
6104 if (try_mov_bitmask_p)
6105 {
6106 /* Try the ORR alias. */
6107 opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
6108 aarch64_replace_opcode (instr, opcode);
6109 if (aarch64_opcode_encode (instr->opcode, instr,
6110 &instr->value, NULL, NULL))
6111 {
6112 put_aarch64_insn (buf, instr->value);
6113 return;
6114 }
6115 }
6116
6117 as_bad_where (fixP->fx_file, fixP->fx_line,
6118 _("immediate cannot be moved by a single instruction"));
6119 }
6120
6121 /* An instruction operand which is immediate-related may have a symbol used
6122 in the assembly, e.g.
6123
6124 mov w0, u32
6125 .set u32, 0x00ffff00
6126
6127 At the time when the assembly instruction is parsed, a referenced symbol,
6128 like 'u32' in the above example may not have been seen; a fixS is created
6129 in such a case and is handled here after symbols have been resolved.
6130 Instruction is fixed up with VALUE using the information in *FIXP plus
6131 extra information in FLAGS.
6132
6133 This function is called by md_apply_fix to fix up instructions that need
6134 a fix-up as described above but do not involve any linker-time relocation. */
6135
6136 static void
6137 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
6138 {
6139 int idx;
6140 uint32_t insn;
6141 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6142 enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
6143 aarch64_inst *new_inst = fixP->tc_fix_data.inst;
6144
6145 if (new_inst)
6146 {
6147 /* Now the instruction is about to be fixed-up, so the operand that
6148 was previously marked as 'ignored' needs to be unmarked in order
6149 to get the encoding done properly. */
6150 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6151 new_inst->operands[idx].skip = 0;
6152 }
6153
6154 gas_assert (opnd != AARCH64_OPND_NIL);
6155
6156 switch (opnd)
6157 {
6158 case AARCH64_OPND_EXCEPTION:
6159 if (unsigned_overflow (value, 16))
6160 as_bad_where (fixP->fx_file, fixP->fx_line,
6161 _("immediate out of range"));
6162 insn = get_aarch64_insn (buf);
6163 insn |= encode_svc_imm (value);
6164 put_aarch64_insn (buf, insn);
6165 break;
6166
6167 case AARCH64_OPND_AIMM:
6168 /* ADD or SUB with immediate.
6169 NOTE this assumes we come here with an add/sub shifted reg encoding
6170 3 322|2222|2 2 2 21111 111111
6171 1 098|7654|3 2 1 09876 543210 98765 43210
6172 0b000000 sf 000|1011|shift 0 Rm imm6 Rn Rd ADD
6173 2b000000 sf 010|1011|shift 0 Rm imm6 Rn Rd ADDS
6174 4b000000 sf 100|1011|shift 0 Rm imm6 Rn Rd SUB
6175 6b000000 sf 110|1011|shift 0 Rm imm6 Rn Rd SUBS
6176 ->
6177 3 322|2222|2 2 221111111111
6178 1 098|7654|3 2 109876543210 98765 43210
6179 11000000 sf 001|0001|shift imm12 Rn Rd ADD
6180 31000000 sf 011|0001|shift imm12 Rn Rd ADDS
6181 51000000 sf 101|0001|shift imm12 Rn Rd SUB
6182 71000000 sf 111|0001|shift imm12 Rn Rd SUBS
6183 Fields sf Rn Rd are already set. */
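/* Illustrative examples (assumed operands, not from the original source):
   "add x0, x1, #sym" with sym resolved to 0x12000 does not fit in 12 bits,
   but 0x12 does, so it is encoded as imm12 == 0x12 with LSL #12; a negative
   resolved value such as -1 is handled below by switching ADD to SUB (or
   vice versa) and negating the value.  */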
6184 insn = get_aarch64_insn (buf);
6185 if (value < 0)
6186 {
6187 /* Add <-> sub. */
6188 insn = reencode_addsub_switch_add_sub (insn);
6189 value = -value;
6190 }
6191
6192 if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
6193 && unsigned_overflow (value, 12))
6194 {
6195 /* Try to shift the value by 12 to make it fit. */
6196 if (((value >> 12) << 12) == value
6197 && ! unsigned_overflow (value, 12 + 12))
6198 {
6199 value >>= 12;
6200 insn |= encode_addsub_imm_shift_amount (1);
6201 }
6202 }
6203
6204 if (unsigned_overflow (value, 12))
6205 as_bad_where (fixP->fx_file, fixP->fx_line,
6206 _("immediate out of range"));
6207
6208 insn |= encode_addsub_imm (value);
6209
6210 put_aarch64_insn (buf, insn);
6211 break;
6212
6213 case AARCH64_OPND_SIMD_IMM:
6214 case AARCH64_OPND_SIMD_IMM_SFT:
6215 case AARCH64_OPND_LIMM:
6216 /* Bit mask immediate. */
6217 gas_assert (new_inst != NULL);
6218 idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
6219 new_inst->operands[idx].imm.value = value;
6220 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6221 &new_inst->value, NULL, NULL))
6222 put_aarch64_insn (buf, new_inst->value);
6223 else
6224 as_bad_where (fixP->fx_file, fixP->fx_line,
6225 _("invalid immediate"));
6226 break;
6227
6228 case AARCH64_OPND_HALF:
6229 /* 16-bit unsigned immediate. */
6230 if (unsigned_overflow (value, 16))
6231 as_bad_where (fixP->fx_file, fixP->fx_line,
6232 _("immediate out of range"));
6233 insn = get_aarch64_insn (buf);
6234 insn |= encode_movw_imm (value & 0xffff);
6235 put_aarch64_insn (buf, insn);
6236 break;
6237
6238 case AARCH64_OPND_IMM_MOV:
6239 /* Operand for a generic move immediate instruction, which is
6240 an alias instruction that generates a single MOVZ, MOVN or ORR
6241 instruction to load a 32-bit/64-bit immediate value into a general
6242 register. An assembler error shall result if the immediate cannot be
6243 created by a single one of these instructions. If there is a choice,
6244 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
6245 and MOVZ or MOVN to ORR. */
6246 gas_assert (new_inst != NULL);
6247 fix_mov_imm_insn (fixP, buf, new_inst, value);
6248 break;
6249
6250 case AARCH64_OPND_ADDR_SIMM7:
6251 case AARCH64_OPND_ADDR_SIMM9:
6252 case AARCH64_OPND_ADDR_SIMM9_2:
6253 case AARCH64_OPND_ADDR_UIMM12:
6254 /* Immediate offset in an address. */
6255 insn = get_aarch64_insn (buf);
6256
6257 gas_assert (new_inst != NULL && new_inst->value == insn);
6258 gas_assert (new_inst->opcode->operands[1] == opnd
6259 || new_inst->opcode->operands[2] == opnd);
6260
6261 /* Get the index of the address operand. */
6262 if (new_inst->opcode->operands[1] == opnd)
6263 /* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
6264 idx = 1;
6265 else
6266 /* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}]. */
6267 idx = 2;
6268
6269 /* Update the resolved offset value. */
6270 new_inst->operands[idx].addr.offset.imm = value;
6271
6272 /* Encode/fix-up. */
6273 if (aarch64_opcode_encode (new_inst->opcode, new_inst,
6274 &new_inst->value, NULL, NULL))
6275 {
6276 put_aarch64_insn (buf, new_inst->value);
6277 break;
6278 }
6279 else if (new_inst->opcode->iclass == ldst_pos
6280 && try_to_encode_as_unscaled_ldst (new_inst))
6281 {
6282 put_aarch64_insn (buf, new_inst->value);
6283 break;
6284 }
6285
6286 as_bad_where (fixP->fx_file, fixP->fx_line,
6287 _("immediate offset out of range"));
6288 break;
6289
6290 default:
6291 gas_assert (0);
6292 as_fatal (_("unhandled operand code %d"), opnd);
6293 }
6294 }
6295
6296 /* Apply a fixup (fixP) to segment data, once it has been determined
6297 by our caller that we have all the info we need to fix it up.
6298
6299 Parameter valP is the pointer to the value of the bits. */
6300
6301 void
6302 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
6303 {
6304 offsetT value = *valP;
6305 uint32_t insn;
6306 char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
6307 int scale;
6308 unsigned flags = fixP->fx_addnumber;
6309
6310 DEBUG_TRACE ("\n\n");
6311 DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
6312 DEBUG_TRACE ("Enter md_apply_fix");
6313
6314 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
6315
6316 /* Note whether this will delete the relocation. */
6317
6318 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
6319 fixP->fx_done = 1;
6320
6321 /* Process the relocations. */
6322 switch (fixP->fx_r_type)
6323 {
6324 case BFD_RELOC_NONE:
6325 /* This will need to go in the object file. */
6326 fixP->fx_done = 0;
6327 break;
6328
6329 case BFD_RELOC_8:
6330 case BFD_RELOC_8_PCREL:
6331 if (fixP->fx_done || !seg->use_rela_p)
6332 md_number_to_chars (buf, value, 1);
6333 break;
6334
6335 case BFD_RELOC_16:
6336 case BFD_RELOC_16_PCREL:
6337 if (fixP->fx_done || !seg->use_rela_p)
6338 md_number_to_chars (buf, value, 2);
6339 break;
6340
6341 case BFD_RELOC_32:
6342 case BFD_RELOC_32_PCREL:
6343 if (fixP->fx_done || !seg->use_rela_p)
6344 md_number_to_chars (buf, value, 4);
6345 break;
6346
6347 case BFD_RELOC_64:
6348 case BFD_RELOC_64_PCREL:
6349 if (fixP->fx_done || !seg->use_rela_p)
6350 md_number_to_chars (buf, value, 8);
6351 break;
6352
6353 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6354 /* We claim that these fixups have been processed here, even if
6355 in fact we generate an error because we do not have a reloc
6356 for them, so tc_gen_reloc() will reject them. */
6357 fixP->fx_done = 1;
6358 if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
6359 {
6360 as_bad_where (fixP->fx_file, fixP->fx_line,
6361 _("undefined symbol %s used as an immediate value"),
6362 S_GET_NAME (fixP->fx_addsy));
6363 goto apply_fix_return;
6364 }
6365 fix_insn (fixP, flags, value);
6366 break;
6367
6368 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
6369 if (fixP->fx_done || !seg->use_rela_p)
6370 {
6371 if (value & 3)
6372 as_bad_where (fixP->fx_file, fixP->fx_line,
6373 _("pc-relative load offset not word aligned"));
6374 if (signed_overflow (value, 21))
6375 as_bad_where (fixP->fx_file, fixP->fx_line,
6376 _("pc-relative load offset out of range"));
6377 insn = get_aarch64_insn (buf);
6378 insn |= encode_ld_lit_ofs_19 (value >> 2);
6379 put_aarch64_insn (buf, insn);
6380 }
6381 break;
6382
6383 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6384 if (fixP->fx_done || !seg->use_rela_p)
6385 {
6386 if (signed_overflow (value, 21))
6387 as_bad_where (fixP->fx_file, fixP->fx_line,
6388 _("pc-relative address offset out of range"));
6389 insn = get_aarch64_insn (buf);
6390 insn |= encode_adr_imm (value);
6391 put_aarch64_insn (buf, insn);
6392 }
6393 break;
6394
6395 case BFD_RELOC_AARCH64_BRANCH19:
6396 if (fixP->fx_done || !seg->use_rela_p)
6397 {
6398 if (value & 3)
6399 as_bad_where (fixP->fx_file, fixP->fx_line,
6400 _("conditional branch target not word aligned"));
6401 if (signed_overflow (value, 21))
6402 as_bad_where (fixP->fx_file, fixP->fx_line,
6403 _("conditional branch out of range"));
6404 insn = get_aarch64_insn (buf);
6405 insn |= encode_cond_branch_ofs_19 (value >> 2);
6406 put_aarch64_insn (buf, insn);
6407 }
6408 break;
6409
6410 case BFD_RELOC_AARCH64_TSTBR14:
6411 if (fixP->fx_done || !seg->use_rela_p)
6412 {
6413 if (value & 3)
6414 as_bad_where (fixP->fx_file, fixP->fx_line,
6415 _("conditional branch target not word aligned"));
6416 if (signed_overflow (value, 16))
6417 as_bad_where (fixP->fx_file, fixP->fx_line,
6418 _("conditional branch out of range"));
6419 insn = get_aarch64_insn (buf);
6420 insn |= encode_tst_branch_ofs_14 (value >> 2);
6421 put_aarch64_insn (buf, insn);
6422 }
6423 break;
6424
6425 case BFD_RELOC_AARCH64_JUMP26:
6426 case BFD_RELOC_AARCH64_CALL26:
6427 if (fixP->fx_done || !seg->use_rela_p)
6428 {
6429 if (value & 3)
6430 as_bad_where (fixP->fx_file, fixP->fx_line,
6431 _("branch target not word aligned"));
6432 if (signed_overflow (value, 28))
6433 as_bad_where (fixP->fx_file, fixP->fx_line,
6434 _("branch out of range"));
6435 insn = get_aarch64_insn (buf);
6436 insn |= encode_branch_ofs_26 (value >> 2);
6437 put_aarch64_insn (buf, insn);
6438 }
6439 break;
6440
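/* The MOVW relocation groups below each select a 16-bit slice of the value:
   G0 covers bits [15:0], G1 bits [31:16], G2 bits [47:32] and G3 bits [63:48].
   Illustrative example (assumed syntax): "movz x0, #:abs_g1:sym", with sym
   resolved locally to 0x12340000, inserts 0x1234 into the imm16 field.  */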
6441 case BFD_RELOC_AARCH64_MOVW_G0:
6442 case BFD_RELOC_AARCH64_MOVW_G0_S:
6443 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6444 scale = 0;
6445 goto movw_common;
6446 case BFD_RELOC_AARCH64_MOVW_G1:
6447 case BFD_RELOC_AARCH64_MOVW_G1_S:
6448 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6449 scale = 16;
6450 goto movw_common;
6451 case BFD_RELOC_AARCH64_MOVW_G2:
6452 case BFD_RELOC_AARCH64_MOVW_G2_S:
6453 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6454 scale = 32;
6455 goto movw_common;
6456 case BFD_RELOC_AARCH64_MOVW_G3:
6457 scale = 48;
6458 movw_common:
6459 if (fixP->fx_done || !seg->use_rela_p)
6460 {
6461 insn = get_aarch64_insn (buf);
6462
6463 if (!fixP->fx_done)
6464 {
6465 /* REL signed addend must fit in 16 bits */
6466 if (signed_overflow (value, 16))
6467 as_bad_where (fixP->fx_file, fixP->fx_line,
6468 _("offset out of range"));
6469 }
6470 else
6471 {
6472 /* Check for overflow and scale. */
6473 switch (fixP->fx_r_type)
6474 {
6475 case BFD_RELOC_AARCH64_MOVW_G0:
6476 case BFD_RELOC_AARCH64_MOVW_G1:
6477 case BFD_RELOC_AARCH64_MOVW_G2:
6478 case BFD_RELOC_AARCH64_MOVW_G3:
6479 if (unsigned_overflow (value, scale + 16))
6480 as_bad_where (fixP->fx_file, fixP->fx_line,
6481 _("unsigned value out of range"));
6482 break;
6483 case BFD_RELOC_AARCH64_MOVW_G0_S:
6484 case BFD_RELOC_AARCH64_MOVW_G1_S:
6485 case BFD_RELOC_AARCH64_MOVW_G2_S:
6486 /* NOTE: We can only come here with movz or movn. */
6487 if (signed_overflow (value, scale + 16))
6488 as_bad_where (fixP->fx_file, fixP->fx_line,
6489 _("signed value out of range"));
6490 if (value < 0)
6491 {
6492 /* Force use of MOVN. */
6493 value = ~value;
6494 insn = reencode_movzn_to_movn (insn);
6495 }
6496 else
6497 {
6498 /* Force use of MOVZ. */
6499 insn = reencode_movzn_to_movz (insn);
6500 }
6501 break;
6502 default:
6503 /* Unchecked relocations. */
6504 break;
6505 }
6506 value >>= scale;
6507 }
6508
6509 /* Insert value into MOVN/MOVZ/MOVK instruction. */
6510 insn |= encode_movw_imm (value & 0xffff);
6511
6512 put_aarch64_insn (buf, insn);
6513 }
6514 break;
6515
6516 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6517 fixP->fx_r_type = (ilp32_p
6518 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
6519 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
6520 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6521 /* Should always be exported to object file, see
6522 aarch64_force_relocation(). */
6523 gas_assert (!fixP->fx_done);
6524 gas_assert (seg->use_rela_p);
6525 break;
6526
6527 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6528 fixP->fx_r_type = (ilp32_p
6529 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
6530 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
6531 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6532 /* Should always be exported to object file, see
6533 aarch64_force_relocation(). */
6534 gas_assert (!fixP->fx_done);
6535 gas_assert (seg->use_rela_p);
6536 break;
6537
6538 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6539 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6540 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6541 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6542 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6543 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6544 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6545 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6546 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6547 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6548 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6549 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6550 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6551 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6552 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6553 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6554 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6555 S_SET_THREAD_LOCAL (fixP->fx_addsy);
6556 /* Should always be exported to object file, see
6557 aarch64_force_relocation(). */
6558 gas_assert (!fixP->fx_done);
6559 gas_assert (seg->use_rela_p);
6560 break;
6561
6562 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6563 /* Should always be exported to object file, see
6564 aarch64_force_relocation(). */
6565 fixP->fx_r_type = (ilp32_p
6566 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
6567 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
6568 gas_assert (!fixP->fx_done);
6569 gas_assert (seg->use_rela_p);
6570 break;
6571
6572 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6573 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6574 case BFD_RELOC_AARCH64_ADD_LO12:
6575 case BFD_RELOC_AARCH64_LDST8_LO12:
6576 case BFD_RELOC_AARCH64_LDST16_LO12:
6577 case BFD_RELOC_AARCH64_LDST32_LO12:
6578 case BFD_RELOC_AARCH64_LDST64_LO12:
6579 case BFD_RELOC_AARCH64_LDST128_LO12:
6580 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6581 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6582 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6583 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6584 /* Should always be exported to object file, see
6585 aarch64_force_relocation(). */
6586 gas_assert (!fixP->fx_done);
6587 gas_assert (seg->use_rela_p);
6588 break;
6589
6590 case BFD_RELOC_AARCH64_TLSDESC_ADD:
6591 case BFD_RELOC_AARCH64_TLSDESC_LDR:
6592 case BFD_RELOC_AARCH64_TLSDESC_CALL:
6593 break;
6594
6595 default:
6596 as_bad_where (fixP->fx_file, fixP->fx_line,
6597 _("unexpected %s fixup"),
6598 bfd_get_reloc_code_name (fixP->fx_r_type));
6599 break;
6600 }
6601
6602 apply_fix_return:
6603 /* Free the allocated struct aarch64_inst.
6604 N.B. currently only a very limited number of fix-up types actually use
6605 this field, so the impact on performance should be minimal. */
6606 if (fixP->tc_fix_data.inst != NULL)
6607 free (fixP->tc_fix_data.inst);
6608
6609 return;
6610 }
6611
6612 /* Translate internal representation of relocation info to BFD target
6613 format. */
6614
6615 arelent *
6616 tc_gen_reloc (asection * section, fixS * fixp)
6617 {
6618 arelent *reloc;
6619 bfd_reloc_code_real_type code;
6620
6621 reloc = xmalloc (sizeof (arelent));
6622
6623 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
6624 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
6625 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
6626
6627 if (fixp->fx_pcrel)
6628 {
6629 if (section->use_rela_p)
6630 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
6631 else
6632 fixp->fx_offset = reloc->address;
6633 }
6634 reloc->addend = fixp->fx_offset;
6635
6636 code = fixp->fx_r_type;
6637 switch (code)
6638 {
6639 case BFD_RELOC_16:
6640 if (fixp->fx_pcrel)
6641 code = BFD_RELOC_16_PCREL;
6642 break;
6643
6644 case BFD_RELOC_32:
6645 if (fixp->fx_pcrel)
6646 code = BFD_RELOC_32_PCREL;
6647 break;
6648
6649 case BFD_RELOC_64:
6650 if (fixp->fx_pcrel)
6651 code = BFD_RELOC_64_PCREL;
6652 break;
6653
6654 default:
6655 break;
6656 }
6657
6658 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
6659 if (reloc->howto == NULL)
6660 {
6661 as_bad_where (fixp->fx_file, fixp->fx_line,
6662 _
6663 ("cannot represent %s relocation in this object file format"),
6664 bfd_get_reloc_code_name (code));
6665 return NULL;
6666 }
6667
6668 return reloc;
6669 }
6670
6671 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
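/* For example (illustrative): ".word sym" reaches here with size == 4 and
   creates a BFD_RELOC_32 fixup, while ".xword sym" uses size == 8 and
   BFD_RELOC_64.  */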
6672
6673 void
6674 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
6675 {
6676 bfd_reloc_code_real_type type;
6677 int pcrel = 0;
6678
6679 /* Pick a reloc.
6680 FIXME: @@ Should look at CPU word size. */
6681 switch (size)
6682 {
6683 case 1:
6684 type = BFD_RELOC_8;
6685 break;
6686 case 2:
6687 type = BFD_RELOC_16;
6688 break;
6689 case 4:
6690 type = BFD_RELOC_32;
6691 break;
6692 case 8:
6693 type = BFD_RELOC_64;
6694 break;
6695 default:
6696 as_bad (_("cannot do %u-byte relocation"), size);
6697 type = BFD_RELOC_UNUSED;
6698 break;
6699 }
6700
6701 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
6702 }
6703
6704 int
6705 aarch64_force_relocation (struct fix *fixp)
6706 {
6707 switch (fixp->fx_r_type)
6708 {
6709 case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
6710 /* Perform these "immediate" internal relocations
6711 even if the symbol is extern or weak. */
6712 return 0;
6713
6714 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
6715 case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
6716 case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
6717 /* Pseudo relocs that need to be fixed up according to
6718 ilp32_p. */
6719 return 0;
6720
6721 case BFD_RELOC_AARCH64_ADD_LO12:
6722 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6723 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6724 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6725 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6726 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6727 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6728 case BFD_RELOC_AARCH64_LDST128_LO12:
6729 case BFD_RELOC_AARCH64_LDST16_LO12:
6730 case BFD_RELOC_AARCH64_LDST32_LO12:
6731 case BFD_RELOC_AARCH64_LDST64_LO12:
6732 case BFD_RELOC_AARCH64_LDST8_LO12:
6733 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6734 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6735 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6736 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6737 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6738 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6739 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6740 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6741 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6742 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6743 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6744 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6745 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6746 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6747 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6748 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6749 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6750 /* Always leave these relocations for the linker. */
6751 return 1;
6752
6753 default:
6754 break;
6755 }
6756
6757 return generic_force_reloc (fixp);
6758 }
6759
6760 #ifdef OBJ_ELF
6761
6762 const char *
6763 elf64_aarch64_target_format (void)
6764 {
6765 if (target_big_endian)
6766 return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
6767 else
6768 return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
6769 }
6770
6771 void
6772 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
6773 {
6774 elf_frob_symbol (symp, puntp);
6775 }
6776 #endif
6777
6778 /* MD interface: Finalization. */
6779
6780 /* A good place to do this, although this was probably not intended
6781 for this kind of use. We need to dump the literal pool before
6782 references are made to a null symbol pointer. */
6783
6784 void
6785 aarch64_cleanup (void)
6786 {
6787 literal_pool *pool;
6788
6789 for (pool = list_of_pools; pool; pool = pool->next)
6790 {
6791 /* Put it at the end of the relevant section. */
6792 subseg_set (pool->section, pool->sub_section);
6793 s_ltorg (0);
6794 }
6795 }
6796
6797 #ifdef OBJ_ELF
6798 /* Remove any excess mapping symbols generated for alignment frags in
6799 SEC. We may have created a mapping symbol before a zero byte
6800 alignment; remove it if there's a mapping symbol after the
6801 alignment. */
6802 static void
6803 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
6804 void *dummy ATTRIBUTE_UNUSED)
6805 {
6806 segment_info_type *seginfo = seg_info (sec);
6807 fragS *fragp;
6808
6809 if (seginfo == NULL || seginfo->frchainP == NULL)
6810 return;
6811
6812 for (fragp = seginfo->frchainP->frch_root;
6813 fragp != NULL; fragp = fragp->fr_next)
6814 {
6815 symbolS *sym = fragp->tc_frag_data.last_map;
6816 fragS *next = fragp->fr_next;
6817
6818 /* Variable-sized frags have been converted to fixed size by
6819 this point. But if this was variable-sized to start with,
6820 there will be a fixed-size frag after it. So don't handle
6821 next == NULL. */
6822 if (sym == NULL || next == NULL)
6823 continue;
6824
6825 if (S_GET_VALUE (sym) < next->fr_address)
6826 /* Not at the end of this frag. */
6827 continue;
6828 know (S_GET_VALUE (sym) == next->fr_address);
6829
6830 do
6831 {
6832 if (next->tc_frag_data.first_map != NULL)
6833 {
6834 /* Next frag starts with a mapping symbol. Discard this
6835 one. */
6836 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6837 break;
6838 }
6839
6840 if (next->fr_next == NULL)
6841 {
6842 /* This mapping symbol is at the end of the section. Discard
6843 it. */
6844 know (next->fr_fix == 0 && next->fr_var == 0);
6845 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
6846 break;
6847 }
6848
6849 /* As long as we have empty frags without any mapping symbols,
6850 keep looking. */
6851 /* If the next frag is non-empty and does not start with a
6852 mapping symbol, then this mapping symbol is required. */
6853 if (next->fr_address != next->fr_next->fr_address)
6854 break;
6855
6856 next = next->fr_next;
6857 }
6858 while (next != NULL);
6859 }
6860 }
6861 #endif
6862
6863 /* Adjust the symbol table. */
6864
6865 void
6866 aarch64_adjust_symtab (void)
6867 {
6868 #ifdef OBJ_ELF
6869 /* Remove any overlapping mapping symbols generated by alignment frags. */
6870 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
6871 /* Now do generic ELF adjustments. */
6872 elf_adjust_symtab ();
6873 #endif
6874 }
6875
6876 static void
6877 checked_hash_insert (struct hash_control *table, const char *key, void *value)
6878 {
6879 const char *hash_err;
6880
6881 hash_err = hash_insert (table, key, value);
6882 if (hash_err)
6883 printf ("Internal Error: Can't hash %s\n", key);
6884 }
6885
6886 static void
6887 fill_instruction_hash_table (void)
6888 {
6889 aarch64_opcode *opcode = aarch64_opcode_table;
6890
6891 while (opcode->name != NULL)
6892 {
6893 templates *templ, *new_templ;
6894 templ = hash_find (aarch64_ops_hsh, opcode->name);
6895
6896 new_templ = (templates *) xmalloc (sizeof (templates));
6897 new_templ->opcode = opcode;
6898 new_templ->next = NULL;
6899
6900 if (!templ)
6901 checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
6902 else
6903 {
6904 new_templ->next = templ->next;
6905 templ->next = new_templ;
6906 }
6907 ++opcode;
6908 }
6909 }
6910
6911 static inline void
6912 convert_to_upper (char *dst, const char *src, size_t num)
6913 {
6914 unsigned int i;
6915 for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
6916 *dst = TOUPPER (*src);
6917 *dst = '\0';
6918 }
6919
6920 /* Assume STR points to a lower-case string; allocate, convert and return
6921 the corresponding upper-case string. */
6922 static inline const char*
6923 get_upper_str (const char *str)
6924 {
6925 char *ret;
6926 size_t len = strlen (str);
6927 if ((ret = xmalloc (len + 1)) == NULL)
6928 abort ();
6929 convert_to_upper (ret, str, len);
6930 return ret;
6931 }
6932
6933 /* MD interface: Initialization. */
6934
6935 void
6936 md_begin (void)
6937 {
6938 unsigned mach;
6939 unsigned int i;
6940
6941 if ((aarch64_ops_hsh = hash_new ()) == NULL
6942 || (aarch64_cond_hsh = hash_new ()) == NULL
6943 || (aarch64_shift_hsh = hash_new ()) == NULL
6944 || (aarch64_sys_regs_hsh = hash_new ()) == NULL
6945 || (aarch64_pstatefield_hsh = hash_new ()) == NULL
6946 || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
6947 || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
6948 || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
6949 || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
6950 || (aarch64_reg_hsh = hash_new ()) == NULL
6951 || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
6952 || (aarch64_nzcv_hsh = hash_new ()) == NULL
6953 || (aarch64_pldop_hsh = hash_new ()) == NULL)
6954 as_fatal (_("virtual memory exhausted"));
6955
6956 fill_instruction_hash_table ();
6957
6958 for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
6959 checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
6960 (void *) (aarch64_sys_regs + i));
6961
6962 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
6963 checked_hash_insert (aarch64_pstatefield_hsh,
6964 aarch64_pstatefields[i].name,
6965 (void *) (aarch64_pstatefields + i));
6966
6967 for (i = 0; aarch64_sys_regs_ic[i].template != NULL; i++)
6968 checked_hash_insert (aarch64_sys_regs_ic_hsh,
6969 aarch64_sys_regs_ic[i].template,
6970 (void *) (aarch64_sys_regs_ic + i));
6971
6972 for (i = 0; aarch64_sys_regs_dc[i].template != NULL; i++)
6973 checked_hash_insert (aarch64_sys_regs_dc_hsh,
6974 aarch64_sys_regs_dc[i].template,
6975 (void *) (aarch64_sys_regs_dc + i));
6976
6977 for (i = 0; aarch64_sys_regs_at[i].template != NULL; i++)
6978 checked_hash_insert (aarch64_sys_regs_at_hsh,
6979 aarch64_sys_regs_at[i].template,
6980 (void *) (aarch64_sys_regs_at + i));
6981
6982 for (i = 0; aarch64_sys_regs_tlbi[i].template != NULL; i++)
6983 checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
6984 aarch64_sys_regs_tlbi[i].template,
6985 (void *) (aarch64_sys_regs_tlbi + i));
6986
6987 for (i = 0; i < ARRAY_SIZE (reg_names); i++)
6988 checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
6989 (void *) (reg_names + i));
6990
6991 for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
6992 checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
6993 (void *) (nzcv_names + i));
6994
6995 for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
6996 {
6997 const char *name = aarch64_operand_modifiers[i].name;
6998 checked_hash_insert (aarch64_shift_hsh, name,
6999 (void *) (aarch64_operand_modifiers + i));
7000 /* Also hash the name in the upper case. */
7001 checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
7002 (void *) (aarch64_operand_modifiers + i));
7003 }
7004
7005 for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
7006 {
7007 unsigned int j;
7008 /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
7009 the same condition code. */
7010 for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
7011 {
7012 const char *name = aarch64_conds[i].names[j];
7013 if (name == NULL)
7014 break;
7015 checked_hash_insert (aarch64_cond_hsh, name,
7016 (void *) (aarch64_conds + i));
7017 /* Also hash the name in the upper case. */
7018 checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
7019 (void *) (aarch64_conds + i));
7020 }
7021 }
7022
7023 for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
7024 {
7025 const char *name = aarch64_barrier_options[i].name;
7026 /* Skip xx00 - the unallocated option values. */
7027 if ((i & 0x3) == 0)
7028 continue;
7029 checked_hash_insert (aarch64_barrier_opt_hsh, name,
7030 (void *) (aarch64_barrier_options + i));
7031 /* Also hash the name in the upper case. */
7032 checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
7033 (void *) (aarch64_barrier_options + i));
7034 }
7035
7036 for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
7037 {
7038 const char* name = aarch64_prfops[i].name;
7039 /* Skip the unallocated hint encodings. */
7040 if (name == NULL)
7041 continue;
7042 checked_hash_insert (aarch64_pldop_hsh, name,
7043 (void *) (aarch64_prfops + i));
7044 /* Also hash the name in the upper case. */
7045 checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
7046 (void *) (aarch64_prfops + i));
7047 }
7048
7049 /* Set the cpu variant based on the command-line options. */
7050 if (!mcpu_cpu_opt)
7051 mcpu_cpu_opt = march_cpu_opt;
7052
7053 if (!mcpu_cpu_opt)
7054 mcpu_cpu_opt = &cpu_default;
7055
7056 cpu_variant = *mcpu_cpu_opt;
7057
7058 /* Record the CPU type. */
7059 mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
7060
7061 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
7062 }
7063
7064 /* Command line processing. */
7065
7066 const char *md_shortopts = "m:";
7067
7068 #ifdef AARCH64_BI_ENDIAN
7069 #define OPTION_EB (OPTION_MD_BASE + 0)
7070 #define OPTION_EL (OPTION_MD_BASE + 1)
7071 #else
7072 #if TARGET_BYTES_BIG_ENDIAN
7073 #define OPTION_EB (OPTION_MD_BASE + 0)
7074 #else
7075 #define OPTION_EL (OPTION_MD_BASE + 1)
7076 #endif
7077 #endif
7078
7079 struct option md_longopts[] = {
7080 #ifdef OPTION_EB
7081 {"EB", no_argument, NULL, OPTION_EB},
7082 #endif
7083 #ifdef OPTION_EL
7084 {"EL", no_argument, NULL, OPTION_EL},
7085 #endif
7086 {NULL, no_argument, NULL, 0}
7087 };
7088
7089 size_t md_longopts_size = sizeof (md_longopts);
7090
7091 struct aarch64_option_table
7092 {
7093 char *option; /* Option name to match. */
7094 char *help; /* Help information. */
7095 int *var; /* Variable to change. */
7096 int value; /* What to change it to. */
7097 char *deprecated; /* If non-null, print this message. */
7098 };
7099
7100 static struct aarch64_option_table aarch64_opts[] = {
7101 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
7102 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
7103 NULL},
7104 #ifdef DEBUG_AARCH64
7105 {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
7106 #endif /* DEBUG_AARCH64 */
7107 {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
7108 NULL},
7109 {NULL, NULL, NULL, 0, NULL}
7110 };
7111
7112 struct aarch64_cpu_option_table
7113 {
7114 char *name;
7115 const aarch64_feature_set value;
7116 /* The canonical name of the CPU, or NULL to use NAME converted to upper
7117 case. */
7118 const char *canonical_name;
7119 };
7120
7121 /* This list should, at a minimum, contain all the cpu names
7122 recognized by GCC. */
7123 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
7124 {"all", AARCH64_ANY, NULL},
7125 {"cortex-a53", AARCH64_ARCH_V8, "Cortex-A53"},
7126 {"cortex-a57", AARCH64_ARCH_V8, "Cortex-A57"},
7127 {"generic", AARCH64_ARCH_V8, NULL},
7128
7129 /* These two are example CPUs supported in GCC, once we have real
7130 CPUs they will be removed. */
7131 {"example-1", AARCH64_ARCH_V8, NULL},
7132 {"example-2", AARCH64_ARCH_V8, NULL},
7133
7134 {NULL, AARCH64_ARCH_NONE, NULL}
7135 };
7136
7137 struct aarch64_arch_option_table
7138 {
7139 char *name;
7140 const aarch64_feature_set value;
7141 };
7142
7143 /* This list should, at a minimum, contain all the architecture names
7144 recognized by GCC. */
7145 static const struct aarch64_arch_option_table aarch64_archs[] = {
7146 {"all", AARCH64_ANY},
7147 {"armv8-a", AARCH64_ARCH_V8},
7148 {NULL, AARCH64_ARCH_NONE}
7149 };
7150
7151 /* ISA extensions. */
7152 struct aarch64_option_cpu_value_table
7153 {
7154 char *name;
7155 const aarch64_feature_set value;
7156 };
7157
7158 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
7159 {"crc", AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0)},
7160 {"crypto", AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0)},
7161 {"fp", AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
7162 {"simd", AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
7163 {NULL, AARCH64_ARCH_NONE}
7164 };
7165
7166 struct aarch64_long_option_table
7167 {
7168 char *option; /* Substring to match. */
7169 char *help; /* Help information. */
7170 int (*func) (char *subopt); /* Function to decode sub-option. */
7171 char *deprecated; /* If non-null, print this message. */
7172 };
7173
7174 static int
7175 aarch64_parse_features (char *str, const aarch64_feature_set **opt_p)
7176 {
7177 /* We insist on extensions being added before being removed. We achieve
7178 this by using the ADDING_VALUE variable to indicate whether we are
7179 adding an extension (1) or removing it (0) and only allowing it to
7180 change in the order -1 -> 1 -> 0. */
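/* For example (illustrative): an extension string such as "+crc+nofp" is
   accepted, whereas "+nofp+crc" is rejected because an extension would be
   added after one has already been removed.  */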
7181 int adding_value = -1;
7182 aarch64_feature_set *ext_set = xmalloc (sizeof (aarch64_feature_set));
7183
7184 /* Copy the feature set, so that we can modify it. */
7185 *ext_set = **opt_p;
7186 *opt_p = ext_set;
7187
7188 while (str != NULL && *str != 0)
7189 {
7190 const struct aarch64_option_cpu_value_table *opt;
7191 char *ext;
7192 int optlen;
7193
7194 if (*str != '+')
7195 {
7196 as_bad (_("invalid architectural extension"));
7197 return 0;
7198 }
7199
7200 str++;
7201 ext = strchr (str, '+');
7202
7203 if (ext != NULL)
7204 optlen = ext - str;
7205 else
7206 optlen = strlen (str);
7207
7208 if (optlen >= 2 && strncmp (str, "no", 2) == 0)
7209 {
7210 if (adding_value != 0)
7211 adding_value = 0;
7212 optlen -= 2;
7213 str += 2;
7214 }
7215 else if (optlen > 0)
7216 {
7217 if (adding_value == -1)
7218 adding_value = 1;
7219 else if (adding_value != 1)
7220 {
7221 as_bad (_("must specify extensions to add before specifying "
7222 "those to remove"));
7223 return FALSE;
7224 }
7225 }
7226
7227 if (optlen == 0)
7228 {
7229 as_bad (_("missing architectural extension"));
7230 return 0;
7231 }
7232
7233 gas_assert (adding_value != -1);
7234
7235 for (opt = aarch64_features; opt->name != NULL; opt++)
7236 if (strncmp (opt->name, str, optlen) == 0)
7237 {
7238 /* Add or remove the extension. */
7239 if (adding_value)
7240 AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
7241 else
7242 AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
7243 break;
7244 }
7245
7246 if (opt->name == NULL)
7247 {
7248 as_bad (_("unknown architectural extension `%s'"), str);
7249 return 0;
7250 }
7251
7252 str = ext;
7253 };
7254
7255 return 1;
7256 }
7257
7258 static int
7259 aarch64_parse_cpu (char *str)
7260 {
7261 const struct aarch64_cpu_option_table *opt;
7262 char *ext = strchr (str, '+');
7263 size_t optlen;
7264
7265 if (ext != NULL)
7266 optlen = ext - str;
7267 else
7268 optlen = strlen (str);
7269
7270 if (optlen == 0)
7271 {
7272 as_bad (_("missing cpu name `%s'"), str);
7273 return 0;
7274 }
7275
7276 for (opt = aarch64_cpus; opt->name != NULL; opt++)
7277 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7278 {
7279 mcpu_cpu_opt = &opt->value;
7280 if (ext != NULL)
7281 return aarch64_parse_features (ext, &mcpu_cpu_opt);
7282
7283 return 1;
7284 }
7285
7286 as_bad (_("unknown cpu `%s'"), str);
7287 return 0;
7288 }
7289
7290 static int
7291 aarch64_parse_arch (char *str)
7292 {
7293 const struct aarch64_arch_option_table *opt;
7294 char *ext = strchr (str, '+');
7295 size_t optlen;
7296
7297 if (ext != NULL)
7298 optlen = ext - str;
7299 else
7300 optlen = strlen (str);
7301
7302 if (optlen == 0)
7303 {
7304 as_bad (_("missing architecture name `%s'"), str);
7305 return 0;
7306 }
7307
7308 for (opt = aarch64_archs; opt->name != NULL; opt++)
7309 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7310 {
7311 march_cpu_opt = &opt->value;
7312 if (ext != NULL)
7313 return aarch64_parse_features (ext, &march_cpu_opt);
7314
7315 return 1;
7316 }
7317
7318 as_bad (_("unknown architecture `%s'\n"), str);
7319 return 0;
7320 }
7321
7322 /* ABIs. */
7323 struct aarch64_option_abi_value_table
7324 {
7325 char *name;
7326 enum aarch64_abi_type value;
7327 };
7328
7329 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
7330 {"ilp32", AARCH64_ABI_ILP32},
7331 {"lp64", AARCH64_ABI_LP64},
7332 {NULL, 0}
7333 };
7334
7335 static int
7336 aarch64_parse_abi (char *str)
7337 {
7338 const struct aarch64_option_abi_value_table *opt;
7339 size_t optlen = strlen (str);
7340
7341 if (optlen == 0)
7342 {
7343 as_bad (_("missing abi name `%s'"), str);
7344 return 0;
7345 }
7346
7347 for (opt = aarch64_abis; opt->name != NULL; opt++)
7348 if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
7349 {
7350 aarch64_abi = opt->value;
7351 return 1;
7352 }
7353
7354 as_bad (_("unknown abi `%s'\n"), str);
7355 return 0;
7356 }
7357
7358 static struct aarch64_long_option_table aarch64_long_opts[] = {
7359 #ifdef OBJ_ELF
7360 {"mabi=", N_("<abi name>\t specify for ABI <abi name>"),
7361 aarch64_parse_abi, NULL},
7362 #endif /* OBJ_ELF */
7363 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
7364 aarch64_parse_cpu, NULL},
7365 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
7366 aarch64_parse_arch, NULL},
7367 {NULL, NULL, 0, NULL}
7368 };
7369
7370 int
7371 md_parse_option (int c, char *arg)
7372 {
7373 struct aarch64_option_table *opt;
7374 struct aarch64_long_option_table *lopt;
7375
7376 switch (c)
7377 {
7378 #ifdef OPTION_EB
7379 case OPTION_EB:
7380 target_big_endian = 1;
7381 break;
7382 #endif
7383
7384 #ifdef OPTION_EL
7385 case OPTION_EL:
7386 target_big_endian = 0;
7387 break;
7388 #endif
7389
7390 case 'a':
7391 /* Listing option. Just ignore these, we don't support additional
7392 ones. */
7393 return 0;
7394
7395 default:
7396 for (opt = aarch64_opts; opt->option != NULL; opt++)
7397 {
7398 if (c == opt->option[0]
7399 && ((arg == NULL && opt->option[1] == 0)
7400 || streq (arg, opt->option + 1)))
7401 {
7402 /* If the option is deprecated, tell the user. */
7403 if (opt->deprecated != NULL)
7404 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
7405 arg ? arg : "", _(opt->deprecated));
7406
7407 if (opt->var != NULL)
7408 *opt->var = opt->value;
7409
7410 return 1;
7411 }
7412 }
7413
7414 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7415 {
7416 /* These options are expected to have an argument. */
7417 if (c == lopt->option[0]
7418 && arg != NULL
7419 && strncmp (arg, lopt->option + 1,
7420 strlen (lopt->option + 1)) == 0)
7421 {
7422 /* If the option is deprecated, tell the user. */
7423 if (lopt->deprecated != NULL)
7424 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
7425 _(lopt->deprecated));
7426
7427 /* Call the sub-option parser. */
7428 return lopt->func (arg + strlen (lopt->option) - 1);
7429 }
7430 }
7431
7432 return 0;
7433 }
7434
7435 return 1;
7436 }
7437
7438 void
7439 md_show_usage (FILE * fp)
7440 {
7441 struct aarch64_option_table *opt;
7442 struct aarch64_long_option_table *lopt;
7443
7444 fprintf (fp, _(" AArch64-specific assembler options:\n"));
7445
7446 for (opt = aarch64_opts; opt->option != NULL; opt++)
7447 if (opt->help != NULL)
7448 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
7449
7450 for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
7451 if (lopt->help != NULL)
7452 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
7453
7454 #ifdef OPTION_EB
7455 fprintf (fp, _("\
7456 -EB assemble code for a big-endian cpu\n"));
7457 #endif
7458
7459 #ifdef OPTION_EL
7460 fprintf (fp, _("\
7461 -EL assemble code for a little-endian cpu\n"));
7462 #endif
7463 }
7464
7465 /* Parse a .cpu directive. */
7466
7467 static void
7468 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
7469 {
7470 const struct aarch64_cpu_option_table *opt;
7471 char saved_char;
7472 char *name;
7473 char *ext;
7474 size_t optlen;
7475
7476 name = input_line_pointer;
7477 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7478 input_line_pointer++;
7479 saved_char = *input_line_pointer;
7480 *input_line_pointer = 0;
7481
7482 ext = strchr (name, '+');
7483
7484 if (ext != NULL)
7485 optlen = ext - name;
7486 else
7487 optlen = strlen (name);
7488
7489 /* Skip the first "all" entry. */
7490 for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
7491 if (strlen (opt->name) == optlen
7492 && strncmp (name, opt->name, optlen) == 0)
7493 {
7494 mcpu_cpu_opt = &opt->value;
7495 if (ext != NULL)
7496 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7497 return;
7498
7499 cpu_variant = *mcpu_cpu_opt;
7500
7501 *input_line_pointer = saved_char;
7502 demand_empty_rest_of_line ();
7503 return;
7504 }
7505 as_bad (_("unknown cpu `%s'"), name);
7506 *input_line_pointer = saved_char;
7507 ignore_rest_of_line ();
7508 }
7509
7510
7511 /* Parse a .arch directive. */
7512
7513 static void
7514 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
7515 {
7516 const struct aarch64_arch_option_table *opt;
7517 char saved_char;
7518 char *name;
7519 char *ext;
7520 size_t optlen;
7521
7522 name = input_line_pointer;
7523 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
7524 input_line_pointer++;
7525 saved_char = *input_line_pointer;
7526 *input_line_pointer = 0;
7527
7528 ext = strchr (name, '+');
7529
7530 if (ext != NULL)
7531 optlen = ext - name;
7532 else
7533 optlen = strlen (name);
7534
7535 /* Skip the first "all" entry. */
7536 for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
7537 if (strlen (opt->name) == optlen
7538 && strncmp (name, opt->name, optlen) == 0)
7539 {
7540 mcpu_cpu_opt = &opt->value;
7541 if (ext != NULL)
7542 if (!aarch64_parse_features (ext, &mcpu_cpu_opt))
7543 return;
7544
7545 cpu_variant = *mcpu_cpu_opt;
7546
7547 *input_line_pointer = saved_char;
7548 demand_empty_rest_of_line ();
7549 return;
7550 }
7551
7552 as_bad (_("unknown architecture `%s'\n"), name);
7553 *input_line_pointer = saved_char;
7554 ignore_rest_of_line ();
7555 }
7556
7557 /* Copy symbol information. */
7558
7559 void
7560 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
7561 {
7562 AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
7563 }
This page took 0.191918 seconds and 4 git commands to generate.