1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define INSNLEN 4
30
31 /* Cached mapping symbol state. */
32 enum map_type
33 {
34 MAP_INSN,
35 MAP_DATA
36 };
37
38 static enum map_type last_type;
39 static int last_mapping_sym = -1;
40 static bfd_vma last_mapping_addr = 0;
41
42 /* Other options */
43 static int no_aliases = 0; /* If set disassemble as most general inst. */
44 static int no_notes = 1; /* If set do not print disassemble notes in the
45 output as comments. */
46
47 /* Currently active instruction sequence. */
48 static aarch64_instr_sequence insn_sequence;
49
50 static void
51 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
52 {
53 }
54
55 static void
56 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
57 {
58 /* Try to match options that are simple flags */
59 if (CONST_STRNEQ (option, "no-aliases"))
60 {
61 no_aliases = 1;
62 return;
63 }
64
65 if (CONST_STRNEQ (option, "aliases"))
66 {
67 no_aliases = 0;
68 return;
69 }
70
71 if (CONST_STRNEQ (option, "no-notes"))
72 {
73 no_notes = 1;
74 return;
75 }
76
77 if (CONST_STRNEQ (option, "notes"))
78 {
79 no_notes = 0;
80 return;
81 }
82
83 #ifdef DEBUG_AARCH64
84 if (CONST_STRNEQ (option, "debug_dump"))
85 {
86 debug_dump = 1;
87 return;
88 }
89 #endif /* DEBUG_AARCH64 */
90
91 /* Invalid option. */
92 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
93 }
94
95 static void
96 parse_aarch64_dis_options (const char *options)
97 {
98 const char *option_end;
99
100 if (options == NULL)
101 return;
102
103 while (*options != '\0')
104 {
105 /* Skip empty options. */
106 if (*options == ',')
107 {
108 options++;
109 continue;
110 }
111
112 /* We know that *options is neither NUL nor a comma. */
113 option_end = options + 1;
114 while (*option_end != ',' && *option_end != '\0')
115 option_end++;
116
117 parse_aarch64_dis_option (options, option_end - options);
118
119 /* Go on to the next one. If option_end points to a comma, it
120 will be skipped above. */
121 options = option_end;
122 }
123 }
124 \f
125 /* Functions doing the instruction disassembling. */
126
127 /* The unnamed arguments consist of the number of fields, followed by
128 information about these fields; the VALUE will be extracted from CODE
129 and returned. MASK can be zero or the base mask of the opcode.
130
131 N.B. the fields are required to be in such an order that the most
132 significant field for VALUE comes first, e.g. the <index> in
133 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
134 is encoded in H:L:M; in such cases the fields should be passed in
135 the order of H, L, M. */
136
137 aarch64_insn
138 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
139 {
140 uint32_t num;
141 const aarch64_field *field;
142 enum aarch64_field_kind kind;
143 va_list va;
144
145 va_start (va, mask);
146 num = va_arg (va, uint32_t);
147 assert (num <= 5);
148 aarch64_insn value = 0x0;
149 while (num--)
150 {
151 kind = va_arg (va, enum aarch64_field_kind);
152 field = &fields[kind];
153 value <<= field->width;
154 value |= extract_field (kind, code, mask);
155 }
156 return value;
157 }
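/* For instance, the H:L:M index of a by-element multiply is decoded with
   extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M): the running value is
   shifted left by each field's width before the next field is OR-ed in,
   so FLD_H ends up as the most significant part of the result, as the
   ordering rule above requires.  */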
158
159 /* Extract the value of all fields in SELF->fields from instruction CODE.
160 The least significant bit comes from the final field. */
161
162 static aarch64_insn
163 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
164 {
165 aarch64_insn value;
166 unsigned int i;
167 enum aarch64_field_kind kind;
168
169 value = 0;
170 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
171 {
172 kind = self->fields[i];
173 value <<= fields[kind].width;
174 value |= extract_field (kind, code, 0);
175 }
176 return value;
177 }
178
179 /* Sign-extend bit I of VALUE. */
180 static inline int32_t
181 sign_extend (aarch64_insn value, unsigned i)
182 {
183 uint32_t ret = value;
184
185 assert (i < 32);
186 if ((value >> i) & 0x1)
187 {
188 uint32_t val = (uint32_t)(-1) << i;
189 ret = ret | val;
190 }
191 return (int32_t) ret;
192 }
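/* For example, sign_extend (0x1f0, 8) treats 0x1f0 as a 9-bit signed value:
   bit 8 is set, so the bits above it are filled with ones and the result
   is -16.  */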
193
194 /* N.B. the following inline helper functions create a dependency on the
195 order of operand qualifier enumerators. */
196
197 /* Given VALUE, return qualifier for a general purpose register. */
198 static inline enum aarch64_opnd_qualifier
199 get_greg_qualifier_from_value (aarch64_insn value)
200 {
201 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
202 assert (value <= 0x1
203 && aarch64_get_qualifier_standard_value (qualifier) == value);
204 return qualifier;
205 }
206
207 /* Given VALUE, return qualifier for a vector register. This does not support
208 decoding instructions that accept the 2H vector type. */
209
210 static inline enum aarch64_opnd_qualifier
211 get_vreg_qualifier_from_value (aarch64_insn value)
212 {
213 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
214
215 /* Instructions using vector type 2H should not call this function. Skip over
216 the 2H qualifier. */
217 if (qualifier >= AARCH64_OPND_QLF_V_2H)
218 qualifier += 1;
219
220 assert (value <= 0x8
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
226 static inline enum aarch64_opnd_qualifier
227 get_sreg_qualifier_from_value (aarch64_insn value)
228 {
229 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
230
231 assert (value <= 0x4
232 && aarch64_get_qualifier_standard_value (qualifier) == value);
233 return qualifier;
234 }
235
236 /* Given the instruction in *INST, which is probably halfway through the
237 decoding, our caller wants to know the expected qualifier for operand
238 I. Return such a qualifier if we can establish it; otherwise return
239 AARCH64_OPND_QLF_NIL. */
240
241 static aarch64_opnd_qualifier_t
242 get_expected_qualifier (const aarch64_inst *inst, int i)
243 {
244 aarch64_opnd_qualifier_seq_t qualifiers;
245 /* Should not be called if the qualifier is known. */
246 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
247 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
248 i, qualifiers))
249 return qualifiers[i];
250 else
251 return AARCH64_OPND_QLF_NIL;
252 }
253
254 /* Operand extractors. */
255
256 bfd_boolean
257 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
258 const aarch64_insn code,
259 const aarch64_inst *inst ATTRIBUTE_UNUSED,
260 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
261 {
262 info->reg.regno = extract_field (self->fields[0], code, 0);
263 return TRUE;
264 }
265
266 bfd_boolean
267 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
268 const aarch64_insn code ATTRIBUTE_UNUSED,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED,
270 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
271 {
272 assert (info->idx == 1
273 || info->idx == 3);
274 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
275 return TRUE;
276 }
277
278 /* e.g. IC <ic_op>{, <Xt>}. */
279 bfd_boolean
280 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
281 const aarch64_insn code,
282 const aarch64_inst *inst ATTRIBUTE_UNUSED,
283 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
284 {
285 info->reg.regno = extract_field (self->fields[0], code, 0);
286 assert (info->idx == 1
287 && (aarch64_get_operand_class (inst->operands[0].type)
288 == AARCH64_OPND_CLASS_SYSTEM));
289 /* This will make the constraint checking happy and more importantly will
290 help the disassembler determine whether this operand is optional or
291 not. */
292 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
293
294 return TRUE;
295 }
296
297 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
298 bfd_boolean
299 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
300 const aarch64_insn code,
301 const aarch64_inst *inst ATTRIBUTE_UNUSED,
302 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
303 {
304 /* regno */
305 info->reglane.regno = extract_field (self->fields[0], code,
306 inst->opcode->mask);
307
308 /* Index and/or type. */
309 if (inst->opcode->iclass == asisdone
310 || inst->opcode->iclass == asimdins)
311 {
312 if (info->type == AARCH64_OPND_En
313 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
314 {
315 unsigned shift;
316 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
317 assert (info->idx == 1); /* Vn */
318 aarch64_insn value = extract_field (FLD_imm4, code, 0);
319 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
320 info->qualifier = get_expected_qualifier (inst, info->idx);
321 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
322 info->reglane.index = value >> shift;
323 }
324 else
325 {
326 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
327 imm5<3:0> <V>
328 0000 RESERVED
329 xxx1 B
330 xx10 H
331 x100 S
332 1000 D */
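/* For example, imm5 == 01010: the loop below stops at pos == 1 (an H
   element, per the table above) and the index is value >> 1 == 2,
   i.e. imm5<4:2>.  */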
333 int pos = -1;
334 aarch64_insn value = extract_field (FLD_imm5, code, 0);
335 while (++pos <= 3 && (value & 0x1) == 0)
336 value >>= 1;
337 if (pos > 3)
338 return FALSE;
339 info->qualifier = get_sreg_qualifier_from_value (pos);
340 info->reglane.index = (unsigned) (value >> 1);
341 }
342 }
343 else if (inst->opcode->iclass == dotproduct)
344 {
345 /* Need information in other operand(s) to help decoding. */
346 info->qualifier = get_expected_qualifier (inst, info->idx);
347 switch (info->qualifier)
348 {
349 case AARCH64_OPND_QLF_S_4B:
350 /* L:H */
351 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
352 info->reglane.regno &= 0x1f;
353 break;
354 default:
355 return FALSE;
356 }
357 }
358 else if (inst->opcode->iclass == cryptosm3)
359 {
360 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
361 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
362 }
363 else
364 {
365 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
366 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
367
368 /* Need information in other operand(s) to help decoding. */
369 info->qualifier = get_expected_qualifier (inst, info->idx);
370 switch (info->qualifier)
371 {
372 case AARCH64_OPND_QLF_S_H:
373 if (info->type == AARCH64_OPND_Em16)
374 {
375 /* h:l:m */
376 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
377 FLD_M);
378 info->reglane.regno &= 0xf;
379 }
380 else
381 {
382 /* h:l */
383 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
384 }
385 break;
386 case AARCH64_OPND_QLF_S_S:
387 /* h:l */
388 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
389 break;
390 case AARCH64_OPND_QLF_S_D:
391 /* H */
392 info->reglane.index = extract_field (FLD_H, code, 0);
393 break;
394 default:
395 return FALSE;
396 }
397
398 if (inst->opcode->op == OP_FCMLA_ELEM
399 && info->qualifier != AARCH64_OPND_QLF_S_H)
400 {
401 /* Complex operand takes two elements. */
402 if (info->reglane.index & 1)
403 return FALSE;
404 info->reglane.index /= 2;
405 }
406 }
407
408 return TRUE;
409 }
410
411 bfd_boolean
412 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
413 const aarch64_insn code,
414 const aarch64_inst *inst ATTRIBUTE_UNUSED,
415 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
416 {
417 /* R */
418 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
419 /* len */
420 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
421 return TRUE;
422 }
423
424 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
425 bfd_boolean
426 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
427 aarch64_opnd_info *info, const aarch64_insn code,
428 const aarch64_inst *inst,
429 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
430 {
431 aarch64_insn value;
432 /* Number of elements in each structure to be loaded/stored. */
433 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
434
435 struct
436 {
437 unsigned is_reserved;
438 unsigned num_regs;
439 unsigned num_elements;
440 } data [] =
441 { {0, 4, 4},
442 {1, 4, 4},
443 {0, 4, 1},
444 {0, 4, 2},
445 {0, 3, 3},
446 {1, 3, 3},
447 {0, 3, 1},
448 {0, 1, 1},
449 {0, 2, 2},
450 {1, 2, 2},
451 {0, 2, 1},
452 };
453
454 /* Rt */
455 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
456 /* opcode */
457 value = extract_field (FLD_opcode, code, 0);
458 /* PR 21595: Check for a bogus value. */
459 if (value >= ARRAY_SIZE (data))
460 return FALSE;
461 if (expected_num != data[value].num_elements || data[value].is_reserved)
462 return FALSE;
463 info->reglist.num_regs = data[value].num_regs;
464
465 return TRUE;
466 }
467
468 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
469 lanes instructions. */
470 bfd_boolean
471 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
472 aarch64_opnd_info *info, const aarch64_insn code,
473 const aarch64_inst *inst,
474 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
475 {
476 aarch64_insn value;
477
478 /* Rt */
479 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
480 /* S */
481 value = extract_field (FLD_S, code, 0);
482
483 /* Number of registers is equal to the number of elements in
484 each structure to be loaded/stored. */
485 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
486 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
487
488 /* Except when it is LD1R. */
489 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
490 info->reglist.num_regs = 2;
491
492 return TRUE;
493 }
494
495 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
496 load/store single element instructions. */
497 bfd_boolean
498 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
499 aarch64_opnd_info *info, const aarch64_insn code,
500 const aarch64_inst *inst ATTRIBUTE_UNUSED,
501 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
502 {
503 aarch64_field field = {0, 0};
504 aarch64_insn QSsize; /* fields Q:S:size. */
505 aarch64_insn opcodeh2; /* opcode<2:1> */
506
507 /* Rt */
508 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
509
510 /* Decode the index, opcode<2:1> and size. */
511 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
512 opcodeh2 = extract_field_2 (&field, code, 0);
513 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
514 switch (opcodeh2)
515 {
516 case 0x0:
517 info->qualifier = AARCH64_OPND_QLF_S_B;
518 /* Index encoded in "Q:S:size". */
519 info->reglist.index = QSsize;
520 break;
521 case 0x1:
522 if (QSsize & 0x1)
523 /* UND. */
524 return FALSE;
525 info->qualifier = AARCH64_OPND_QLF_S_H;
526 /* Index encoded in "Q:S:size<1>". */
527 info->reglist.index = QSsize >> 1;
528 break;
529 case 0x2:
530 if ((QSsize >> 1) & 0x1)
531 /* UND. */
532 return FALSE;
533 if ((QSsize & 0x1) == 0)
534 {
535 info->qualifier = AARCH64_OPND_QLF_S_S;
536 /* Index encoded in "Q:S". */
537 info->reglist.index = QSsize >> 2;
538 }
539 else
540 {
541 if (extract_field (FLD_S, code, 0))
542 /* UND */
543 return FALSE;
544 info->qualifier = AARCH64_OPND_QLF_S_D;
545 /* Index encoded in "Q". */
546 info->reglist.index = QSsize >> 3;
547 }
548 break;
549 default:
550 return FALSE;
551 }
552
553 info->reglist.has_index = 1;
554 info->reglist.num_regs = 0;
555 /* Number of registers is equal to the number of elements in
556 each structure to be loaded/stored. */
557 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
558 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
559
560 return TRUE;
561 }
562
563 /* Decode fields immh:immb and/or Q for e.g.
564 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
565 or SSHR <V><d>, <V><n>, #<shift>. */
566
567 bfd_boolean
568 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
569 aarch64_opnd_info *info, const aarch64_insn code,
570 const aarch64_inst *inst,
571 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
572 {
573 int pos;
574 aarch64_insn Q, imm, immh;
575 enum aarch64_insn_class iclass = inst->opcode->iclass;
576
577 immh = extract_field (FLD_immh, code, 0);
578 if (immh == 0)
579 return FALSE;
580 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
581 pos = 4;
582 /* Get highest set bit in immh. */
583 while (--pos >= 0 && (immh & 0x8) == 0)
584 immh <<= 1;
585
586 assert ((iclass == asimdshf || iclass == asisdshf)
587 && (info->type == AARCH64_OPND_IMM_VLSR
588 || info->type == AARCH64_OPND_IMM_VLSL));
589
590 if (iclass == asimdshf)
591 {
592 Q = extract_field (FLD_Q, code, 0);
593 /* immh Q <T>
594 0000 x SEE AdvSIMD modified immediate
595 0001 0 8B
596 0001 1 16B
597 001x 0 4H
598 001x 1 8H
599 01xx 0 2S
600 01xx 1 4S
601 1xxx 0 RESERVED
602 1xxx 1 2D */
603 info->qualifier =
604 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
605 }
606 else
607 info->qualifier = get_sreg_qualifier_from_value (pos);
608
609 if (info->type == AARCH64_OPND_IMM_VLSR)
610 /* immh <shift>
611 0000 SEE AdvSIMD modified immediate
612 0001 (16-UInt(immh:immb))
613 001x (32-UInt(immh:immb))
614 01xx (64-UInt(immh:immb))
615 1xxx (128-UInt(immh:immb)) */
616 info->imm.value = (16 << pos) - imm;
617 else
618 /* immh:immb
619 immh <shift>
620 0000 SEE AdvSIMD modified immediate
621 0001 (UInt(immh:immb)-8)
622 001x (UInt(immh:immb)-16)
623 01xx (UInt(immh:immb)-32)
624 1xxx (UInt(immh:immb)-64) */
625 info->imm.value = imm - (8 << pos);
626
627 return TRUE;
628 }
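/* Worked example: immh:immb == 0010110 (immh == 0010, immb == 110) gives
   imm == 22 and pos == 1, so for AARCH64_OPND_IMM_VLSR the shift is
   (16 << 1) - 22 == 10, matching the 32-UInt(immh:immb) row above.  */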
629
630 /* Decode shift immediate for e.g. sshr (imm). */
631 bfd_boolean
632 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
633 aarch64_opnd_info *info, const aarch64_insn code,
634 const aarch64_inst *inst ATTRIBUTE_UNUSED,
635 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
636 {
637 int64_t imm;
638 aarch64_insn val;
639 val = extract_field (FLD_size, code, 0);
640 switch (val)
641 {
642 case 0: imm = 8; break;
643 case 1: imm = 16; break;
644 case 2: imm = 32; break;
645 default: return FALSE;
646 }
647 info->imm.value = imm;
648 return TRUE;
649 }
650
651 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
652 The value in the field(s) will be extracted as an unsigned immediate value. */
653 bfd_boolean
654 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
655 const aarch64_insn code,
656 const aarch64_inst *inst ATTRIBUTE_UNUSED,
657 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
658 {
659 int64_t imm;
660
661 imm = extract_all_fields (self, code);
662
663 if (operand_need_sign_extension (self))
664 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
665
666 if (operand_need_shift_by_two (self))
667 imm <<= 2;
668 else if (operand_need_shift_by_four (self))
669 imm <<= 4;
670
671 if (info->type == AARCH64_OPND_ADDR_ADRP)
672 imm <<= 12;
673
674 info->imm.value = imm;
675 return TRUE;
676 }
677
678 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
679 bfd_boolean
680 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
681 const aarch64_insn code,
682 const aarch64_inst *inst ATTRIBUTE_UNUSED,
683 aarch64_operand_error *errors)
684 {
685 aarch64_ext_imm (self, info, code, inst, errors);
686 info->shifter.kind = AARCH64_MOD_LSL;
687 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
688 return TRUE;
689 }
690
691 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
692 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
693 bfd_boolean
694 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
695 aarch64_opnd_info *info,
696 const aarch64_insn code,
697 const aarch64_inst *inst ATTRIBUTE_UNUSED,
698 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
699 {
700 uint64_t imm;
701 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
702 aarch64_field field = {0, 0};
703
704 assert (info->idx == 1);
705
706 if (info->type == AARCH64_OPND_SIMD_FPIMM)
707 info->imm.is_fp = 1;
708
709 /* a:b:c:d:e:f:g:h */
710 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
711 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
712 {
713 /* Either MOVI <Dd>, #<imm>
714 or MOVI <Vd>.2D, #<imm>.
715 <imm> is a 64-bit immediate
716 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
717 encoded in "a:b:c:d:e:f:g:h". */
718 int i;
719 unsigned abcdefgh = imm;
720 for (imm = 0ull, i = 0; i < 8; i++)
721 if (((abcdefgh >> i) & 0x1) != 0)
722 imm |= 0xffull << (8 * i);
723 }
724 info->imm.value = imm;
725
726 /* cmode */
727 info->qualifier = get_expected_qualifier (inst, info->idx);
728 switch (info->qualifier)
729 {
730 case AARCH64_OPND_QLF_NIL:
731 /* no shift */
732 info->shifter.kind = AARCH64_MOD_NONE;
733 return 1;
734 case AARCH64_OPND_QLF_LSL:
735 /* shift zeros */
736 info->shifter.kind = AARCH64_MOD_LSL;
737 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
738 {
739 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
740 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
741 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
742 default: assert (0); return FALSE;
743 }
744 /* 00: 0; 01: 8; 10:16; 11:24. */
745 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
746 break;
747 case AARCH64_OPND_QLF_MSL:
748 /* shift ones */
749 info->shifter.kind = AARCH64_MOD_MSL;
750 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
751 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
752 break;
753 default:
754 assert (0);
755 return FALSE;
756 }
757
758 return TRUE;
759 }
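/* For example, with a 64-bit destination (MOVI <Vd>.2D), abcdefgh ==
   10000001 expands to the immediate 0xff000000000000ff: each set bit in
   a:b:c:d:e:f:g:h selects a byte of all ones.  */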
760
761 /* Decode an 8-bit floating-point immediate. */
762 bfd_boolean
763 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
764 const aarch64_insn code,
765 const aarch64_inst *inst ATTRIBUTE_UNUSED,
766 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
767 {
768 info->imm.value = extract_all_fields (self, code);
769 info->imm.is_fp = 1;
770 return TRUE;
771 }
772
773 /* Decode a 1-bit rotate immediate (#90 or #270). */
774 bfd_boolean
775 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
776 const aarch64_insn code,
777 const aarch64_inst *inst ATTRIBUTE_UNUSED,
778 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
779 {
780 uint64_t rot = extract_field (self->fields[0], code, 0);
781 assert (rot < 2U);
782 info->imm.value = rot * 180 + 90;
783 return TRUE;
784 }
785
786 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
787 bfd_boolean
788 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
789 const aarch64_insn code,
790 const aarch64_inst *inst ATTRIBUTE_UNUSED,
791 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
792 {
793 uint64_t rot = extract_field (self->fields[0], code, 0);
794 assert (rot < 4U);
795 info->imm.value = rot * 90;
796 return TRUE;
797 }
798
799 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
800 bfd_boolean
801 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
802 aarch64_opnd_info *info, const aarch64_insn code,
803 const aarch64_inst *inst ATTRIBUTE_UNUSED,
804 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
805 {
806 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
807 return TRUE;
808 }
809
810 /* Decode arithmetic immediate for e.g.
811 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
812 bfd_boolean
813 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
814 aarch64_opnd_info *info, const aarch64_insn code,
815 const aarch64_inst *inst ATTRIBUTE_UNUSED,
816 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
817 {
818 aarch64_insn value;
819
820 info->shifter.kind = AARCH64_MOD_LSL;
821 /* shift */
822 value = extract_field (FLD_shift, code, 0);
823 if (value >= 2)
824 return FALSE;
825 info->shifter.amount = value ? 12 : 0;
826 /* imm12 (unsigned) */
827 info->imm.value = extract_field (FLD_imm12, code, 0);
828
829 return TRUE;
830 }
831
832 /* Return true if VALUE is a valid logical immediate encoding, storing the
833 decoded value in *RESULT if so. ESIZE is the number of bytes in the
834 decoded immediate. */
835 static bfd_boolean
836 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
837 {
838 uint64_t imm, mask;
839 uint32_t N, R, S;
840 unsigned simd_size;
841
842 /* value is N:immr:imms. */
843 S = value & 0x3f;
844 R = (value >> 6) & 0x3f;
845 N = (value >> 12) & 0x1;
846
847 /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
848 (in other words, right rotated by R), then replicated. */
849 if (N != 0)
850 {
851 simd_size = 64;
852 mask = 0xffffffffffffffffull;
853 }
854 else
855 {
856 switch (S)
857 {
858 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
859 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
860 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
861 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
862 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
863 default: return FALSE;
864 }
865 mask = (1ull << simd_size) - 1;
866 /* Top bits are IGNORED. */
867 R &= simd_size - 1;
868 }
869
870 if (simd_size > esize * 8)
871 return FALSE;
872
873 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
874 if (S == simd_size - 1)
875 return FALSE;
876 /* S+1 consecutive bits to 1. */
877 /* NOTE: S can't be 63 due to detection above. */
878 imm = (1ull << (S + 1)) - 1;
879 /* Rotate to the left by simd_size - R. */
880 if (R != 0)
881 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
882 /* Replicate the value according to SIMD size. */
883 switch (simd_size)
884 {
885 case 2: imm = (imm << 2) | imm;
886 /* Fall through. */
887 case 4: imm = (imm << 4) | imm;
888 /* Fall through. */
889 case 8: imm = (imm << 8) | imm;
890 /* Fall through. */
891 case 16: imm = (imm << 16) | imm;
892 /* Fall through. */
893 case 32: imm = (imm << 32) | imm;
894 /* Fall through. */
895 case 64: break;
896 default: assert (0); return 0;
897 }
898
899 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
900
901 return TRUE;
902 }
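/* Worked example: N:immr:imms == 0:000001:000111 gives S == 7, R == 1 and
   simd_size == 32.  Eight consecutive ones (0xff) rotated right by 1
   within 32 bits is 0x8000007f, which is replicated to
   0x8000007f8000007f and then truncated to ESIZE bytes.  */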
903
904 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
905 bfd_boolean
906 aarch64_ext_limm (const aarch64_operand *self,
907 aarch64_opnd_info *info, const aarch64_insn code,
908 const aarch64_inst *inst,
909 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
910 {
911 uint32_t esize;
912 aarch64_insn value;
913
914 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
915 self->fields[2]);
916 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
917 return decode_limm (esize, value, &info->imm.value);
918 }
919
920 /* Decode a logical immediate for the BIC alias of AND (etc.). */
921 bfd_boolean
922 aarch64_ext_inv_limm (const aarch64_operand *self,
923 aarch64_opnd_info *info, const aarch64_insn code,
924 const aarch64_inst *inst,
925 aarch64_operand_error *errors)
926 {
927 if (!aarch64_ext_limm (self, info, code, inst, errors))
928 return FALSE;
929 info->imm.value = ~info->imm.value;
930 return TRUE;
931 }
932
933 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
934 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
935 bfd_boolean
936 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
937 aarch64_opnd_info *info,
938 const aarch64_insn code, const aarch64_inst *inst,
939 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
940 {
941 aarch64_insn value;
942
943 /* Rt */
944 info->reg.regno = extract_field (FLD_Rt, code, 0);
945
946 /* size */
947 value = extract_field (FLD_ldst_size, code, 0);
948 if (inst->opcode->iclass == ldstpair_indexed
949 || inst->opcode->iclass == ldstnapair_offs
950 || inst->opcode->iclass == ldstpair_off
951 || inst->opcode->iclass == loadlit)
952 {
953 enum aarch64_opnd_qualifier qualifier;
954 switch (value)
955 {
956 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
957 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
958 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
959 default: return FALSE;
960 }
961 info->qualifier = qualifier;
962 }
963 else
964 {
965 /* opc1:size */
966 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
967 if (value > 0x4)
968 return FALSE;
969 info->qualifier = get_sreg_qualifier_from_value (value);
970 }
971
972 return TRUE;
973 }
974
975 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
976 bfd_boolean
977 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
978 aarch64_opnd_info *info,
979 aarch64_insn code,
980 const aarch64_inst *inst ATTRIBUTE_UNUSED,
981 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
982 {
983 /* Rn */
984 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
985 return TRUE;
986 }
987
988
989
990 /* Decode the address operand for e.g. STGV <Xt>, [<Xn|SP>]!. */
991 bfd_boolean
992 aarch64_ext_addr_simple_2 (const aarch64_operand *self ATTRIBUTE_UNUSED,
993 aarch64_opnd_info *info,
994 aarch64_insn code,
995 const aarch64_inst *inst ATTRIBUTE_UNUSED,
996 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
997 {
998 /* Rn */
999 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1000 info->addr.writeback = 1;
1001 info->addr.preind = 1;
1002 return TRUE;
1003 }
1004
1005 /* Decode the address operand for e.g.
1006 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
1007 bfd_boolean
1008 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
1009 aarch64_opnd_info *info,
1010 aarch64_insn code, const aarch64_inst *inst,
1011 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1012 {
1013 info->qualifier = get_expected_qualifier (inst, info->idx);
1014
1015 /* Rn */
1016 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1017
1018 /* simm9 */
1019 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
1020 info->addr.offset.imm = sign_extend (imm, 8);
1021 if (extract_field (self->fields[2], code, 0) == 1) {
1022 info->addr.writeback = 1;
1023 info->addr.preind = 1;
1024 }
1025 return TRUE;
1026 }
1027
1028 /* Decode the address operand for e.g.
1029 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1030 bfd_boolean
1031 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
1032 aarch64_opnd_info *info,
1033 aarch64_insn code, const aarch64_inst *inst,
1034 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1035 {
1036 aarch64_insn S, value;
1037
1038 /* Rn */
1039 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1040 /* Rm */
1041 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1042 /* option */
1043 value = extract_field (FLD_option, code, 0);
1044 info->shifter.kind =
1045 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1046 /* Fix-up the shifter kind; although the table-driven approach is
1047 efficient, it is slightly inflexible, thus needing this fix-up. */
1048 if (info->shifter.kind == AARCH64_MOD_UXTX)
1049 info->shifter.kind = AARCH64_MOD_LSL;
1050 /* S */
1051 S = extract_field (FLD_S, code, 0);
1052 if (S == 0)
1053 {
1054 info->shifter.amount = 0;
1055 info->shifter.amount_present = 0;
1056 }
1057 else
1058 {
1059 int size;
1060 /* Need information in other operand(s) to help achieve the decoding
1061 from 'S' field. */
1062 info->qualifier = get_expected_qualifier (inst, info->idx);
1063 /* Get the size of the data element that is accessed, which may be
1064 different from that of the source register size, e.g. in strb/ldrb. */
1065 size = aarch64_get_qualifier_esize (info->qualifier);
1066 info->shifter.amount = get_logsz (size);
1067 info->shifter.amount_present = 1;
1068 }
1069
1070 return TRUE;
1071 }
1072
1073 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1074 bfd_boolean
1075 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1076 aarch64_insn code, const aarch64_inst *inst,
1077 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1078 {
1079 aarch64_insn imm;
1080 info->qualifier = get_expected_qualifier (inst, info->idx);
1081
1082 /* Rn */
1083 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1084 /* simm (imm9 or imm7) */
1085 imm = extract_field (self->fields[0], code, 0);
1086 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1087 if (self->fields[0] == FLD_imm7
1088 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
1089 /* scaled immediate in ld/st pair instructions. */
1090 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1091 /* qualifier */
1092 if (inst->opcode->iclass == ldst_unscaled
1093 || inst->opcode->iclass == ldstnapair_offs
1094 || inst->opcode->iclass == ldstpair_off
1095 || inst->opcode->iclass == ldst_unpriv)
1096 info->addr.writeback = 0;
1097 else
1098 {
1099 /* pre/post- index */
1100 info->addr.writeback = 1;
1101 if (extract_field (self->fields[1], code, 0) == 1)
1102 info->addr.preind = 1;
1103 else
1104 info->addr.postind = 1;
1105 }
1106
1107 return TRUE;
1108 }
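/* For example, in the 64-bit LDP form (8-byte element size) an imm7 field
   of 0x7f sign-extends to -1 and is then scaled by the element size,
   giving a byte offset of -8.  */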
1109
1110 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1111 bfd_boolean
1112 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1113 aarch64_insn code,
1114 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1115 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1116 {
1117 int shift;
1118 info->qualifier = get_expected_qualifier (inst, info->idx);
1119 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1120 /* Rn */
1121 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1122 /* uimm12 */
1123 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1124 return TRUE;
1125 }
1126
1127 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1128 bfd_boolean
1129 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1130 aarch64_insn code,
1131 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1132 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1133 {
1134 aarch64_insn imm;
1135
1136 info->qualifier = get_expected_qualifier (inst, info->idx);
1137 /* Rn */
1138 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1139 /* simm10 */
1140 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1141 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1142 if (extract_field (self->fields[3], code, 0) == 1) {
1143 info->addr.writeback = 1;
1144 info->addr.preind = 1;
1145 }
1146 return TRUE;
1147 }
1148
1149 /* Decode the address operand for e.g.
1150 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1151 bfd_boolean
1152 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1153 aarch64_opnd_info *info,
1154 aarch64_insn code, const aarch64_inst *inst,
1155 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1156 {
1157 /* The opcode dependent area stores the number of elements in
1158 each structure to be loaded/stored. */
1159 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1160
1161 /* Rn */
1162 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1163 /* Rm | #<amount> */
1164 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1165 if (info->addr.offset.regno == 31)
1166 {
1167 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1168 /* Special handling of loading single structure to all lanes. */
1169 info->addr.offset.imm = (is_ld1r ? 1
1170 : inst->operands[0].reglist.num_regs)
1171 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1172 else
1173 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1174 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1175 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1176 }
1177 else
1178 info->addr.offset.is_reg = 1;
1179 info->addr.writeback = 1;
1180
1181 return TRUE;
1182 }
1183
1184 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1185 bfd_boolean
1186 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1187 aarch64_opnd_info *info,
1188 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1189 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1190 {
1191 aarch64_insn value;
1192 /* cond */
1193 value = extract_field (FLD_cond, code, 0);
1194 info->cond = get_cond_from_value (value);
1195 return TRUE;
1196 }
1197
1198 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1199 bfd_boolean
1200 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1201 aarch64_opnd_info *info,
1202 aarch64_insn code,
1203 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1204 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1205 {
1206 /* op0:op1:CRn:CRm:op2 */
1207 info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1208 FLD_CRm, FLD_op2);
1209 info->sysreg.flags = 0;
1210
1211 /* If a system instruction, check which restrictions should be on the register
1212 value during decoding; these will be enforced then. */
1213 if (inst->opcode->iclass == ic_system)
1214 {
1215 /* Check to see if it's read-only; else check if it's write-only.
1216 If it's both or unspecified, don't care. */
1217 if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
1218 info->sysreg.flags = F_REG_READ;
1219 else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
1220 == F_SYS_WRITE)
1221 info->sysreg.flags = F_REG_WRITE;
1222 }
1223
1224 return TRUE;
1225 }
1226
1227 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1228 bfd_boolean
1229 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1230 aarch64_opnd_info *info, aarch64_insn code,
1231 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1232 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1233 {
1234 int i;
1235 /* op1:op2 */
1236 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1237 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1238 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1239 return TRUE;
1240 /* Reserved value in <pstatefield>. */
1241 return FALSE;
1242 }
1243
1244 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1245 bfd_boolean
1246 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1247 aarch64_opnd_info *info,
1248 aarch64_insn code,
1249 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1250 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1251 {
1252 int i;
1253 aarch64_insn value;
1254 const aarch64_sys_ins_reg *sysins_ops;
1255 /* op0:op1:CRn:CRm:op2 */
1256 value = extract_fields (code, 0, 5,
1257 FLD_op0, FLD_op1, FLD_CRn,
1258 FLD_CRm, FLD_op2);
1259
1260 switch (info->type)
1261 {
1262 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1263 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1264 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1265 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1266 case AARCH64_OPND_SYSREG_SR:
1267 sysins_ops = aarch64_sys_regs_sr;
1268 /* Let's remove op2 for rctx. Refer to comments in the definition of
1269 aarch64_sys_regs_sr[]. */
1270 value = value & ~(0x7);
1271 break;
1272 default: assert (0); return FALSE;
1273 }
1274
1275 for (i = 0; sysins_ops[i].name != NULL; ++i)
1276 if (sysins_ops[i].value == value)
1277 {
1278 info->sysins_op = sysins_ops + i;
1279 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1280 info->sysins_op->name,
1281 (unsigned)info->sysins_op->value,
1282 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1283 return TRUE;
1284 }
1285
1286 return FALSE;
1287 }
1288
1289 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1290
1291 bfd_boolean
1292 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1293 aarch64_opnd_info *info,
1294 aarch64_insn code,
1295 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1296 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1297 {
1298 /* CRm */
1299 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1300 return TRUE;
1301 }
1302
1303 /* Decode the prefetch operation option operand for e.g.
1304 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1305
1306 bfd_boolean
1307 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1308 aarch64_opnd_info *info,
1309 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
1310 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1311 {
1312 /* prfop in Rt */
1313 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1314 return TRUE;
1315 }
1316
1317 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1318 to the matching name/value pair in aarch64_hint_options. */
1319
1320 bfd_boolean
1321 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1322 aarch64_opnd_info *info,
1323 aarch64_insn code,
1324 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1325 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1326 {
1327 /* CRm:op2. */
1328 unsigned hint_number;
1329 int i;
1330
1331 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1332
1333 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1334 {
1335 if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
1336 {
1337 info->hint_option = &(aarch64_hint_options[i]);
1338 return TRUE;
1339 }
1340 }
1341
1342 return FALSE;
1343 }
1344
1345 /* Decode the extended register operand for e.g.
1346 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1347 bfd_boolean
1348 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1349 aarch64_opnd_info *info,
1350 aarch64_insn code,
1351 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1352 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1353 {
1354 aarch64_insn value;
1355
1356 /* Rm */
1357 info->reg.regno = extract_field (FLD_Rm, code, 0);
1358 /* option */
1359 value = extract_field (FLD_option, code, 0);
1360 info->shifter.kind =
1361 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1362 /* imm3 */
1363 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1364
1365 /* This makes the constraint checking happy. */
1366 info->shifter.operator_present = 1;
1367
1368 /* Assume inst->operands[0].qualifier has been resolved. */
1369 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1370 info->qualifier = AARCH64_OPND_QLF_W;
1371 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1372 && (info->shifter.kind == AARCH64_MOD_UXTX
1373 || info->shifter.kind == AARCH64_MOD_SXTX))
1374 info->qualifier = AARCH64_OPND_QLF_X;
1375
1376 return TRUE;
1377 }
1378
1379 /* Decode the shifted register operand for e.g.
1380 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1381 bfd_boolean
1382 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1383 aarch64_opnd_info *info,
1384 aarch64_insn code,
1385 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1386 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1387 {
1388 aarch64_insn value;
1389
1390 /* Rm */
1391 info->reg.regno = extract_field (FLD_Rm, code, 0);
1392 /* shift */
1393 value = extract_field (FLD_shift, code, 0);
1394 info->shifter.kind =
1395 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1396 if (info->shifter.kind == AARCH64_MOD_ROR
1397 && inst->opcode->iclass != log_shift)
1398 /* ROR is not available for the shifted register operand in arithmetic
1399 instructions. */
1400 return FALSE;
1401 /* imm6 */
1402 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1403
1404 /* This makes the constraint checking happy. */
1405 info->shifter.operator_present = 1;
1406
1407 return TRUE;
1408 }
1409
1410 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1411 where <offset> is given by the OFFSET parameter and where <factor> is
1412 1 plus SELF's operand-dependent value. fields[0] specifies the field
1413 that holds <base>. */
1414 static bfd_boolean
1415 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1416 aarch64_opnd_info *info, aarch64_insn code,
1417 int64_t offset)
1418 {
1419 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1420 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1421 info->addr.offset.is_reg = FALSE;
1422 info->addr.writeback = FALSE;
1423 info->addr.preind = TRUE;
1424 if (offset != 0)
1425 info->shifter.kind = AARCH64_MOD_MUL_VL;
1426 info->shifter.amount = 1;
1427 info->shifter.operator_present = (info->addr.offset.imm != 0);
1428 info->shifter.amount_present = FALSE;
1429 return TRUE;
1430 }
1431
1432 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1433 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1434 SELF's operand-dependent value. fields[0] specifies the field that
1435 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1436 bfd_boolean
1437 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1438 aarch64_opnd_info *info, aarch64_insn code,
1439 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1440 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1441 {
1442 int offset;
1443
1444 offset = extract_field (FLD_SVE_imm4, code, 0);
1445 offset = ((offset + 8) & 15) - 8;
1446 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1447 }
1448
1449 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1450 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1451 SELF's operand-dependent value. fields[0] specifies the field that
1452 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1453 bfd_boolean
1454 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1455 aarch64_opnd_info *info, aarch64_insn code,
1456 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1457 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1458 {
1459 int offset;
1460
1461 offset = extract_field (FLD_SVE_imm6, code, 0);
1462 offset = (((offset + 32) & 63) - 32);
1463 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1464 }
1465
1466 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1467 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1468 SELF's operand-dependent value. fields[0] specifies the field that
1469 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1470 and imm3 fields, with imm3 being the less-significant part. */
1471 bfd_boolean
1472 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1473 aarch64_opnd_info *info,
1474 aarch64_insn code,
1475 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1476 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1477 {
1478 int offset;
1479
1480 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1481 offset = (((offset + 256) & 511) - 256);
1482 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1483 }
1484
1485 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1486 is given by the OFFSET parameter and where <shift> is SELF's operand-
1487 dependent value. fields[0] specifies the base register field <base>. */
1488 static bfd_boolean
1489 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1490 aarch64_opnd_info *info, aarch64_insn code,
1491 int64_t offset)
1492 {
1493 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1494 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1495 info->addr.offset.is_reg = FALSE;
1496 info->addr.writeback = FALSE;
1497 info->addr.preind = TRUE;
1498 info->shifter.operator_present = FALSE;
1499 info->shifter.amount_present = FALSE;
1500 return TRUE;
1501 }
1502
1503 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1504 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1505 value. fields[0] specifies the base register field. */
1506 bfd_boolean
1507 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1508 aarch64_opnd_info *info, aarch64_insn code,
1509 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1510 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1511 {
1512 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1513 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1514 }
1515
1516 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1517 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1518 value. fields[0] specifies the base register field. */
1519 bfd_boolean
1520 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1521 aarch64_opnd_info *info, aarch64_insn code,
1522 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1523 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1524 {
1525 int offset = extract_field (FLD_SVE_imm6, code, 0);
1526 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1527 }
1528
1529 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1530 is SELF's operand-dependent value. fields[0] specifies the base
1531 register field and fields[1] specifies the offset register field. */
1532 bfd_boolean
1533 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1534 aarch64_opnd_info *info, aarch64_insn code,
1535 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1536 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1537 {
1538 int index_regno;
1539
1540 index_regno = extract_field (self->fields[1], code, 0);
1541 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1542 return FALSE;
1543
1544 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1545 info->addr.offset.regno = index_regno;
1546 info->addr.offset.is_reg = TRUE;
1547 info->addr.writeback = FALSE;
1548 info->addr.preind = TRUE;
1549 info->shifter.kind = AARCH64_MOD_LSL;
1550 info->shifter.amount = get_operand_specific_data (self);
1551 info->shifter.operator_present = (info->shifter.amount != 0);
1552 info->shifter.amount_present = (info->shifter.amount != 0);
1553 return TRUE;
1554 }
1555
1556 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1557 <shift> is SELF's operand-dependent value. fields[0] specifies the
1558 base register field, fields[1] specifies the offset register field and
1559 fields[2] is a single-bit field that selects SXTW over UXTW. */
1560 bfd_boolean
1561 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1562 aarch64_opnd_info *info, aarch64_insn code,
1563 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1564 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1565 {
1566 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1567 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1568 info->addr.offset.is_reg = TRUE;
1569 info->addr.writeback = FALSE;
1570 info->addr.preind = TRUE;
1571 if (extract_field (self->fields[2], code, 0))
1572 info->shifter.kind = AARCH64_MOD_SXTW;
1573 else
1574 info->shifter.kind = AARCH64_MOD_UXTW;
1575 info->shifter.amount = get_operand_specific_data (self);
1576 info->shifter.operator_present = TRUE;
1577 info->shifter.amount_present = (info->shifter.amount != 0);
1578 return TRUE;
1579 }
1580
1581 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1582 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1583 fields[0] specifies the base register field. */
1584 bfd_boolean
1585 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1586 aarch64_opnd_info *info, aarch64_insn code,
1587 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1588 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1589 {
1590 int offset = extract_field (FLD_imm5, code, 0);
1591 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1592 }
1593
1594 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1595 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1596 number. fields[0] specifies the base register field and fields[1]
1597 specifies the offset register field. */
1598 static bfd_boolean
1599 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1600 aarch64_insn code, enum aarch64_modifier_kind kind)
1601 {
1602 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1603 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1604 info->addr.offset.is_reg = TRUE;
1605 info->addr.writeback = FALSE;
1606 info->addr.preind = TRUE;
1607 info->shifter.kind = kind;
1608 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1609 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1610 || info->shifter.amount != 0);
1611 info->shifter.amount_present = (info->shifter.amount != 0);
1612 return TRUE;
1613 }
1614
1615 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1616 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1617 field and fields[1] specifies the offset register field. */
1618 bfd_boolean
1619 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1620 aarch64_opnd_info *info, aarch64_insn code,
1621 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1622 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1623 {
1624 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1625 }
1626
1627 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1628 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1629 field and fields[1] specifies the offset register field. */
1630 bfd_boolean
1631 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1632 aarch64_opnd_info *info, aarch64_insn code,
1633 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1634 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1635 {
1636 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1637 }
1638
1639 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1640 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1641 field and fields[1] specifies the offset register field. */
1642 bfd_boolean
1643 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1644 aarch64_opnd_info *info, aarch64_insn code,
1645 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1646 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1647 {
1648 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1649 }
1650
1651 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1652 has the raw field value and that the low 8 bits decode to VALUE. */
1653 static bfd_boolean
1654 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1655 {
1656 info->shifter.kind = AARCH64_MOD_LSL;
1657 info->shifter.amount = 0;
1658 if (info->imm.value & 0x100)
1659 {
1660 if (value == 0)
1661 /* Decode 0x100 as #0, LSL #8. */
1662 info->shifter.amount = 8;
1663 else
1664 value *= 256;
1665 }
1666 info->shifter.operator_present = (info->shifter.amount != 0);
1667 info->shifter.amount_present = (info->shifter.amount != 0);
1668 info->imm.value = value;
1669 return TRUE;
1670 }
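/* For example, a raw 9-bit value of 0x101 has bit 8 set and a non-zero low
   byte, so the immediate becomes 1 * 256 == 256 with no explicit shift,
   whereas 0x100 is decoded as #0, LSL #8 as noted above.  */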
1671
1672 /* Decode an SVE ADD/SUB immediate. */
1673 bfd_boolean
1674 aarch64_ext_sve_aimm (const aarch64_operand *self,
1675 aarch64_opnd_info *info, const aarch64_insn code,
1676 const aarch64_inst *inst,
1677 aarch64_operand_error *errors)
1678 {
1679 return (aarch64_ext_imm (self, info, code, inst, errors)
1680 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1681 }
1682
1683 /* Decode an SVE CPY/DUP immediate. */
1684 bfd_boolean
1685 aarch64_ext_sve_asimm (const aarch64_operand *self,
1686 aarch64_opnd_info *info, const aarch64_insn code,
1687 const aarch64_inst *inst,
1688 aarch64_operand_error *errors)
1689 {
1690 return (aarch64_ext_imm (self, info, code, inst, errors)
1691 && decode_sve_aimm (info, (int8_t) info->imm.value));
1692 }
1693
1694 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1695 The fields array specifies which field to use. */
1696 bfd_boolean
1697 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1698 aarch64_opnd_info *info, aarch64_insn code,
1699 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1700 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1701 {
1702 if (extract_field (self->fields[0], code, 0))
1703 info->imm.value = 0x3f800000;
1704 else
1705 info->imm.value = 0x3f000000;
1706 info->imm.is_fp = TRUE;
1707 return TRUE;
1708 }
1709
1710 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1711 The fields array specifies which field to use. */
1712 bfd_boolean
1713 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1714 aarch64_opnd_info *info, aarch64_insn code,
1715 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1716 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1717 {
1718 if (extract_field (self->fields[0], code, 0))
1719 info->imm.value = 0x40000000;
1720 else
1721 info->imm.value = 0x3f000000;
1722 info->imm.is_fp = TRUE;
1723 return TRUE;
1724 }
1725
1726 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1727 The fields array specifies which field to use. */
1728 bfd_boolean
1729 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1730 aarch64_opnd_info *info, aarch64_insn code,
1731 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1732 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1733 {
1734 if (extract_field (self->fields[0], code, 0))
1735 info->imm.value = 0x3f800000;
1736 else
1737 info->imm.value = 0x0;
1738 info->imm.is_fp = TRUE;
1739 return TRUE;
1740 }
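/* Note: the immediates used by the three helpers above are the IEEE 754
   single-precision bit patterns: 0x0 is 0.0, 0x3f000000 is 0.5,
   0x3f800000 is 1.0 and 0x40000000 is 2.0.  */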
1741
1742 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1743 array specifies which field to use for Zn. MM is encoded in the
1744 concatenation of imm5 and SVE_tszh, with imm5 being the less
1745 significant part. */
1746 bfd_boolean
1747 aarch64_ext_sve_index (const aarch64_operand *self,
1748 aarch64_opnd_info *info, aarch64_insn code,
1749 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1750 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1751 {
1752 int val;
1753
1754 info->reglane.regno = extract_field (self->fields[0], code, 0);
1755 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1756 if ((val & 31) == 0)
1757 return 0;
1758 while ((val & 1) == 0)
1759 val /= 2;
1760 info->reglane.index = val / 2;
1761 return TRUE;
1762 }
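/* Worked example for the triangular encoding above: a tszh:imm5 value of
   0b0001010 has its single trailing zero stripped by the loop, leaving
   0b101, so the lane index is 0b101 / 2 = 2; a value whose low five bits
   are all zero is a reserved encoding and is rejected.  */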
1763
1764 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1765 bfd_boolean
1766 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1767 aarch64_opnd_info *info, const aarch64_insn code,
1768 const aarch64_inst *inst,
1769 aarch64_operand_error *errors)
1770 {
1771 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1772 return (aarch64_ext_limm (self, info, code, inst, errors)
1773 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1774 }
1775
1776 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1777 and where MM occupies the most-significant part. The operand-dependent
1778 value specifies the number of bits in Zn. */
1779 bfd_boolean
1780 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1781 aarch64_opnd_info *info, aarch64_insn code,
1782 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1783 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1784 {
1785 unsigned int reg_bits = get_operand_specific_data (self);
1786 unsigned int val = extract_all_fields (self, code);
1787 info->reglane.regno = val & ((1 << reg_bits) - 1);
1788 info->reglane.index = val >> reg_bits;
1789 return TRUE;
1790 }
1791
1792 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1793 to use for Zn. The opcode-dependent value specifies the number
1794 of registers in the list. */
1795 bfd_boolean
1796 aarch64_ext_sve_reglist (const aarch64_operand *self,
1797 aarch64_opnd_info *info, aarch64_insn code,
1798 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1799 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1800 {
1801 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1802 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1803 return TRUE;
1804 }
1805
1806 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1807 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1808 field. */
1809 bfd_boolean
1810 aarch64_ext_sve_scale (const aarch64_operand *self,
1811 aarch64_opnd_info *info, aarch64_insn code,
1812 const aarch64_inst *inst, aarch64_operand_error *errors)
1813 {
1814 int val;
1815
1816 if (!aarch64_ext_imm (self, info, code, inst, errors))
1817 return FALSE;
1818 val = extract_field (FLD_SVE_imm4, code, 0);
1819 info->shifter.kind = AARCH64_MOD_MUL;
1820 info->shifter.amount = val + 1;
1821 info->shifter.operator_present = (val != 0);
1822 info->shifter.amount_present = (val != 0);
1823 return TRUE;
1824 }
1825
1826 /* Return the top set bit in VALUE, which is expected to be relatively
1827 small. */
1828 static uint64_t
1829 get_top_bit (uint64_t value)
1830 {
1831 while ((value & -value) != value)
1832 value -= value & -value;
1833 return value;
1834 }
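/* For example: VALUE & -VALUE isolates the lowest set bit, so for
   0b101100 the loop removes 0b000100 and then 0b001000, and the function
   returns 0b100000.  */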
1835
1836 /* Decode an SVE shift-left immediate. */
1837 bfd_boolean
1838 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1839 aarch64_opnd_info *info, const aarch64_insn code,
1840 const aarch64_inst *inst, aarch64_operand_error *errors)
1841 {
1842 if (!aarch64_ext_imm (self, info, code, inst, errors)
1843 || info->imm.value == 0)
1844 return FALSE;
1845
1846 info->imm.value -= get_top_bit (info->imm.value);
1847 return TRUE;
1848 }
1849
1850 /* Decode an SVE shift-right immediate. */
1851 bfd_boolean
1852 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1853 aarch64_opnd_info *info, const aarch64_insn code,
1854 const aarch64_inst *inst, aarch64_operand_error *errors)
1855 {
1856 if (!aarch64_ext_imm (self, info, code, inst, errors)
1857 || info->imm.value == 0)
1858 return FALSE;
1859
1860 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1861 return TRUE;
1862 }
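/* Worked example for the two helpers above: a raw immediate of 0b100101
   (37) has top set bit 32, so the shift-left form decodes to 37 - 32 = 5
   and the shift-right form to 2 * 32 - 37 = 27.  */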
1863 \f
1864 /* Bitfields that are commonly used to encode certain operands' information
1865 may be partially used as part of the base opcode in some instructions.
 1866 For example, bit 1 of the field 'size' in
1867 FCVTXN <Vb><d>, <Va><n>
1868 is actually part of the base opcode, while only size<0> is available
1869 for encoding the register type. Another example is the AdvSIMD
1870 instruction ORR (register), in which the field 'size' is also used for
1871 the base opcode, leaving only the field 'Q' available to encode the
1872 vector register arrangement specifier '8B' or '16B'.
1873
1874 This function tries to deduce the qualifier from the value of partially
1875 constrained field(s). Given the VALUE of such a field or fields, the
1876 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1877 operand encoding), the function returns the matching qualifier or
1878 AARCH64_OPND_QLF_NIL if nothing matches.
1879
1880 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1881 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1882 may end with AARCH64_OPND_QLF_NIL. */
1883
1884 static enum aarch64_opnd_qualifier
1885 get_qualifier_from_partial_encoding (aarch64_insn value,
1886 const enum aarch64_opnd_qualifier* \
1887 candidates,
1888 aarch64_insn mask)
1889 {
1890 int i;
1891 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1892 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1893 {
1894 aarch64_insn standard_value;
1895 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1896 break;
1897 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1898 if ((standard_value & mask) == (value & mask))
1899 return candidates[i];
1900 }
1901 return AARCH64_OPND_QLF_NIL;
1902 }
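/* For instance, in the ORR (register) example mentioned above only the Q
   bit is available, so MASK covers just that bit; the standard values of
   the 8B and 16B candidates should differ only in Q, and the comparison
   above therefore picks 8B when Q is 0 and 16B when Q is 1.  */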
1903
1904 /* Given a list of qualifier sequences, return all possible valid qualifiers
1905 for operand IDX in QUALIFIERS.
1906 Assume QUALIFIERS is an array whose length is large enough. */
1907
1908 static void
1909 get_operand_possible_qualifiers (int idx,
1910 const aarch64_opnd_qualifier_seq_t *list,
1911 enum aarch64_opnd_qualifier *qualifiers)
1912 {
1913 int i;
1914 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1915 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1916 break;
1917 }
1918
 1919 /* Decode the size:Q fields for e.g. SHADD.
 1920 We tag one operand with the qualifier according to the code;
 1921 whether the qualifier is valid for this opcode or not is the
 1922 duty of the semantic checking. */
1923
1924 static int
1925 decode_sizeq (aarch64_inst *inst)
1926 {
1927 int idx;
1928 enum aarch64_opnd_qualifier qualifier;
1929 aarch64_insn code;
1930 aarch64_insn value, mask;
1931 enum aarch64_field_kind fld_sz;
1932 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1933
1934 if (inst->opcode->iclass == asisdlse
1935 || inst->opcode->iclass == asisdlsep
1936 || inst->opcode->iclass == asisdlso
1937 || inst->opcode->iclass == asisdlsop)
1938 fld_sz = FLD_vldst_size;
1939 else
1940 fld_sz = FLD_size;
1941
1942 code = inst->value;
1943 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
 1944 /* Work out which bits of the fields Q and size are actually
1945 available for operand encoding. Opcodes like FMAXNM and FMLA have
1946 size[1] unavailable. */
1947 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1948
 1949 /* The index of the operand to tag with a qualifier, and the qualifier
 1950 itself, are deduced from the value of the size and Q fields and the
1951 possible valid qualifier lists. */
1952 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1953 DEBUG_TRACE ("key idx: %d", idx);
1954
 1955 /* For most related instructions, size:Q is fully available for operand
1956 encoding. */
1957 if (mask == 0x7)
1958 {
1959 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1960 return 1;
1961 }
1962
1963 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1964 candidates);
1965 #ifdef DEBUG_AARCH64
1966 if (debug_dump)
1967 {
1968 int i;
 1969 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM
 1970 && candidates[i] != AARCH64_OPND_QLF_NIL; ++i)
1971 DEBUG_TRACE ("qualifier %d: %s", i,
1972 aarch64_get_qualifier_name(candidates[i]));
1973 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1974 }
1975 #endif /* DEBUG_AARCH64 */
1976
1977 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1978
1979 if (qualifier == AARCH64_OPND_QLF_NIL)
1980 return 0;
1981
1982 inst->operands[idx].qualifier = qualifier;
1983 return 1;
1984 }
1985
1986 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1987 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1988
1989 static int
1990 decode_asimd_fcvt (aarch64_inst *inst)
1991 {
1992 aarch64_field field = {0, 0};
1993 aarch64_insn value;
1994 enum aarch64_opnd_qualifier qualifier;
1995
1996 gen_sub_field (FLD_size, 0, 1, &field);
1997 value = extract_field_2 (&field, inst->value, 0);
1998 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1999 : AARCH64_OPND_QLF_V_2D;
2000 switch (inst->opcode->op)
2001 {
2002 case OP_FCVTN:
2003 case OP_FCVTN2:
2004 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
2005 inst->operands[1].qualifier = qualifier;
2006 break;
2007 case OP_FCVTL:
2008 case OP_FCVTL2:
2009 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
2010 inst->operands[0].qualifier = qualifier;
2011 break;
2012 default:
2013 assert (0);
2014 return 0;
2015 }
2016
2017 return 1;
2018 }
2019
2020 /* Decode size[0], i.e. bit 22, for
2021 e.g. FCVTXN <Vb><d>, <Va><n>. */
2022
2023 static int
2024 decode_asisd_fcvtxn (aarch64_inst *inst)
2025 {
2026 aarch64_field field = {0, 0};
2027 gen_sub_field (FLD_size, 0, 1, &field);
2028 if (!extract_field_2 (&field, inst->value, 0))
2029 return 0;
2030 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2031 return 1;
2032 }
2033
2034 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
2035 static int
2036 decode_fcvt (aarch64_inst *inst)
2037 {
2038 enum aarch64_opnd_qualifier qualifier;
2039 aarch64_insn value;
2040 const aarch64_field field = {15, 2};
2041
2042 /* opc dstsize */
2043 value = extract_field_2 (&field, inst->value, 0);
2044 switch (value)
2045 {
2046 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2047 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2048 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2049 default: return 0;
2050 }
2051 inst->operands[0].qualifier = qualifier;
2052
2053 return 1;
2054 }
2055
2056 /* Do miscellaneous decodings that are not common enough to be driven by
2057 flags. */
2058
2059 static int
2060 do_misc_decoding (aarch64_inst *inst)
2061 {
2062 unsigned int value;
2063 switch (inst->opcode->op)
2064 {
2065 case OP_FCVT:
2066 return decode_fcvt (inst);
2067
2068 case OP_FCVTN:
2069 case OP_FCVTN2:
2070 case OP_FCVTL:
2071 case OP_FCVTL2:
2072 return decode_asimd_fcvt (inst);
2073
2074 case OP_FCVTXN_S:
2075 return decode_asisd_fcvtxn (inst);
2076
2077 case OP_MOV_P_P:
2078 case OP_MOVS_P_P:
2079 value = extract_field (FLD_SVE_Pn, inst->value, 0);
2080 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2081 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2082
2083 case OP_MOV_Z_P_Z:
2084 return (extract_field (FLD_SVE_Zd, inst->value, 0)
2085 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2086
2087 case OP_MOV_Z_V:
2088 /* Index must be zero. */
2089 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2090 return value > 0 && value <= 16 && value == (value & -value);
2091
2092 case OP_MOV_Z_Z:
2093 return (extract_field (FLD_SVE_Zn, inst->value, 0)
2094 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2095
2096 case OP_MOV_Z_Zi:
2097 /* Index must be nonzero. */
2098 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2099 return value > 0 && value != (value & -value);
2100
2101 case OP_MOVM_P_P_P:
2102 return (extract_field (FLD_SVE_Pd, inst->value, 0)
2103 == extract_field (FLD_SVE_Pm, inst->value, 0));
2104
2105 case OP_MOVZS_P_P_P:
2106 case OP_MOVZ_P_P_P:
2107 return (extract_field (FLD_SVE_Pn, inst->value, 0)
2108 == extract_field (FLD_SVE_Pm, inst->value, 0));
2109
2110 case OP_NOTS_P_P_P_Z:
2111 case OP_NOT_P_P_P_Z:
2112 return (extract_field (FLD_SVE_Pm, inst->value, 0)
2113 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2114
2115 default:
2116 return 0;
2117 }
2118 }
2119
 2120 /* Opcodes that have fields shared by multiple operands are usually marked
 2121 with flags. In this function, we detect such flags, decode the related
 2122 field(s) and store the information in one of the related operands. The
 2123 'one' operand is not an arbitrary operand, but one of the operands that can
 2124 accommodate all the information that has been decoded. */
2125
2126 static int
2127 do_special_decoding (aarch64_inst *inst)
2128 {
2129 int idx;
2130 aarch64_insn value;
2131 /* Condition for truly conditional executed instructions, e.g. b.cond. */
2132 if (inst->opcode->flags & F_COND)
2133 {
2134 value = extract_field (FLD_cond2, inst->value, 0);
2135 inst->cond = get_cond_from_value (value);
2136 }
2137 /* 'sf' field. */
2138 if (inst->opcode->flags & F_SF)
2139 {
2140 idx = select_operand_for_sf_field_coding (inst->opcode);
2141 value = extract_field (FLD_sf, inst->value, 0);
2142 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2143 if ((inst->opcode->flags & F_N)
2144 && extract_field (FLD_N, inst->value, 0) != value)
2145 return 0;
2146 }
 2147 /* 'lse_sz' field. */
2148 if (inst->opcode->flags & F_LSE_SZ)
2149 {
2150 idx = select_operand_for_sf_field_coding (inst->opcode);
2151 value = extract_field (FLD_lse_sz, inst->value, 0);
2152 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2153 }
2154 /* size:Q fields. */
2155 if (inst->opcode->flags & F_SIZEQ)
2156 return decode_sizeq (inst);
2157
2158 if (inst->opcode->flags & F_FPTYPE)
2159 {
2160 idx = select_operand_for_fptype_field_coding (inst->opcode);
2161 value = extract_field (FLD_type, inst->value, 0);
2162 switch (value)
2163 {
2164 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2165 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2166 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2167 default: return 0;
2168 }
2169 }
2170
2171 if (inst->opcode->flags & F_SSIZE)
2172 {
2173 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2174 of the base opcode. */
2175 aarch64_insn mask;
2176 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2177 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2178 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2179 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
 2180 /* For most related instructions, the 'size' field is fully available for
2181 operand encoding. */
2182 if (mask == 0x3)
2183 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2184 else
2185 {
2186 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2187 candidates);
2188 inst->operands[idx].qualifier
2189 = get_qualifier_from_partial_encoding (value, candidates, mask);
2190 }
2191 }
2192
2193 if (inst->opcode->flags & F_T)
2194 {
2195 /* Num of consecutive '0's on the right side of imm5<3:0>. */
2196 int num = 0;
2197 unsigned val, Q;
2198 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2199 == AARCH64_OPND_CLASS_SIMD_REG);
2200 /* imm5<3:0> q <t>
2201 0000 x reserved
2202 xxx1 0 8b
2203 xxx1 1 16b
2204 xx10 0 4h
2205 xx10 1 8h
2206 x100 0 2s
2207 x100 1 4s
2208 1000 0 reserved
2209 1000 1 2d */
2210 val = extract_field (FLD_imm5, inst->value, 0);
2211 while ((val & 0x1) == 0 && ++num <= 3)
2212 val >>= 1;
2213 if (num > 3)
2214 return 0;
2215 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2216 inst->operands[0].qualifier =
2217 get_vreg_qualifier_from_value ((num << 1) | Q);
2218 }
2219
2220 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2221 {
2222 /* Use Rt to encode in the case of e.g.
2223 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2224 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2225 if (idx == -1)
2226 {
 2227 /* Otherwise use the result operand, which has to be an integer
2228 register. */
2229 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2230 == AARCH64_OPND_CLASS_INT_REG);
2231 idx = 0;
2232 }
2233 assert (idx == 0 || idx == 1);
2234 value = extract_field (FLD_Q, inst->value, 0);
2235 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2236 }
2237
2238 if (inst->opcode->flags & F_LDS_SIZE)
2239 {
2240 aarch64_field field = {0, 0};
2241 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2242 == AARCH64_OPND_CLASS_INT_REG);
2243 gen_sub_field (FLD_opc, 0, 1, &field);
2244 value = extract_field_2 (&field, inst->value, 0);
2245 inst->operands[0].qualifier
2246 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2247 }
2248
2249 /* Miscellaneous decoding; done as the last step. */
2250 if (inst->opcode->flags & F_MISC)
2251 return do_misc_decoding (inst);
2252
2253 return 1;
2254 }
2255
2256 /* Converters converting a real opcode instruction to its alias form. */
2257
2258 /* ROR <Wd>, <Ws>, #<shift>
2259 is equivalent to:
2260 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2261 static int
2262 convert_extr_to_ror (aarch64_inst *inst)
2263 {
2264 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2265 {
2266 copy_operand_info (inst, 2, 3);
2267 inst->operands[3].type = AARCH64_OPND_NIL;
2268 return 1;
2269 }
2270 return 0;
2271 }
2272
2273 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2274 is equivalent to:
2275 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2276 static int
2277 convert_shll_to_xtl (aarch64_inst *inst)
2278 {
2279 if (inst->operands[2].imm.value == 0)
2280 {
2281 inst->operands[2].type = AARCH64_OPND_NIL;
2282 return 1;
2283 }
2284 return 0;
2285 }
2286
2287 /* Convert
2288 UBFM <Xd>, <Xn>, #<shift>, #63.
2289 to
2290 LSR <Xd>, <Xn>, #<shift>. */
2291 static int
2292 convert_bfm_to_sr (aarch64_inst *inst)
2293 {
2294 int64_t imms, val;
2295
2296 imms = inst->operands[3].imm.value;
2297 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2298 if (imms == val)
2299 {
2300 inst->operands[3].type = AARCH64_OPND_NIL;
2301 return 1;
2302 }
2303
2304 return 0;
2305 }
2306
2307 /* Convert MOV to ORR. */
2308 static int
2309 convert_orr_to_mov (aarch64_inst *inst)
2310 {
2311 /* MOV <Vd>.<T>, <Vn>.<T>
2312 is equivalent to:
2313 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2314 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2315 {
2316 inst->operands[2].type = AARCH64_OPND_NIL;
2317 return 1;
2318 }
2319 return 0;
2320 }
2321
2322 /* When <imms> >= <immr>, the instruction written:
2323 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2324 is equivalent to:
2325 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2326
2327 static int
2328 convert_bfm_to_bfx (aarch64_inst *inst)
2329 {
2330 int64_t immr, imms;
2331
2332 immr = inst->operands[2].imm.value;
2333 imms = inst->operands[3].imm.value;
2334 if (imms >= immr)
2335 {
2336 int64_t lsb = immr;
2337 inst->operands[2].imm.value = lsb;
2338 inst->operands[3].imm.value = imms + 1 - lsb;
2339 /* The two opcodes have different qualifiers for
2340 the immediate operands; reset to help the checking. */
2341 reset_operand_qualifier (inst, 2);
2342 reset_operand_qualifier (inst, 3);
2343 return 1;
2344 }
2345
2346 return 0;
2347 }
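/* Illustration: SBFM X0, X1, #4, #11 satisfies imms >= immr and is
   therefore shown as SBFX X0, X1, #4, #8, i.e. lsb = 4 and
   width = 11 + 1 - 4 = 8.  */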
2348
2349 /* When <imms> < <immr>, the instruction written:
2350 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2351 is equivalent to:
2352 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2353
2354 static int
2355 convert_bfm_to_bfi (aarch64_inst *inst)
2356 {
2357 int64_t immr, imms, val;
2358
2359 immr = inst->operands[2].imm.value;
2360 imms = inst->operands[3].imm.value;
2361 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2362 if (imms < immr)
2363 {
2364 inst->operands[2].imm.value = (val - immr) & (val - 1);
2365 inst->operands[3].imm.value = imms + 1;
2366 /* The two opcodes have different qualifiers for
2367 the immediate operands; reset to help the checking. */
2368 reset_operand_qualifier (inst, 2);
2369 reset_operand_qualifier (inst, 3);
2370 return 1;
2371 }
2372
2373 return 0;
2374 }
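/* Illustration: SBFM X0, X1, #60, #7 has imms < immr, so it is shown as
   SBFIZ X0, X1, #4, #8, since (64 - 60) & 0x3f = 4 and 7 + 1 = 8.  */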
2375
2376 /* The instruction written:
2377 BFC <Xd>, #<lsb>, #<width>
2378 is equivalent to:
2379 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2380
2381 static int
2382 convert_bfm_to_bfc (aarch64_inst *inst)
2383 {
2384 int64_t immr, imms, val;
2385
2386 /* Should have been assured by the base opcode value. */
2387 assert (inst->operands[1].reg.regno == 0x1f);
2388
2389 immr = inst->operands[2].imm.value;
2390 imms = inst->operands[3].imm.value;
2391 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2392 if (imms < immr)
2393 {
2394 /* Drop XZR from the second operand. */
2395 copy_operand_info (inst, 1, 2);
2396 copy_operand_info (inst, 2, 3);
2397 inst->operands[3].type = AARCH64_OPND_NIL;
2398
2399 /* Recalculate the immediates. */
2400 inst->operands[1].imm.value = (val - immr) & (val - 1);
2401 inst->operands[2].imm.value = imms + 1;
2402
2403 /* The two opcodes have different qualifiers for the operands; reset to
2404 help the checking. */
2405 reset_operand_qualifier (inst, 1);
2406 reset_operand_qualifier (inst, 2);
2407 reset_operand_qualifier (inst, 3);
2408
2409 return 1;
2410 }
2411
2412 return 0;
2413 }
2414
2415 /* The instruction written:
2416 LSL <Xd>, <Xn>, #<shift>
2417 is equivalent to:
2418 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2419
2420 static int
2421 convert_ubfm_to_lsl (aarch64_inst *inst)
2422 {
2423 int64_t immr = inst->operands[2].imm.value;
2424 int64_t imms = inst->operands[3].imm.value;
2425 int64_t val
2426 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2427
2428 if ((immr == 0 && imms == val) || immr == imms + 1)
2429 {
2430 inst->operands[3].type = AARCH64_OPND_NIL;
2431 inst->operands[2].imm.value = val - imms;
2432 return 1;
2433 }
2434
2435 return 0;
2436 }
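/* Illustration: UBFM X0, X1, #60, #59 satisfies immr == imms + 1, so it is
   shown as LSL X0, X1, #4, the shift being val - imms = 63 - 59 = 4; the
   immr == 0 && imms == val case covers LSL #0.  */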
2437
2438 /* CINC <Wd>, <Wn>, <cond>
2439 is equivalent to:
2440 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2441 where <cond> is not AL or NV. */
2442
2443 static int
2444 convert_from_csel (aarch64_inst *inst)
2445 {
2446 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2447 && (inst->operands[3].cond->value & 0xe) != 0xe)
2448 {
2449 copy_operand_info (inst, 2, 3);
2450 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2451 inst->operands[3].type = AARCH64_OPND_NIL;
2452 return 1;
2453 }
2454 return 0;
2455 }
2456
2457 /* CSET <Wd>, <cond>
2458 is equivalent to:
2459 CSINC <Wd>, WZR, WZR, invert(<cond>)
2460 where <cond> is not AL or NV. */
2461
2462 static int
2463 convert_csinc_to_cset (aarch64_inst *inst)
2464 {
2465 if (inst->operands[1].reg.regno == 0x1f
2466 && inst->operands[2].reg.regno == 0x1f
2467 && (inst->operands[3].cond->value & 0xe) != 0xe)
2468 {
2469 copy_operand_info (inst, 1, 3);
2470 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2471 inst->operands[3].type = AARCH64_OPND_NIL;
2472 inst->operands[2].type = AARCH64_OPND_NIL;
2473 return 1;
2474 }
2475 return 0;
2476 }
2477
2478 /* MOV <Wd>, #<imm>
2479 is equivalent to:
2480 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2481
2482 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2483 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2484 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2485 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2486 machine-instruction mnemonic must be used. */
2487
2488 static int
2489 convert_movewide_to_mov (aarch64_inst *inst)
2490 {
2491 uint64_t value = inst->operands[1].imm.value;
2492 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2493 if (value == 0 && inst->operands[1].shifter.amount != 0)
2494 return 0;
2495 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2496 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2497 value <<= inst->operands[1].shifter.amount;
 2498 /* As an alias converter, note that INST->OPCODE
2499 is the opcode of the real instruction. */
2500 if (inst->opcode->op == OP_MOVN)
2501 {
2502 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2503 value = ~value;
2504 /* A MOVN has an immediate that could be encoded by MOVZ. */
2505 if (aarch64_wide_constant_p (value, is32, NULL))
2506 return 0;
2507 }
2508 inst->operands[1].imm.value = value;
2509 inst->operands[1].shifter.amount = 0;
2510 return 1;
2511 }
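/* Illustration: MOVZ X0, #0x12, LSL #16 is displayed as MOV X0, #0x120000;
   a MOVN immediate is first shifted and then inverted, and the instruction
   is kept as MOVN whenever the resulting value could also have been
   encoded by a MOVZ.  */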
2512
2513 /* MOV <Wd>, #<imm>
2514 is equivalent to:
2515 ORR <Wd>, WZR, #<imm>.
2516
2517 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2518 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2519 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2520 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2521 machine-instruction mnemonic must be used. */
2522
2523 static int
2524 convert_movebitmask_to_mov (aarch64_inst *inst)
2525 {
2526 int is32;
2527 uint64_t value;
2528
2529 /* Should have been assured by the base opcode value. */
2530 assert (inst->operands[1].reg.regno == 0x1f);
2531 copy_operand_info (inst, 1, 2);
2532 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2533 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2534 value = inst->operands[1].imm.value;
2535 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2536 instruction. */
2537 if (inst->operands[0].reg.regno != 0x1f
2538 && (aarch64_wide_constant_p (value, is32, NULL)
2539 || aarch64_wide_constant_p (~value, is32, NULL)))
2540 return 0;
2541
2542 inst->operands[2].type = AARCH64_OPND_NIL;
2543 return 1;
2544 }
2545
 2546 /* Some alias opcodes are disassembled by being converted from their real form.
2547 N.B. INST->OPCODE is the real opcode rather than the alias. */
2548
2549 static int
2550 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2551 {
2552 switch (alias->op)
2553 {
2554 case OP_ASR_IMM:
2555 case OP_LSR_IMM:
2556 return convert_bfm_to_sr (inst);
2557 case OP_LSL_IMM:
2558 return convert_ubfm_to_lsl (inst);
2559 case OP_CINC:
2560 case OP_CINV:
2561 case OP_CNEG:
2562 return convert_from_csel (inst);
2563 case OP_CSET:
2564 case OP_CSETM:
2565 return convert_csinc_to_cset (inst);
2566 case OP_UBFX:
2567 case OP_BFXIL:
2568 case OP_SBFX:
2569 return convert_bfm_to_bfx (inst);
2570 case OP_SBFIZ:
2571 case OP_BFI:
2572 case OP_UBFIZ:
2573 return convert_bfm_to_bfi (inst);
2574 case OP_BFC:
2575 return convert_bfm_to_bfc (inst);
2576 case OP_MOV_V:
2577 return convert_orr_to_mov (inst);
2578 case OP_MOV_IMM_WIDE:
2579 case OP_MOV_IMM_WIDEN:
2580 return convert_movewide_to_mov (inst);
2581 case OP_MOV_IMM_LOG:
2582 return convert_movebitmask_to_mov (inst);
2583 case OP_ROR_IMM:
2584 return convert_extr_to_ror (inst);
2585 case OP_SXTL:
2586 case OP_SXTL2:
2587 case OP_UXTL:
2588 case OP_UXTL2:
2589 return convert_shll_to_xtl (inst);
2590 default:
2591 return 0;
2592 }
2593 }
2594
2595 static bfd_boolean
2596 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2597 aarch64_inst *, int, aarch64_operand_error *errors);
2598
2599 /* Given the instruction information in *INST, check if the instruction has
2600 any alias form that can be used to represent *INST. If the answer is yes,
2601 update *INST to be in the form of the determined alias. */
2602
2603 /* In the opcode description table, the following flags are used in opcode
2604 entries to help establish the relations between the real and alias opcodes:
2605
2606 F_ALIAS: opcode is an alias
2607 F_HAS_ALIAS: opcode has alias(es)
2608 F_P1
2609 F_P2
2610 F_P3: Disassembly preference priority 1-3 (the larger the
2611 higher). If nothing is specified, it is the priority
2612 0 by default, i.e. the lowest priority.
2613
2614 Although the relation between the machine and the alias instructions are not
2615 explicitly described, it can be easily determined from the base opcode
2616 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2617 description entries:
2618
2619 The mask of an alias opcode must be equal to or a super-set (i.e. more
2620 constrained) of that of the aliased opcode; so is the base opcode value.
2621
2622 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2623 && (opcode->mask & real->mask) == real->mask
2624 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2625 then OPCODE is an alias of, and only of, the REAL instruction
2626
 2627 The alias relationship is forced to be flat-structured to keep the related
 2628 algorithm simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2629
 2630 During disassembling, the decoding decision tree (in
 2631 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
 2632 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
 2633 not specified), the disassembler will check whether any alias
 2634 instruction exists for this real instruction. If there is, the disassembler
 2635 will try to disassemble the 32-bit binary again using the alias's rule, or
 2636 try to convert the IR to the form of the alias. In the case of multiple
 2637 aliases, the aliases are tried one by one from the highest priority
 2638 (currently the flag F_P3) to the lowest priority (no priority flag), and the
 2639 first one that succeeds is adopted.
2640
 2641 You may ask why there is a need for the conversion of IR from one form to
 2642 another in handling certain aliases. This is because, on one hand, it avoids
 2643 adding more operand code to handle unusual encoding/decoding; on the other
 2644 hand, during disassembling, the conversion is an effective approach to
 2645 checking the condition of an alias (as an alias may be adopted only if certain
 2646 conditions are met).
2647
2648 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2649 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2650 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2651
2652 static void
2653 determine_disassembling_preference (struct aarch64_inst *inst,
2654 aarch64_operand_error *errors)
2655 {
2656 const aarch64_opcode *opcode;
2657 const aarch64_opcode *alias;
2658
2659 opcode = inst->opcode;
2660
2661 /* This opcode does not have an alias, so use itself. */
2662 if (!opcode_has_alias (opcode))
2663 return;
2664
2665 alias = aarch64_find_alias_opcode (opcode);
2666 assert (alias);
2667
2668 #ifdef DEBUG_AARCH64
2669 if (debug_dump)
2670 {
2671 const aarch64_opcode *tmp = alias;
2672 printf ("#### LIST orderd: ");
2673 while (tmp)
2674 {
2675 printf ("%s, ", tmp->name);
2676 tmp = aarch64_find_next_alias_opcode (tmp);
2677 }
2678 printf ("\n");
2679 }
2680 #endif /* DEBUG_AARCH64 */
2681
2682 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2683 {
2684 DEBUG_TRACE ("try %s", alias->name);
2685 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2686
2687 /* An alias can be a pseudo opcode which will never be used in the
2688 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2689 aliasing AND. */
2690 if (pseudo_opcode_p (alias))
2691 {
2692 DEBUG_TRACE ("skip pseudo %s", alias->name);
2693 continue;
2694 }
2695
2696 if ((inst->value & alias->mask) != alias->opcode)
2697 {
2698 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2699 continue;
2700 }
2701 /* No need to do any complicated transformation on operands, if the alias
2702 opcode does not have any operand. */
2703 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2704 {
2705 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2706 aarch64_replace_opcode (inst, alias);
2707 return;
2708 }
2709 if (alias->flags & F_CONV)
2710 {
2711 aarch64_inst copy;
2712 memcpy (&copy, inst, sizeof (aarch64_inst));
2713 /* ALIAS is the preference as long as the instruction can be
2714 successfully converted to the form of ALIAS. */
2715 if (convert_to_alias (&copy, alias) == 1)
2716 {
2717 aarch64_replace_opcode (&copy, alias);
2718 assert (aarch64_match_operands_constraint (&copy, NULL));
2719 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2720 memcpy (inst, &copy, sizeof (aarch64_inst));
2721 return;
2722 }
2723 }
2724 else
2725 {
2726 /* Directly decode the alias opcode. */
2727 aarch64_inst temp;
2728 memset (&temp, '\0', sizeof (aarch64_inst));
2729 if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2730 {
2731 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2732 memcpy (inst, &temp, sizeof (aarch64_inst));
2733 return;
2734 }
2735 }
2736 }
2737 }
2738
2739 /* Some instructions (including all SVE ones) use the instruction class
2740 to describe how a qualifiers_list index is represented in the instruction
2741 encoding. If INST is such an instruction, decode the appropriate fields
2742 and fill in the operand qualifiers accordingly. Return true if no
2743 problems are found. */
2744
2745 static bfd_boolean
2746 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2747 {
2748 int i, variant;
2749
2750 variant = 0;
2751 switch (inst->opcode->iclass)
2752 {
2753 case sve_cpy:
2754 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2755 break;
2756
2757 case sve_index:
2758 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2759 if ((i & 31) == 0)
2760 return FALSE;
2761 while ((i & 1) == 0)
2762 {
2763 i >>= 1;
2764 variant += 1;
2765 }
2766 break;
2767
2768 case sve_limm:
2769 /* Pick the smallest applicable element size. */
2770 if ((inst->value & 0x20600) == 0x600)
2771 variant = 0;
2772 else if ((inst->value & 0x20400) == 0x400)
2773 variant = 1;
2774 else if ((inst->value & 0x20000) == 0)
2775 variant = 2;
2776 else
2777 variant = 3;
2778 break;
2779
2780 case sve_misc:
2781 /* sve_misc instructions have only a single variant. */
2782 break;
2783
2784 case sve_movprfx:
2785 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2786 break;
2787
2788 case sve_pred_zm:
2789 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2790 break;
2791
2792 case sve_shift_pred:
2793 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2794 sve_shift:
2795 if (i == 0)
2796 return FALSE;
2797 while (i != 1)
2798 {
2799 i >>= 1;
2800 variant += 1;
2801 }
2802 break;
2803
2804 case sve_shift_unpred:
2805 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2806 goto sve_shift;
2807
2808 case sve_size_bhs:
2809 variant = extract_field (FLD_size, inst->value, 0);
2810 if (variant >= 3)
2811 return FALSE;
2812 break;
2813
2814 case sve_size_bhsd:
2815 variant = extract_field (FLD_size, inst->value, 0);
2816 break;
2817
2818 case sve_size_hsd:
2819 i = extract_field (FLD_size, inst->value, 0);
2820 if (i < 1)
2821 return FALSE;
2822 variant = i - 1;
2823 break;
2824
2825 case sve_size_sd:
2826 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2827 break;
2828
2829 default:
2830 /* No mapping between instruction class and qualifiers. */
2831 return TRUE;
2832 }
2833
2834 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2835 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2836 return TRUE;
2837 }
2838 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
 2839 fails, which means that CODE is not an instruction of OPCODE; otherwise
2840 return 1.
2841
2842 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2843 determined and used to disassemble CODE; this is done just before the
2844 return. */
2845
2846 static bfd_boolean
2847 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2848 aarch64_inst *inst, int noaliases_p,
2849 aarch64_operand_error *errors)
2850 {
2851 int i;
2852
2853 DEBUG_TRACE ("enter with %s", opcode->name);
2854
2855 assert (opcode && inst);
2856
2857 /* Clear inst. */
2858 memset (inst, '\0', sizeof (aarch64_inst));
2859
2860 /* Check the base opcode. */
2861 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2862 {
2863 DEBUG_TRACE ("base opcode match FAIL");
2864 goto decode_fail;
2865 }
2866
2867 inst->opcode = opcode;
2868 inst->value = code;
2869
2870 /* Assign operand codes and indexes. */
2871 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2872 {
2873 if (opcode->operands[i] == AARCH64_OPND_NIL)
2874 break;
2875 inst->operands[i].type = opcode->operands[i];
2876 inst->operands[i].idx = i;
2877 }
2878
2879 /* Call the opcode decoder indicated by flags. */
2880 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2881 {
2882 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2883 goto decode_fail;
2884 }
2885
2886 /* Possibly use the instruction class to determine the correct
2887 qualifier. */
2888 if (!aarch64_decode_variant_using_iclass (inst))
2889 {
2890 DEBUG_TRACE ("iclass-based decoder FAIL");
2891 goto decode_fail;
2892 }
2893
2894 /* Call operand decoders. */
2895 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2896 {
2897 const aarch64_operand *opnd;
2898 enum aarch64_opnd type;
2899
2900 type = opcode->operands[i];
2901 if (type == AARCH64_OPND_NIL)
2902 break;
2903 opnd = &aarch64_operands[type];
2904 if (operand_has_extractor (opnd)
2905 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
2906 errors)))
2907 {
2908 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2909 goto decode_fail;
2910 }
2911 }
2912
2913 /* If the opcode has a verifier, then check it now. */
2914 if (opcode->verifier
2915 && opcode->verifier (inst, code, 0, FALSE, errors, NULL) != ERR_OK)
2916 {
2917 DEBUG_TRACE ("operand verifier FAIL");
2918 goto decode_fail;
2919 }
2920
2921 /* Match the qualifiers. */
2922 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2923 {
2924 /* Arriving here, the CODE has been determined as a valid instruction
2925 of OPCODE and *INST has been filled with information of this OPCODE
2926 instruction. Before the return, check if the instruction has any
2927 alias and should be disassembled in the form of its alias instead.
2928 If the answer is yes, *INST will be updated. */
2929 if (!noaliases_p)
2930 determine_disassembling_preference (inst, errors);
2931 DEBUG_TRACE ("SUCCESS");
2932 return TRUE;
2933 }
2934 else
2935 {
2936 DEBUG_TRACE ("constraint matching FAIL");
2937 }
2938
2939 decode_fail:
2940 return FALSE;
2941 }
2942 \f
 2943 /* This does some user-friendly fix-up to *INST. It currently focuses on
 2944 adjusting qualifiers to help make the printed instruction easier to
 2945 recognize/understand. */
2946
2947 static void
2948 user_friendly_fixup (aarch64_inst *inst)
2949 {
2950 switch (inst->opcode->iclass)
2951 {
2952 case testbranch:
2953 /* TBNZ Xn|Wn, #uimm6, label
2954 Test and Branch Not Zero: conditionally jumps to label if bit number
2955 uimm6 in register Xn is not zero. The bit number implies the width of
2956 the register, which may be written and should be disassembled as Wn if
 2957 uimm6 is less than 32. Limited to a branch offset range of +/- 32KiB.
2958 */
2959 if (inst->operands[1].imm.value < 32)
2960 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2961 break;
2962 default: break;
2963 }
2964 }
2965
 2966 /* Decode INSN and fill in *INST with the instruction information. An alias
 2967 opcode may be filled in *INST if NOALIASES_P is FALSE. Return ERR_OK on
2968 success. */
2969
2970 enum err_type
2971 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2972 bfd_boolean noaliases_p,
2973 aarch64_operand_error *errors)
2974 {
2975 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2976
2977 #ifdef DEBUG_AARCH64
2978 if (debug_dump)
2979 {
2980 const aarch64_opcode *tmp = opcode;
2981 printf ("\n");
2982 DEBUG_TRACE ("opcode lookup:");
2983 while (tmp != NULL)
2984 {
2985 aarch64_verbose (" %s", tmp->name);
2986 tmp = aarch64_find_next_opcode (tmp);
2987 }
2988 }
2989 #endif /* DEBUG_AARCH64 */
2990
2991 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2992 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2993 opcode field and value, apart from the difference that one of them has an
2994 extra field as part of the opcode, but such a field is used for operand
2995 encoding in other opcode(s) ('immh' in the case of the example). */
2996 while (opcode != NULL)
2997 {
 2998 /* But only one opcode can be decoded successfully, as the
 2999 decoding routine will check the constraints carefully. */
3000 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3001 return ERR_OK;
3002 opcode = aarch64_find_next_opcode (opcode);
3003 }
3004
3005 return ERR_UND;
3006 }
3007
3008 /* Print operands. */
3009
3010 static void
3011 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3012 const aarch64_opnd_info *opnds, struct disassemble_info *info,
3013 bfd_boolean *has_notes)
3014 {
3015 char *notes = NULL;
3016 int i, pcrel_p, num_printed;
3017 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3018 {
3019 char str[128];
 3020 /* We rely primarily on the opcode's operand info; however, we also look
 3021 into inst->operands to support the disassembling of the optional
 3022 operand.
 3023 The two operand codes should be the same in all cases, apart from
 3024 when the operand can be optional. */
3025 if (opcode->operands[i] == AARCH64_OPND_NIL
3026 || opnds[i].type == AARCH64_OPND_NIL)
3027 break;
3028
3029 /* Generate the operand string in STR. */
3030 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3031 &info->target, &notes);
3032
3033 /* Print the delimiter (taking account of omitted operand(s)). */
3034 if (str[0] != '\0')
3035 (*info->fprintf_func) (info->stream, "%s",
3036 num_printed++ == 0 ? "\t" : ", ");
3037
3038 /* Print the operand. */
3039 if (pcrel_p)
3040 (*info->print_address_func) (info->target, info);
3041 else
3042 (*info->fprintf_func) (info->stream, "%s", str);
3043 }
3044
3045 if (notes && !no_notes)
3046 {
3047 *has_notes = TRUE;
3048 (*info->fprintf_func) (info->stream, " // note: %s", notes);
3049 }
3050 }
3051
3052 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
3053
3054 static void
3055 remove_dot_suffix (char *name, const aarch64_inst *inst)
3056 {
3057 char *ptr;
3058 size_t len;
3059
3060 ptr = strchr (inst->opcode->name, '.');
3061 assert (ptr && inst->cond);
3062 len = ptr - inst->opcode->name;
3063 assert (len < 8);
3064 strncpy (name, inst->opcode->name, len);
3065 name[len] = '\0';
3066 }
3067
3068 /* Print the instruction mnemonic name. */
3069
3070 static void
3071 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3072 {
3073 if (inst->opcode->flags & F_COND)
3074 {
3075 /* For instructions that are truly conditionally executed, e.g. b.cond,
3076 prepare the full mnemonic name with the corresponding condition
3077 suffix. */
3078 char name[8];
3079
3080 remove_dot_suffix (name, inst);
3081 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3082 }
3083 else
3084 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3085 }
3086
3087 /* Decide whether we need to print a comment after the operands of
3088 instruction INST. */
3089
3090 static void
3091 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3092 {
3093 if (inst->opcode->flags & F_COND)
3094 {
3095 char name[8];
3096 unsigned int i, num_conds;
3097
3098 remove_dot_suffix (name, inst);
3099 num_conds = ARRAY_SIZE (inst->cond->names);
3100 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3101 (*info->fprintf_func) (info->stream, "%s %s.%s",
3102 i == 1 ? " //" : ",",
3103 name, inst->cond->names[i]);
3104 }
3105 }
3106
3107 /* Build notes from verifiers into a string for printing. */
3108
3109 static void
3110 print_verifier_notes (aarch64_operand_error *detail,
3111 struct disassemble_info *info)
3112 {
3113 if (no_notes)
3114 return;
3115
 3116 /* The output of the verifier cannot be a fatal error; otherwise the assembly
 3117 would not have succeeded. We can safely ignore these. */
3118 assert (detail->non_fatal);
3119 assert (detail->error);
3120
3121 /* If there are multiple verifier messages, concat them up to 1k. */
3122 (*info->fprintf_func) (info->stream, " // note: %s", detail->error);
3123 if (detail->index >= 0)
3124 (*info->fprintf_func) (info->stream, " at operand %d", detail->index + 1);
3125 }
3126
3127 /* Print the instruction according to *INST. */
3128
3129 static void
3130 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3131 const aarch64_insn code,
3132 struct disassemble_info *info,
3133 aarch64_operand_error *mismatch_details)
3134 {
3135 bfd_boolean has_notes = FALSE;
3136
3137 print_mnemonic_name (inst, info);
3138 print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
3139 print_comment (inst, info);
3140
 3141 /* If we've already printed a note, there is not enough space to print more,
 3142 so exit. Usually notes shouldn't overlap, so it shouldn't happen that we
 3143 have a note from a register and an instruction at the same time. */
3144 if (has_notes)
3145 return;
3146
 3147 /* Always run constraint verifiers; this is needed because constraints need
 3148 to maintain a global state regardless of whether the instruction has the
 3149 flag set or not. */
3150 enum err_type result = verify_constraints (inst, code, pc, FALSE,
3151 mismatch_details, &insn_sequence);
3152 switch (result)
3153 {
3154 case ERR_UND:
3155 case ERR_UNP:
3156 case ERR_NYI:
3157 assert (0);
3158 case ERR_VFI:
3159 print_verifier_notes (mismatch_details, info);
3160 break;
3161 default:
3162 break;
3163 }
3164 }
3165
3166 /* Entry-point of the instruction disassembler and printer. */
3167
3168 static void
3169 print_insn_aarch64_word (bfd_vma pc,
3170 uint32_t word,
3171 struct disassemble_info *info,
3172 aarch64_operand_error *errors)
3173 {
3174 static const char *err_msg[ERR_NR_ENTRIES+1] =
3175 {
3176 [ERR_OK] = "_",
3177 [ERR_UND] = "undefined",
3178 [ERR_UNP] = "unpredictable",
3179 [ERR_NYI] = "NYI"
3180 };
3181
3182 enum err_type ret;
3183 aarch64_inst inst;
3184
3185 info->insn_info_valid = 1;
3186 info->branch_delay_insns = 0;
3187 info->data_size = 0;
3188 info->target = 0;
3189 info->target2 = 0;
3190
3191 if (info->flags & INSN_HAS_RELOC)
3192 /* If the instruction has a reloc associated with it, then
3193 the offset field in the instruction will actually be the
3194 addend for the reloc. (If we are using REL type relocs).
3195 In such cases, we can ignore the pc when computing
3196 addresses, since the addend is not currently pc-relative. */
3197 pc = 0;
3198
3199 ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3200
3201 if (((word >> 21) & 0x3ff) == 1)
3202 {
3203 /* RESERVED for ALES. */
3204 assert (ret != ERR_OK);
3205 ret = ERR_NYI;
3206 }
3207
3208 switch (ret)
3209 {
3210 case ERR_UND:
3211 case ERR_UNP:
3212 case ERR_NYI:
3213 /* Handle undefined instructions. */
3214 info->insn_type = dis_noninsn;
3215 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3216 word, err_msg[ret]);
3217 break;
3218 case ERR_OK:
3219 user_friendly_fixup (&inst);
3220 print_aarch64_insn (pc, &inst, word, info, errors);
3221 break;
3222 default:
3223 abort ();
3224 }
3225 }
3226
3227 /* Disallow mapping symbols ($x, $d etc) from
3228 being displayed in symbol relative addresses. */
3229
3230 bfd_boolean
3231 aarch64_symbol_is_valid (asymbol * sym,
3232 struct disassemble_info * info ATTRIBUTE_UNUSED)
3233 {
3234 const char * name;
3235
3236 if (sym == NULL)
3237 return FALSE;
3238
3239 name = bfd_asymbol_name (sym);
3240
3241 return name
3242 && (name[0] != '$'
3243 || (name[1] != 'x' && name[1] != 'd')
3244 || (name[2] != '\0' && name[2] != '.'));
3245 }
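/* Mapping symbols are "$x" (code) and "$d" (data), optionally followed by
   "." and an arbitrary suffix, e.g. "$x.42"; they describe the contents of
   the section rather than denoting real symbols, hence they are filtered
   out here and recognised again in get_sym_code_type below.  */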
3246
3247 /* Print data bytes on INFO->STREAM. */
3248
3249 static void
3250 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3251 uint32_t word,
3252 struct disassemble_info *info,
3253 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3254 {
3255 switch (info->bytes_per_chunk)
3256 {
3257 case 1:
3258 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3259 break;
3260 case 2:
3261 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3262 break;
3263 case 4:
3264 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3265 break;
3266 default:
3267 abort ();
3268 }
3269 }
3270
3271 /* Try to infer the code or data type from a symbol.
3272 Returns nonzero if *MAP_TYPE was set. */
3273
3274 static int
3275 get_sym_code_type (struct disassemble_info *info, int n,
3276 enum map_type *map_type)
3277 {
3278 elf_symbol_type *es;
3279 unsigned int type;
3280 const char *name;
3281
3282 /* If the symbol is in a different section, ignore it. */
3283 if (info->section != NULL && info->section != info->symtab[n]->section)
3284 return FALSE;
3285
3286 es = *(elf_symbol_type **)(info->symtab + n);
3287 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3288
3289 /* If the symbol has function type then use that. */
3290 if (type == STT_FUNC)
3291 {
3292 *map_type = MAP_INSN;
3293 return TRUE;
3294 }
3295
3296 /* Check for mapping symbols. */
3297 name = bfd_asymbol_name(info->symtab[n]);
3298 if (name[0] == '$'
3299 && (name[1] == 'x' || name[1] == 'd')
3300 && (name[2] == '\0' || name[2] == '.'))
3301 {
3302 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3303 return TRUE;
3304 }
3305
3306 return FALSE;
3307 }
3308
3309 /* Entry-point of the AArch64 disassembler. */
3310
3311 int
3312 print_insn_aarch64 (bfd_vma pc,
3313 struct disassemble_info *info)
3314 {
3315 bfd_byte buffer[INSNLEN];
3316 int status;
3317 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3318 aarch64_operand_error *);
3319 bfd_boolean found = FALSE;
3320 unsigned int size = 4;
3321 unsigned long data;
3322 aarch64_operand_error errors;
3323
3324 if (info->disassembler_options)
3325 {
3326 set_default_aarch64_dis_options (info);
3327
3328 parse_aarch64_dis_options (info->disassembler_options);
3329
3330 /* To avoid repeated parsing of these options, we remove them here. */
3331 info->disassembler_options = NULL;
3332 }
3333
 3334 /* AArch64 instructions are always little-endian. */
3335 info->endian_code = BFD_ENDIAN_LITTLE;
3336
3337 /* First check the full symtab for a mapping symbol, even if there
3338 are no usable non-mapping symbols for this address. */
3339 if (info->symtab_size != 0
3340 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3341 {
3342 enum map_type type = MAP_INSN;
3343 int last_sym = -1;
3344 bfd_vma addr;
3345 int n;
3346
3347 if (pc <= last_mapping_addr)
3348 last_mapping_sym = -1;
3349
3350 /* Start scanning at the start of the function, or wherever
3351 we finished last time. */
3352 n = info->symtab_pos + 1;
3353 if (n < last_mapping_sym)
3354 n = last_mapping_sym;
3355
3356 /* Scan up to the location being disassembled. */
3357 for (; n < info->symtab_size; n++)
3358 {
3359 addr = bfd_asymbol_value (info->symtab[n]);
3360 if (addr > pc)
3361 break;
3362 if (get_sym_code_type (info, n, &type))
3363 {
3364 last_sym = n;
3365 found = TRUE;
3366 }
3367 }
3368
3369 if (!found)
3370 {
3371 n = info->symtab_pos;
3372 if (n < last_mapping_sym)
3373 n = last_mapping_sym;
3374
3375 /* No mapping symbol found at this address. Look backwards
 3376 for a preceding one. */
3377 for (; n >= 0; n--)
3378 {
3379 if (get_sym_code_type (info, n, &type))
3380 {
3381 last_sym = n;
3382 found = TRUE;
3383 break;
3384 }
3385 }
3386 }
3387
3388 last_mapping_sym = last_sym;
3389 last_type = type;
3390
3391 /* Look a little bit ahead to see if we should print out
3392 less than four bytes of data. If there's a symbol,
3393 mapping or otherwise, after two bytes then don't
3394 print more. */
3395 if (last_type == MAP_DATA)
3396 {
3397 size = 4 - (pc & 3);
3398 for (n = last_sym + 1; n < info->symtab_size; n++)
3399 {
3400 addr = bfd_asymbol_value (info->symtab[n]);
3401 if (addr > pc)
3402 {
3403 if (addr - pc < size)
3404 size = addr - pc;
3405 break;
3406 }
3407 }
3408 /* If the next symbol is after three bytes, we need to
3409 print only part of the data, so that we can use either
3410 .byte or .short. */
3411 if (size == 3)
3412 size = (pc & 1) ? 1 : 2;
3413 }
3414 }
3415
3416 if (last_type == MAP_DATA)
3417 {
3418 /* size was set above. */
3419 info->bytes_per_chunk = size;
3420 info->display_endian = info->endian;
3421 printer = print_insn_data;
3422 }
3423 else
3424 {
3425 info->bytes_per_chunk = size = INSNLEN;
3426 info->display_endian = info->endian_code;
3427 printer = print_insn_aarch64_word;
3428 }
3429
3430 status = (*info->read_memory_func) (pc, buffer, size, info);
3431 if (status != 0)
3432 {
3433 (*info->memory_error_func) (status, pc, info);
3434 return -1;
3435 }
3436
3437 data = bfd_get_bits (buffer, size * 8,
3438 info->display_endian == BFD_ENDIAN_BIG);
3439
3440 (*printer) (pc, data, info, &errors);
3441
3442 return size;
3443 }
3444 \f
3445 void
3446 print_aarch64_disassembler_options (FILE *stream)
3447 {
3448 fprintf (stream, _("\n\
3449 The following AARCH64 specific disassembler options are supported for use\n\
3450 with the -M switch (multiple options should be separated by commas):\n"));
3451
3452 fprintf (stream, _("\n\
3453 no-aliases Don't print instruction aliases.\n"));
3454
3455 fprintf (stream, _("\n\
3456 aliases Do print instruction aliases.\n"));
3457
3458 fprintf (stream, _("\n\
3459 no-notes Don't print instruction notes.\n"));
3460
3461 fprintf (stream, _("\n\
3462 notes Do print instruction notes.\n"));
3463
3464 #ifdef DEBUG_AARCH64
3465 fprintf (stream, _("\n\
3466 debug_dump Temp switch for debug trace.\n"));
3467 #endif /* DEBUG_AARCH64 */
3468
3469 fprintf (stream, _("\n"));
3470 }