1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2014 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
22 #include "bfd_stdint.h"
24 #include "libiberty.h"
26 #include "aarch64-dis.h"
36 /* Cached mapping symbol state. */
43 static enum map_type last_type
;
44 static int last_mapping_sym
= -1;
45 static bfd_vma last_mapping_addr
= 0;
48 static int no_aliases
= 0; /* If set disassemble as most general inst. */
52 set_default_aarch64_dis_options (struct disassemble_info
*info ATTRIBUTE_UNUSED
)
57 parse_aarch64_dis_option (const char *option
, unsigned int len ATTRIBUTE_UNUSED
)
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option
, "no-aliases"))
66 if (CONST_STRNEQ (option
, "aliases"))
73 if (CONST_STRNEQ (option
, "debug_dump"))
78 #endif /* DEBUG_AARCH64 */
81 fprintf (stderr
, _("Unrecognised disassembler option: %s\n"), option
);
85 parse_aarch64_dis_options (const char *options
)
87 const char *option_end
;
92 while (*options
!= '\0')
94 /* Skip empty options. */
101 /* We know that *options is neither NUL or a comma. */
102 option_end
= options
+ 1;
103 while (*option_end
!= ',' && *option_end
!= '\0')
106 parse_aarch64_dis_option (options
, option_end
- options
);
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options
= option_end
;
114 /* Functions doing the instruction disassembling. */
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
120 N.B. the fields are required to be in such an order than the most signficant
121 field for VALUE comes the first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
124 the order of H, L, M. */
126 static inline aarch64_insn
127 extract_fields (aarch64_insn code
, aarch64_insn mask
, ...)
130 const aarch64_field
*field
;
131 enum aarch64_field_kind kind
;
135 num
= va_arg (va
, uint32_t);
137 aarch64_insn value
= 0x0;
140 kind
= va_arg (va
, enum aarch64_field_kind
);
141 field
= &fields
[kind
];
142 value
<<= field
->width
;
143 value
|= extract_field (kind
, code
, mask
);
148 /* Sign-extend bit I of VALUE. */
149 static inline int32_t
150 sign_extend (aarch64_insn value
, unsigned i
)
152 uint32_t ret
= value
;
155 if ((value
>> i
) & 0x1)
157 uint32_t val
= (uint32_t)(-1) << i
;
160 return (int32_t) ret
;
163 /* N.B. the following inline helpfer functions create a dependency on the
164 order of operand qualifier enumerators. */
166 /* Given VALUE, return qualifier for a general purpose register. */
167 static inline enum aarch64_opnd_qualifier
168 get_greg_qualifier_from_value (aarch64_insn value
)
170 enum aarch64_opnd_qualifier qualifier
= AARCH64_OPND_QLF_W
+ value
;
172 && aarch64_get_qualifier_standard_value (qualifier
) == value
);
176 /* Given VALUE, return qualifier for a vector register. */
177 static inline enum aarch64_opnd_qualifier
178 get_vreg_qualifier_from_value (aarch64_insn value
)
180 enum aarch64_opnd_qualifier qualifier
= AARCH64_OPND_QLF_V_8B
+ value
;
183 && aarch64_get_qualifier_standard_value (qualifier
) == value
);
187 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
188 static inline enum aarch64_opnd_qualifier
189 get_sreg_qualifier_from_value (aarch64_insn value
)
191 enum aarch64_opnd_qualifier qualifier
= AARCH64_OPND_QLF_S_B
+ value
;
194 && aarch64_get_qualifier_standard_value (qualifier
) == value
);
198 /* Given the instruction in *INST which is probably half way through the
199 decoding and our caller wants to know the expected qualifier for operand
200 I. Return such a qualifier if we can establish it; otherwise return
201 AARCH64_OPND_QLF_NIL. */
203 static aarch64_opnd_qualifier_t
204 get_expected_qualifier (const aarch64_inst
*inst
, int i
)
206 aarch64_opnd_qualifier_seq_t qualifiers
;
207 /* Should not be called if the qualifier is known. */
208 assert (inst
->operands
[i
].qualifier
== AARCH64_OPND_QLF_NIL
);
209 if (aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
,
211 return qualifiers
[i
];
213 return AARCH64_OPND_QLF_NIL
;
216 /* Operand extractors. */
219 aarch64_ext_regno (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
220 const aarch64_insn code
,
221 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
223 info
->reg
.regno
= extract_field (self
->fields
[0], code
, 0);
227 /* e.g. IC <ic_op>{, <Xt>}. */
229 aarch64_ext_regrt_sysins (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
230 const aarch64_insn code
,
231 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
233 info
->reg
.regno
= extract_field (self
->fields
[0], code
, 0);
234 assert (info
->idx
== 1
235 && (aarch64_get_operand_class (inst
->operands
[0].type
)
236 == AARCH64_OPND_CLASS_SYSTEM
));
237 /* This will make the constraint checking happy and more importantly will
238 help the disassembler determine whether this operand is optional or
240 info
->present
= inst
->operands
[0].sysins_op
->has_xt
;
245 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
247 aarch64_ext_reglane (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
248 const aarch64_insn code
,
249 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
252 info
->reglane
.regno
= extract_field (self
->fields
[0], code
,
255 /* Index and/or type. */
256 if (inst
->opcode
->iclass
== asisdone
257 || inst
->opcode
->iclass
== asimdins
)
259 if (info
->type
== AARCH64_OPND_En
260 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
263 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
264 assert (info
->idx
== 1); /* Vn */
265 aarch64_insn value
= extract_field (FLD_imm4
, code
, 0);
266 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
267 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
268 shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
269 info
->reglane
.index
= value
>> shift
;
273 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
281 aarch64_insn value
= extract_field (FLD_imm5
, code
, 0);
282 while (++pos
<= 3 && (value
& 0x1) == 0)
286 info
->qualifier
= get_sreg_qualifier_from_value (pos
);
287 info
->reglane
.index
= (unsigned) (value
>> 1);
292 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
293 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
295 /* Need information in other operand(s) to help decoding. */
296 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
297 switch (info
->qualifier
)
299 case AARCH64_OPND_QLF_S_H
:
301 info
->reglane
.index
= extract_fields (code
, 0, 3, FLD_H
, FLD_L
,
303 info
->reglane
.regno
&= 0xf;
305 case AARCH64_OPND_QLF_S_S
:
307 info
->reglane
.index
= extract_fields (code
, 0, 2, FLD_H
, FLD_L
);
309 case AARCH64_OPND_QLF_S_D
:
311 info
->reglane
.index
= extract_field (FLD_H
, code
, 0);
322 aarch64_ext_reglist (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
323 const aarch64_insn code
,
324 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
327 info
->reglist
.first_regno
= extract_field (self
->fields
[0], code
, 0);
329 info
->reglist
.num_regs
= extract_field (FLD_len
, code
, 0) + 1;
333 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
335 aarch64_ext_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
336 aarch64_opnd_info
*info
, const aarch64_insn code
,
337 const aarch64_inst
*inst
)
340 /* Number of elements in each structure to be loaded/stored. */
341 unsigned expected_num
= get_opcode_dependent_value (inst
->opcode
);
345 unsigned is_reserved
;
347 unsigned num_elements
;
363 info
->reglist
.first_regno
= extract_field (FLD_Rt
, code
, 0);
365 value
= extract_field (FLD_opcode
, code
, 0);
366 if (expected_num
!= data
[value
].num_elements
|| data
[value
].is_reserved
)
368 info
->reglist
.num_regs
= data
[value
].num_regs
;
373 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
374 lanes instructions. */
376 aarch64_ext_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
377 aarch64_opnd_info
*info
, const aarch64_insn code
,
378 const aarch64_inst
*inst
)
383 info
->reglist
.first_regno
= extract_field (FLD_Rt
, code
, 0);
385 value
= extract_field (FLD_S
, code
, 0);
387 /* Number of registers is equal to the number of elements in
388 each structure to be loaded/stored. */
389 info
->reglist
.num_regs
= get_opcode_dependent_value (inst
->opcode
);
390 assert (info
->reglist
.num_regs
>= 1 && info
->reglist
.num_regs
<= 4);
392 /* Except when it is LD1R. */
393 if (info
->reglist
.num_regs
== 1 && value
== (aarch64_insn
) 1)
394 info
->reglist
.num_regs
= 2;
399 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
400 load/store single element instructions. */
402 aarch64_ext_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
403 aarch64_opnd_info
*info
, const aarch64_insn code
,
404 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
406 aarch64_field field
= {0, 0};
407 aarch64_insn QSsize
; /* fields Q:S:size. */
408 aarch64_insn opcodeh2
; /* opcode<2:1> */
411 info
->reglist
.first_regno
= extract_field (FLD_Rt
, code
, 0);
413 /* Decode the index, opcode<2:1> and size. */
414 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
415 opcodeh2
= extract_field_2 (&field
, code
, 0);
416 QSsize
= extract_fields (code
, 0, 3, FLD_Q
, FLD_S
, FLD_vldst_size
);
420 info
->qualifier
= AARCH64_OPND_QLF_S_B
;
421 /* Index encoded in "Q:S:size". */
422 info
->reglist
.index
= QSsize
;
428 info
->qualifier
= AARCH64_OPND_QLF_S_H
;
429 /* Index encoded in "Q:S:size<1>". */
430 info
->reglist
.index
= QSsize
>> 1;
433 if ((QSsize
>> 1) & 0x1)
436 if ((QSsize
& 0x1) == 0)
438 info
->qualifier
= AARCH64_OPND_QLF_S_S
;
439 /* Index encoded in "Q:S". */
440 info
->reglist
.index
= QSsize
>> 2;
444 if (extract_field (FLD_S
, code
, 0))
447 info
->qualifier
= AARCH64_OPND_QLF_S_D
;
448 /* Index encoded in "Q". */
449 info
->reglist
.index
= QSsize
>> 3;
456 info
->reglist
.has_index
= 1;
457 info
->reglist
.num_regs
= 0;
458 /* Number of registers is equal to the number of elements in
459 each structure to be loaded/stored. */
460 info
->reglist
.num_regs
= get_opcode_dependent_value (inst
->opcode
);
461 assert (info
->reglist
.num_regs
>= 1 && info
->reglist
.num_regs
<= 4);
466 /* Decode fields immh:immb and/or Q for e.g.
467 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
468 or SSHR <V><d>, <V><n>, #<shift>. */
471 aarch64_ext_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
472 aarch64_opnd_info
*info
, const aarch64_insn code
,
473 const aarch64_inst
*inst
)
476 aarch64_insn Q
, imm
, immh
;
477 enum aarch64_insn_class iclass
= inst
->opcode
->iclass
;
479 immh
= extract_field (FLD_immh
, code
, 0);
482 imm
= extract_fields (code
, 0, 2, FLD_immh
, FLD_immb
);
484 /* Get highest set bit in immh. */
485 while (--pos
>= 0 && (immh
& 0x8) == 0)
488 assert ((iclass
== asimdshf
|| iclass
== asisdshf
)
489 && (info
->type
== AARCH64_OPND_IMM_VLSR
490 || info
->type
== AARCH64_OPND_IMM_VLSL
));
492 if (iclass
== asimdshf
)
494 Q
= extract_field (FLD_Q
, code
, 0);
496 0000 x SEE AdvSIMD modified immediate
506 get_vreg_qualifier_from_value ((pos
<< 1) | (int) Q
);
509 info
->qualifier
= get_sreg_qualifier_from_value (pos
);
511 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
513 0000 SEE AdvSIMD modified immediate
514 0001 (16-UInt(immh:immb))
515 001x (32-UInt(immh:immb))
516 01xx (64-UInt(immh:immb))
517 1xxx (128-UInt(immh:immb)) */
518 info
->imm
.value
= (16 << pos
) - imm
;
522 0000 SEE AdvSIMD modified immediate
523 0001 (UInt(immh:immb)-8)
524 001x (UInt(immh:immb)-16)
525 01xx (UInt(immh:immb)-32)
526 1xxx (UInt(immh:immb)-64) */
527 info
->imm
.value
= imm
- (8 << pos
);
532 /* Decode shift immediate for e.g. sshr (imm). */
534 aarch64_ext_shll_imm (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
535 aarch64_opnd_info
*info
, const aarch64_insn code
,
536 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
540 val
= extract_field (FLD_size
, code
, 0);
543 case 0: imm
= 8; break;
544 case 1: imm
= 16; break;
545 case 2: imm
= 32; break;
548 info
->imm
.value
= imm
;
552 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
553 value in the field(s) will be extracted as unsigned immediate value. */
555 aarch64_ext_imm (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
556 const aarch64_insn code
,
557 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
560 /* Maximum of two fields to extract. */
561 assert (self
->fields
[2] == FLD_NIL
);
563 if (self
->fields
[1] == FLD_NIL
)
564 imm
= extract_field (self
->fields
[0], code
, 0);
566 /* e.g. TBZ b5:b40. */
567 imm
= extract_fields (code
, 0, 2, self
->fields
[0], self
->fields
[1]);
569 if (info
->type
== AARCH64_OPND_FPIMM
)
572 if (operand_need_sign_extension (self
))
573 imm
= sign_extend (imm
, get_operand_fields_width (self
) - 1);
575 if (operand_need_shift_by_two (self
))
578 if (info
->type
== AARCH64_OPND_ADDR_ADRP
)
581 info
->imm
.value
= imm
;
585 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
587 aarch64_ext_imm_half (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
588 const aarch64_insn code
,
589 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
591 aarch64_ext_imm (self
, info
, code
, inst
);
592 info
->shifter
.kind
= AARCH64_MOD_LSL
;
593 info
->shifter
.amount
= extract_field (FLD_hw
, code
, 0) << 4;
597 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
598 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
600 aarch64_ext_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
601 aarch64_opnd_info
*info
,
602 const aarch64_insn code
,
603 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
606 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
607 aarch64_field field
= {0, 0};
609 assert (info
->idx
== 1);
611 if (info
->type
== AARCH64_OPND_SIMD_FPIMM
)
614 /* a:b:c:d:e:f:g:h */
615 imm
= extract_fields (code
, 0, 2, FLD_abc
, FLD_defgh
);
616 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
618 /* Either MOVI <Dd>, #<imm>
619 or MOVI <Vd>.2D, #<imm>.
620 <imm> is a 64-bit immediate
621 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
622 encoded in "a:b:c:d:e:f:g:h". */
624 unsigned abcdefgh
= imm
;
625 for (imm
= 0ull, i
= 0; i
< 8; i
++)
626 if (((abcdefgh
>> i
) & 0x1) != 0)
627 imm
|= 0xffull
<< (8 * i
);
629 info
->imm
.value
= imm
;
632 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
633 switch (info
->qualifier
)
635 case AARCH64_OPND_QLF_NIL
:
637 info
->shifter
.kind
= AARCH64_MOD_NONE
;
639 case AARCH64_OPND_QLF_LSL
:
641 info
->shifter
.kind
= AARCH64_MOD_LSL
;
642 switch (aarch64_get_qualifier_esize (opnd0_qualifier
))
644 case 4: gen_sub_field (FLD_cmode
, 1, 2, &field
); break; /* per word */
645 case 2: gen_sub_field (FLD_cmode
, 1, 1, &field
); break; /* per half */
646 case 1: gen_sub_field (FLD_cmode
, 1, 0, &field
); break; /* per byte */
647 default: assert (0); return 0;
649 /* 00: 0; 01: 8; 10:16; 11:24. */
650 info
->shifter
.amount
= extract_field_2 (&field
, code
, 0) << 3;
652 case AARCH64_OPND_QLF_MSL
:
654 info
->shifter
.kind
= AARCH64_MOD_MSL
;
655 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
656 info
->shifter
.amount
= extract_field_2 (&field
, code
, 0) ? 16 : 8;
666 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
668 aarch64_ext_fbits (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
669 aarch64_opnd_info
*info
, const aarch64_insn code
,
670 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
672 info
->imm
.value
= 64- extract_field (FLD_scale
, code
, 0);
676 /* Decode arithmetic immediate for e.g.
677 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
679 aarch64_ext_aimm (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
680 aarch64_opnd_info
*info
, const aarch64_insn code
,
681 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
685 info
->shifter
.kind
= AARCH64_MOD_LSL
;
687 value
= extract_field (FLD_shift
, code
, 0);
690 info
->shifter
.amount
= value
? 12 : 0;
691 /* imm12 (unsigned) */
692 info
->imm
.value
= extract_field (FLD_imm12
, code
, 0);
697 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
700 aarch64_ext_limm (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
701 aarch64_opnd_info
*info
, const aarch64_insn code
,
702 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
710 value
= extract_fields (code
, 0, 3, FLD_N
, FLD_immr
, FLD_imms
);
711 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
712 || inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_X
);
713 sf
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
) != 4;
715 /* value is N:immr:imms. */
717 R
= (value
>> 6) & 0x3f;
718 N
= (value
>> 12) & 0x1;
720 if (sf
== 0 && N
== 1)
723 /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
724 (in other words, right rotated by R), then replicated. */
728 mask
= 0xffffffffffffffffull
;
734 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size
= 32; break;
735 case 0x20 ... 0x2f: /* 10xxxx */ simd_size
= 16; S
&= 0xf; break;
736 case 0x30 ... 0x37: /* 110xxx */ simd_size
= 8; S
&= 0x7; break;
737 case 0x38 ... 0x3b: /* 1110xx */ simd_size
= 4; S
&= 0x3; break;
738 case 0x3c ... 0x3d: /* 11110x */ simd_size
= 2; S
&= 0x1; break;
741 mask
= (1ull << simd_size
) - 1;
742 /* Top bits are IGNORED. */
745 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
746 if (S
== simd_size
- 1)
748 /* S+1 consecutive bits to 1. */
749 /* NOTE: S can't be 63 due to detection above. */
750 imm
= (1ull << (S
+ 1)) - 1;
751 /* Rotate to the left by simd_size - R. */
753 imm
= ((imm
<< (simd_size
- R
)) & mask
) | (imm
>> R
);
754 /* Replicate the value according to SIMD size. */
757 case 2: imm
= (imm
<< 2) | imm
;
758 case 4: imm
= (imm
<< 4) | imm
;
759 case 8: imm
= (imm
<< 8) | imm
;
760 case 16: imm
= (imm
<< 16) | imm
;
761 case 32: imm
= (imm
<< 32) | imm
;
763 default: assert (0); return 0;
766 info
->imm
.value
= sf
? imm
: imm
& 0xffffffff;
771 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
772 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
774 aarch64_ext_ft (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
775 aarch64_opnd_info
*info
,
776 const aarch64_insn code
, const aarch64_inst
*inst
)
781 info
->reg
.regno
= extract_field (FLD_Rt
, code
, 0);
784 value
= extract_field (FLD_ldst_size
, code
, 0);
785 if (inst
->opcode
->iclass
== ldstpair_indexed
786 || inst
->opcode
->iclass
== ldstnapair_offs
787 || inst
->opcode
->iclass
== ldstpair_off
788 || inst
->opcode
->iclass
== loadlit
)
790 enum aarch64_opnd_qualifier qualifier
;
793 case 0: qualifier
= AARCH64_OPND_QLF_S_S
; break;
794 case 1: qualifier
= AARCH64_OPND_QLF_S_D
; break;
795 case 2: qualifier
= AARCH64_OPND_QLF_S_Q
; break;
798 info
->qualifier
= qualifier
;
803 value
= extract_fields (code
, 0, 2, FLD_opc1
, FLD_ldst_size
);
806 info
->qualifier
= get_sreg_qualifier_from_value (value
);
812 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
814 aarch64_ext_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
815 aarch64_opnd_info
*info
,
817 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
820 info
->addr
.base_regno
= extract_field (FLD_Rn
, code
, 0);
824 /* Decode the address operand for e.g.
825 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
827 aarch64_ext_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
828 aarch64_opnd_info
*info
,
829 aarch64_insn code
, const aarch64_inst
*inst
)
831 aarch64_insn S
, value
;
834 info
->addr
.base_regno
= extract_field (FLD_Rn
, code
, 0);
836 info
->addr
.offset
.regno
= extract_field (FLD_Rm
, code
, 0);
838 value
= extract_field (FLD_option
, code
, 0);
840 aarch64_get_operand_modifier_from_value (value
, TRUE
/* extend_p */);
841 /* Fix-up the shifter kind; although the table-driven approach is
842 efficient, it is slightly inflexible, thus needing this fix-up. */
843 if (info
->shifter
.kind
== AARCH64_MOD_UXTX
)
844 info
->shifter
.kind
= AARCH64_MOD_LSL
;
846 S
= extract_field (FLD_S
, code
, 0);
849 info
->shifter
.amount
= 0;
850 info
->shifter
.amount_present
= 0;
855 /* Need information in other operand(s) to help achieve the decoding
857 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
858 /* Get the size of the data element that is accessed, which may be
859 different from that of the source register size, e.g. in strb/ldrb. */
860 size
= aarch64_get_qualifier_esize (info
->qualifier
);
861 info
->shifter
.amount
= get_logsz (size
);
862 info
->shifter
.amount_present
= 1;
868 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
870 aarch64_ext_addr_simm (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
871 aarch64_insn code
, const aarch64_inst
*inst
)
874 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
877 info
->addr
.base_regno
= extract_field (FLD_Rn
, code
, 0);
878 /* simm (imm9 or imm7) */
879 imm
= extract_field (self
->fields
[0], code
, 0);
880 info
->addr
.offset
.imm
= sign_extend (imm
, fields
[self
->fields
[0]].width
- 1);
881 if (self
->fields
[0] == FLD_imm7
)
882 /* scaled immediate in ld/st pair instructions. */
883 info
->addr
.offset
.imm
*= aarch64_get_qualifier_esize (info
->qualifier
);
885 if (inst
->opcode
->iclass
== ldst_unscaled
886 || inst
->opcode
->iclass
== ldstnapair_offs
887 || inst
->opcode
->iclass
== ldstpair_off
888 || inst
->opcode
->iclass
== ldst_unpriv
)
889 info
->addr
.writeback
= 0;
892 /* pre/post- index */
893 info
->addr
.writeback
= 1;
894 if (extract_field (self
->fields
[1], code
, 0) == 1)
895 info
->addr
.preind
= 1;
897 info
->addr
.postind
= 1;
903 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
905 aarch64_ext_addr_uimm12 (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
907 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
910 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
911 shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
913 info
->addr
.base_regno
= extract_field (self
->fields
[0], code
, 0);
915 info
->addr
.offset
.imm
= extract_field (self
->fields
[1], code
, 0) << shift
;
919 /* Decode the address operand for e.g.
920 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
922 aarch64_ext_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
923 aarch64_opnd_info
*info
,
924 aarch64_insn code
, const aarch64_inst
*inst
)
926 /* The opcode dependent area stores the number of elements in
927 each structure to be loaded/stored. */
928 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
931 info
->addr
.base_regno
= extract_field (FLD_Rn
, code
, 0);
933 info
->addr
.offset
.regno
= extract_field (FLD_Rm
, code
, 0);
934 if (info
->addr
.offset
.regno
== 31)
936 if (inst
->opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
937 /* Special handling of loading single structure to all lane. */
938 info
->addr
.offset
.imm
= (is_ld1r
? 1
939 : inst
->operands
[0].reglist
.num_regs
)
940 * aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
942 info
->addr
.offset
.imm
= inst
->operands
[0].reglist
.num_regs
943 * aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
)
944 * aarch64_get_qualifier_nelem (inst
->operands
[0].qualifier
);
947 info
->addr
.offset
.is_reg
= 1;
948 info
->addr
.writeback
= 1;
953 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
955 aarch64_ext_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
956 aarch64_opnd_info
*info
,
957 aarch64_insn code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
961 value
= extract_field (FLD_cond
, code
, 0);
962 info
->cond
= get_cond_from_value (value
);
966 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
968 aarch64_ext_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
969 aarch64_opnd_info
*info
,
971 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
973 /* op0:op1:CRn:CRm:op2 */
974 info
->sysreg
= extract_fields (code
, 0, 5, FLD_op0
, FLD_op1
, FLD_CRn
,
979 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
981 aarch64_ext_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
982 aarch64_opnd_info
*info
, aarch64_insn code
,
983 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
987 info
->pstatefield
= extract_fields (code
, 0, 2, FLD_op1
, FLD_op2
);
988 for (i
= 0; aarch64_pstatefields
[i
].name
!= NULL
; ++i
)
989 if (aarch64_pstatefields
[i
].value
== (aarch64_insn
)info
->pstatefield
)
991 /* Reserved value in <pstatefield>. */
995 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
997 aarch64_ext_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
998 aarch64_opnd_info
*info
,
1000 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1004 const aarch64_sys_ins_reg
*sysins_ops
;
1005 /* op0:op1:CRn:CRm:op2 */
1006 value
= extract_fields (code
, 0, 5,
1007 FLD_op0
, FLD_op1
, FLD_CRn
,
1012 case AARCH64_OPND_SYSREG_AT
: sysins_ops
= aarch64_sys_regs_at
; break;
1013 case AARCH64_OPND_SYSREG_DC
: sysins_ops
= aarch64_sys_regs_dc
; break;
1014 case AARCH64_OPND_SYSREG_IC
: sysins_ops
= aarch64_sys_regs_ic
; break;
1015 case AARCH64_OPND_SYSREG_TLBI
: sysins_ops
= aarch64_sys_regs_tlbi
; break;
1016 default: assert (0); return 0;
1019 for (i
= 0; sysins_ops
[i
].template != NULL
; ++i
)
1020 if (sysins_ops
[i
].value
== value
)
1022 info
->sysins_op
= sysins_ops
+ i
;
1023 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1024 info
->sysins_op
->template,
1025 (unsigned)info
->sysins_op
->value
,
1026 info
->sysins_op
->has_xt
, i
);
1033 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1036 aarch64_ext_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1037 aarch64_opnd_info
*info
,
1039 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1042 info
->barrier
= aarch64_barrier_options
+ extract_field (FLD_CRm
, code
, 0);
1046 /* Decode the prefetch operation option operand for e.g.
1047 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1050 aarch64_ext_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1051 aarch64_opnd_info
*info
,
1052 aarch64_insn code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1055 info
->prfop
= aarch64_prfops
+ extract_field (FLD_Rt
, code
, 0);
1059 /* Decode the extended register operand for e.g.
1060 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1062 aarch64_ext_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1063 aarch64_opnd_info
*info
,
1065 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1070 info
->reg
.regno
= extract_field (FLD_Rm
, code
, 0);
1072 value
= extract_field (FLD_option
, code
, 0);
1073 info
->shifter
.kind
=
1074 aarch64_get_operand_modifier_from_value (value
, TRUE
/* extend_p */);
1076 info
->shifter
.amount
= extract_field (FLD_imm3
, code
, 0);
1078 /* This makes the constraint checking happy. */
1079 info
->shifter
.operator_present
= 1;
1081 /* Assume inst->operands[0].qualifier has been resolved. */
1082 assert (inst
->operands
[0].qualifier
!= AARCH64_OPND_QLF_NIL
);
1083 info
->qualifier
= AARCH64_OPND_QLF_W
;
1084 if (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_X
1085 && (info
->shifter
.kind
== AARCH64_MOD_UXTX
1086 || info
->shifter
.kind
== AARCH64_MOD_SXTX
))
1087 info
->qualifier
= AARCH64_OPND_QLF_X
;
1092 /* Decode the shifted register operand for e.g.
1093 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1095 aarch64_ext_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1096 aarch64_opnd_info
*info
,
1098 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1103 info
->reg
.regno
= extract_field (FLD_Rm
, code
, 0);
1105 value
= extract_field (FLD_shift
, code
, 0);
1106 info
->shifter
.kind
=
1107 aarch64_get_operand_modifier_from_value (value
, FALSE
/* extend_p */);
1108 if (info
->shifter
.kind
== AARCH64_MOD_ROR
1109 && inst
->opcode
->iclass
!= log_shift
)
1110 /* ROR is not available for the shifted register operand in arithmetic
1114 info
->shifter
.amount
= extract_field (FLD_imm6
, code
, 0);
1116 /* This makes the constraint checking happy. */
1117 info
->shifter
.operator_present
= 1;
1122 /* Bitfields that are commonly used to encode certain operands' information
1123 may be partially used as part of the base opcode in some instructions.
1124 For example, the bit 1 of the field 'size' in
1125 FCVTXN <Vb><d>, <Va><n>
1126 is actually part of the base opcode, while only size<0> is available
1127 for encoding the register type. Another example is the AdvSIMD
1128 instruction ORR (register), in which the field 'size' is also used for
1129 the base opcode, leaving only the field 'Q' available to encode the
1130 vector register arrangement specifier '8B' or '16B'.
1132 This function tries to deduce the qualifier from the value of partially
1133 constrained field(s). Given the VALUE of such a field or fields, the
1134 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1135 operand encoding), the function returns the matching qualifier or
1136 AARCH64_OPND_QLF_NIL if nothing matches.
1138 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1139 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1140 may end with AARCH64_OPND_QLF_NIL. */
1142 static enum aarch64_opnd_qualifier
1143 get_qualifier_from_partial_encoding (aarch64_insn value
,
1144 const enum aarch64_opnd_qualifier
* \
1149 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value
, (int)mask
);
1150 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
1152 aarch64_insn standard_value
;
1153 if (candidates
[i
] == AARCH64_OPND_QLF_NIL
)
1155 standard_value
= aarch64_get_qualifier_standard_value (candidates
[i
]);
1156 if ((standard_value
& mask
) == (value
& mask
))
1157 return candidates
[i
];
1159 return AARCH64_OPND_QLF_NIL
;
1162 /* Given a list of qualifier sequences, return all possible valid qualifiers
1163 for operand IDX in QUALIFIERS.
1164 Assume QUALIFIERS is an array whose length is large enough. */
1167 get_operand_possible_qualifiers (int idx
,
1168 const aarch64_opnd_qualifier_seq_t
*list
,
1169 enum aarch64_opnd_qualifier
*qualifiers
)
1172 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
1173 if ((qualifiers
[i
] = list
[i
][idx
]) == AARCH64_OPND_QLF_NIL
)
1177 /* Decode the size Q field for e.g. SHADD.
1178 We tag one operand with the qualifer according to the code;
1179 whether the qualifier is valid for this opcode or not, it is the
1180 duty of the semantic checking. */
1183 decode_sizeq (aarch64_inst
*inst
)
1186 enum aarch64_opnd_qualifier qualifier
;
1188 aarch64_insn value
, mask
;
1189 enum aarch64_field_kind fld_sz
;
1190 enum aarch64_opnd_qualifier candidates
[AARCH64_MAX_QLF_SEQ_NUM
];
1192 if (inst
->opcode
->iclass
== asisdlse
1193 || inst
->opcode
->iclass
== asisdlsep
1194 || inst
->opcode
->iclass
== asisdlso
1195 || inst
->opcode
->iclass
== asisdlsop
)
1196 fld_sz
= FLD_vldst_size
;
1201 value
= extract_fields (code
, inst
->opcode
->mask
, 2, fld_sz
, FLD_Q
);
1202 /* Obtain the info that which bits of fields Q and size are actually
1203 available for operand encoding. Opcodes like FMAXNM and FMLA have
1204 size[1] unavailable. */
1205 mask
= extract_fields (~inst
->opcode
->mask
, 0, 2, fld_sz
, FLD_Q
);
1207 /* The index of the operand we are going to tag a qualifier and the qualifer
1208 itself are reasoned from the value of the size and Q fields and the
1209 possible valid qualifier lists. */
1210 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1211 DEBUG_TRACE ("key idx: %d", idx
);
1213 /* For most related instruciton, size:Q are fully available for operand
1217 inst
->operands
[idx
].qualifier
= get_vreg_qualifier_from_value (value
);
1221 get_operand_possible_qualifiers (idx
, inst
->opcode
->qualifiers_list
,
1223 #ifdef DEBUG_AARCH64
1227 for (i
= 0; candidates
[i
] != AARCH64_OPND_QLF_NIL
1228 && i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
1229 DEBUG_TRACE ("qualifier %d: %s", i
,
1230 aarch64_get_qualifier_name(candidates
[i
]));
1231 DEBUG_TRACE ("%d, %d", (int)value
, (int)mask
);
1233 #endif /* DEBUG_AARCH64 */
1235 qualifier
= get_qualifier_from_partial_encoding (value
, candidates
, mask
);
1237 if (qualifier
== AARCH64_OPND_QLF_NIL
)
1240 inst
->operands
[idx
].qualifier
= qualifier
;
1244 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1245 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1248 decode_asimd_fcvt (aarch64_inst
*inst
)
1250 aarch64_field field
= {0, 0};
1252 enum aarch64_opnd_qualifier qualifier
;
1254 gen_sub_field (FLD_size
, 0, 1, &field
);
1255 value
= extract_field_2 (&field
, inst
->value
, 0);
1256 qualifier
= value
== 0 ? AARCH64_OPND_QLF_V_4S
1257 : AARCH64_OPND_QLF_V_2D
;
1258 switch (inst
->opcode
->op
)
1262 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1263 inst
->operands
[1].qualifier
= qualifier
;
1267 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1268 inst
->operands
[0].qualifier
= qualifier
;
1278 /* Decode size[0], i.e. bit 22, for
1279 e.g. FCVTXN <Vb><d>, <Va><n>. */
1282 decode_asisd_fcvtxn (aarch64_inst
*inst
)
1284 aarch64_field field
= {0, 0};
1285 gen_sub_field (FLD_size
, 0, 1, &field
);
1286 if (!extract_field_2 (&field
, inst
->value
, 0))
1288 inst
->operands
[0].qualifier
= AARCH64_OPND_QLF_S_S
;
1292 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1294 decode_fcvt (aarch64_inst
*inst
)
1296 enum aarch64_opnd_qualifier qualifier
;
1298 const aarch64_field field
= {15, 2};
1301 value
= extract_field_2 (&field
, inst
->value
, 0);
1304 case 0: qualifier
= AARCH64_OPND_QLF_S_S
; break;
1305 case 1: qualifier
= AARCH64_OPND_QLF_S_D
; break;
1306 case 3: qualifier
= AARCH64_OPND_QLF_S_H
; break;
1309 inst
->operands
[0].qualifier
= qualifier
;
1314 /* Do miscellaneous decodings that are not common enough to be driven by
1318 do_misc_decoding (aarch64_inst
*inst
)
1320 switch (inst
->opcode
->op
)
1323 return decode_fcvt (inst
);
1328 return decode_asimd_fcvt (inst
);
1330 return decode_asisd_fcvtxn (inst
);
1336 /* Opcodes that have fields shared by multiple operands are usually flagged
1337 with flags. In this function, we detect such flags, decode the related
1338 field(s) and store the information in one of the related operands. The
1339 'one' operand is not any operand but one of the operands that can
1340 accommadate all the information that has been decoded. */
1343 do_special_decoding (aarch64_inst
*inst
)
1347 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1348 if (inst
->opcode
->flags
& F_COND
)
1350 value
= extract_field (FLD_cond2
, inst
->value
, 0);
1351 inst
->cond
= get_cond_from_value (value
);
1354 if (inst
->opcode
->flags
& F_SF
)
1356 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1357 value
= extract_field (FLD_sf
, inst
->value
, 0);
1358 inst
->operands
[idx
].qualifier
= get_greg_qualifier_from_value (value
);
1359 if ((inst
->opcode
->flags
& F_N
)
1360 && extract_field (FLD_N
, inst
->value
, 0) != value
)
1363 /* size:Q fields. */
1364 if (inst
->opcode
->flags
& F_SIZEQ
)
1365 return decode_sizeq (inst
);
1367 if (inst
->opcode
->flags
& F_FPTYPE
)
1369 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
1370 value
= extract_field (FLD_type
, inst
->value
, 0);
1373 case 0: inst
->operands
[idx
].qualifier
= AARCH64_OPND_QLF_S_S
; break;
1374 case 1: inst
->operands
[idx
].qualifier
= AARCH64_OPND_QLF_S_D
; break;
1375 case 3: inst
->operands
[idx
].qualifier
= AARCH64_OPND_QLF_S_H
; break;
1380 if (inst
->opcode
->flags
& F_SSIZE
)
1382 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1383 of the base opcode. */
1385 enum aarch64_opnd_qualifier candidates
[AARCH64_MAX_QLF_SEQ_NUM
];
1386 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
1387 value
= extract_field (FLD_size
, inst
->value
, inst
->opcode
->mask
);
1388 mask
= extract_field (FLD_size
, ~inst
->opcode
->mask
, 0);
1389 /* For most related instruciton, the 'size' field is fully available for
1390 operand encoding. */
1392 inst
->operands
[idx
].qualifier
= get_sreg_qualifier_from_value (value
);
1395 get_operand_possible_qualifiers (idx
, inst
->opcode
->qualifiers_list
,
1397 inst
->operands
[idx
].qualifier
1398 = get_qualifier_from_partial_encoding (value
, candidates
, mask
);
1402 if (inst
->opcode
->flags
& F_T
)
1404 /* Num of consecutive '0's on the right side of imm5<3:0>. */
1407 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1408 == AARCH64_OPND_CLASS_SIMD_REG
);
1419 val
= extract_field (FLD_imm5
, inst
->value
, 0);
1420 while ((val
& 0x1) == 0 && ++num
<= 3)
1424 Q
= (unsigned) extract_field (FLD_Q
, inst
->value
, inst
->opcode
->mask
);
1425 inst
->operands
[0].qualifier
=
1426 get_vreg_qualifier_from_value ((num
<< 1) | Q
);
1429 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
1431 /* Use Rt to encode in the case of e.g.
1432 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1433 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
1436 /* Otherwise use the result operand, which has to be a integer
1438 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1439 == AARCH64_OPND_CLASS_INT_REG
);
1442 assert (idx
== 0 || idx
== 1);
1443 value
= extract_field (FLD_Q
, inst
->value
, 0);
1444 inst
->operands
[idx
].qualifier
= get_greg_qualifier_from_value (value
);
1447 if (inst
->opcode
->flags
& F_LDS_SIZE
)
1449 aarch64_field field
= {0, 0};
1450 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1451 == AARCH64_OPND_CLASS_INT_REG
);
1452 gen_sub_field (FLD_opc
, 0, 1, &field
);
1453 value
= extract_field_2 (&field
, inst
->value
, 0);
1454 inst
->operands
[0].qualifier
1455 = value
? AARCH64_OPND_QLF_W
: AARCH64_OPND_QLF_X
;
1458 /* Miscellaneous decoding; done as the last step. */
1459 if (inst
->opcode
->flags
& F_MISC
)
1460 return do_misc_decoding (inst
);
1465 /* Converters converting a real opcode instruction to its alias form. */
1467 /* ROR <Wd>, <Ws>, #<shift>
1469 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1471 convert_extr_to_ror (aarch64_inst
*inst
)
1473 if (inst
->operands
[1].reg
.regno
== inst
->operands
[2].reg
.regno
)
1475 copy_operand_info (inst
, 2, 3);
1476 inst
->operands
[3].type
= AARCH64_OPND_NIL
;
1482 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1484 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1486 convert_shll_to_xtl (aarch64_inst
*inst
)
1488 if (inst
->operands
[2].imm
.value
== 0)
1490 inst
->operands
[2].type
= AARCH64_OPND_NIL
;
1497 UBFM <Xd>, <Xn>, #<shift>, #63.
1499 LSR <Xd>, <Xn>, #<shift>. */
1501 convert_bfm_to_sr (aarch64_inst
*inst
)
1505 imms
= inst
->operands
[3].imm
.value
;
1506 val
= inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
1509 inst
->operands
[3].type
= AARCH64_OPND_NIL
;
1516 /* Convert MOV to ORR. */
1518 convert_orr_to_mov (aarch64_inst
*inst
)
1520 /* MOV <Vd>.<T>, <Vn>.<T>
1522 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1523 if (inst
->operands
[1].reg
.regno
== inst
->operands
[2].reg
.regno
)
1525 inst
->operands
[2].type
= AARCH64_OPND_NIL
;
1531 /* When <imms> >= <immr>, the instruction written:
1532 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1534 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1537 convert_bfm_to_bfx (aarch64_inst
*inst
)
1541 immr
= inst
->operands
[2].imm
.value
;
1542 imms
= inst
->operands
[3].imm
.value
;
1546 inst
->operands
[2].imm
.value
= lsb
;
1547 inst
->operands
[3].imm
.value
= imms
+ 1 - lsb
;
1548 /* The two opcodes have different qualifiers for
1549 the immediate operands; reset to help the checking. */
1550 reset_operand_qualifier (inst
, 2);
1551 reset_operand_qualifier (inst
, 3);
1558 /* When <imms> < <immr>, the instruction written:
1559 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1561 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1564 convert_bfm_to_bfi (aarch64_inst
*inst
)
1566 int64_t immr
, imms
, val
;
1568 immr
= inst
->operands
[2].imm
.value
;
1569 imms
= inst
->operands
[3].imm
.value
;
1570 val
= inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 32 : 64;
1573 inst
->operands
[2].imm
.value
= (val
- immr
) & (val
- 1);
1574 inst
->operands
[3].imm
.value
= imms
+ 1;
1575 /* The two opcodes have different qualifiers for
1576 the immediate operands; reset to help the checking. */
1577 reset_operand_qualifier (inst
, 2);
1578 reset_operand_qualifier (inst
, 3);
1585 /* The instruction written:
1586 LSL <Xd>, <Xn>, #<shift>
1588 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1591 convert_ubfm_to_lsl (aarch64_inst
*inst
)
1593 int64_t immr
= inst
->operands
[2].imm
.value
;
1594 int64_t imms
= inst
->operands
[3].imm
.value
;
1596 = inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
1598 if ((immr
== 0 && imms
== val
) || immr
== imms
+ 1)
1600 inst
->operands
[3].type
= AARCH64_OPND_NIL
;
1601 inst
->operands
[2].imm
.value
= val
- imms
;
1608 /* CINC <Wd>, <Wn>, <cond>
1610 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
1611 where <cond> is not AL or NV. */
1614 convert_from_csel (aarch64_inst
*inst
)
1616 if (inst
->operands
[1].reg
.regno
== inst
->operands
[2].reg
.regno
1617 && (inst
->operands
[3].cond
->value
& 0xe) != 0xe)
1619 copy_operand_info (inst
, 2, 3);
1620 inst
->operands
[2].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1621 inst
->operands
[3].type
= AARCH64_OPND_NIL
;
1627 /* CSET <Wd>, <cond>
1629 CSINC <Wd>, WZR, WZR, invert(<cond>)
1630 where <cond> is not AL or NV. */
1633 convert_csinc_to_cset (aarch64_inst
*inst
)
1635 if (inst
->operands
[1].reg
.regno
== 0x1f
1636 && inst
->operands
[2].reg
.regno
== 0x1f
1637 && (inst
->operands
[3].cond
->value
& 0xe) != 0xe)
1639 copy_operand_info (inst
, 1, 3);
1640 inst
->operands
[1].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1641 inst
->operands
[3].type
= AARCH64_OPND_NIL
;
1642 inst
->operands
[2].type
= AARCH64_OPND_NIL
;
1650 MOVZ <Wd>, #<imm16>, LSL #<shift>.
1652 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1653 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1654 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1655 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1656 machine-instruction mnemonic must be used. */
1659 convert_movewide_to_mov (aarch64_inst
*inst
)
1661 uint64_t value
= inst
->operands
[1].imm
.value
;
1662 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
1663 if (value
== 0 && inst
->operands
[1].shifter
.amount
!= 0)
1665 inst
->operands
[1].type
= AARCH64_OPND_IMM_MOV
;
1666 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_NONE
;
1667 value
<<= inst
->operands
[1].shifter
.amount
;
1668 /* As an alias convertor, it has to be clear that the INST->OPCODE
1669 is the opcode of the real instruction. */
1670 if (inst
->opcode
->op
== OP_MOVN
)
1672 int is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
1674 /* A MOVN has an immediate that could be encoded by MOVZ. */
1675 if (aarch64_wide_constant_p (value
, is32
, NULL
) == TRUE
)
1678 inst
->operands
[1].imm
.value
= value
;
1679 inst
->operands
[1].shifter
.amount
= 0;
1685 ORR <Wd>, WZR, #<imm>.
1687 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1688 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1689 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1690 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1691 machine-instruction mnemonic must be used. */
1694 convert_movebitmask_to_mov (aarch64_inst
*inst
)
1699 /* Should have been assured by the base opcode value. */
1700 assert (inst
->operands
[1].reg
.regno
== 0x1f);
1701 copy_operand_info (inst
, 1, 2);
1702 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
1703 inst
->operands
[1].type
= AARCH64_OPND_IMM_MOV
;
1704 value
= inst
->operands
[1].imm
.value
;
1705 /* ORR has an immediate that could be generated by a MOVZ or MOVN
1707 if (inst
->operands
[0].reg
.regno
!= 0x1f
1708 && (aarch64_wide_constant_p (value
, is32
, NULL
) == TRUE
1709 || aarch64_wide_constant_p (~value
, is32
, NULL
) == TRUE
))
1712 inst
->operands
[2].type
= AARCH64_OPND_NIL
;
1716 /* Some alias opcodes are disassembled by being converted from their real-form.
1717 N.B. INST->OPCODE is the real opcode rather than the alias. */
1720 convert_to_alias (aarch64_inst
*inst
, const aarch64_opcode
*alias
)
1726 return convert_bfm_to_sr (inst
);
1728 return convert_ubfm_to_lsl (inst
);
1732 return convert_from_csel (inst
);
1735 return convert_csinc_to_cset (inst
);
1739 return convert_bfm_to_bfx (inst
);
1743 return convert_bfm_to_bfi (inst
);
1745 return convert_orr_to_mov (inst
);
1746 case OP_MOV_IMM_WIDE
:
1747 case OP_MOV_IMM_WIDEN
:
1748 return convert_movewide_to_mov (inst
);
1749 case OP_MOV_IMM_LOG
:
1750 return convert_movebitmask_to_mov (inst
);
1752 return convert_extr_to_ror (inst
);
1757 return convert_shll_to_xtl (inst
);
1763 static int aarch64_opcode_decode (const aarch64_opcode
*, const aarch64_insn
,
1764 aarch64_inst
*, int);
1766 /* Given the instruction information in *INST, check if the instruction has
1767 any alias form that can be used to represent *INST. If the answer is yes,
1768 update *INST to be in the form of the determined alias. */
1770 /* In the opcode description table, the following flags are used in opcode
1771 entries to help establish the relations between the real and alias opcodes:
1773 F_ALIAS: opcode is an alias
1774 F_HAS_ALIAS: opcode has alias(es)
1777 F_P3: Disassembly preference priority 1-3 (the larger the
1778 higher). If nothing is specified, it is the priority
1779 0 by default, i.e. the lowest priority.
1781 Although the relation between the machine and the alias instructions are not
1782 explicitly described, it can be easily determined from the base opcode
1783 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1784 description entries:
1786 The mask of an alias opcode must be equal to or a super-set (i.e. more
1787 constrained) of that of the aliased opcode; so is the base opcode value.
1789 if (opcode_has_alias (real) && alias_opcode_p (opcode)
1790 && (opcode->mask & real->mask) == real->mask
1791 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
1792 then OPCODE is an alias of, and only of, the REAL instruction
1794 The alias relationship is forced flat-structured to keep related algorithm
1795 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
1797 During the disassembling, the decoding decision tree (in
1798 opcodes/aarch64-dis-2.c) always returns an machine instruction opcode entry;
1799 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
1800 not specified), the disassembler will check whether any alias
1801 instruction exists for this real instruction. If there is, the disassembler
1802 will try to disassemble the 32-bit binary again using the alias's rule, or
1803 try to convert the IR to the form of the alias. In the case of the multiple
1804 aliases, the aliases are tried one by one from the highest priority
1805 (currently the flag F_P3) to the lowest priority (no priority flag), and the
1806 the first that succeeds is adopted.
1808 You may ask why there is a need for the conversion of IR from one form to
1809 another in handling certain aliases. This is because on one hand it avoids
1810 adding more operand code to handle unusual encoding/decoding; on other
1811 hand, during the disassembling, the conversion is an effective approach to
1812 check the condition of an alias (as an alias may be adopted only if certain
1813 conditions are met).
1815 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1816 aarch64_opcode_table and generated aarch64_find_alias_opcode and
1817 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
1820 determine_disassembling_preference (struct aarch64_inst
*inst
)
1822 const aarch64_opcode
*opcode
;
1823 const aarch64_opcode
*alias
;
1825 opcode
= inst
->opcode
;
1827 /* This opcode does not have an alias, so use itself. */
1828 if (opcode_has_alias (opcode
) == FALSE
)
1831 alias
= aarch64_find_alias_opcode (opcode
);
1834 #ifdef DEBUG_AARCH64
1837 const aarch64_opcode
*tmp
= alias
;
1838 printf ("#### LIST orderd: ");
1841 printf ("%s, ", tmp
->name
);
1842 tmp
= aarch64_find_next_alias_opcode (tmp
);
1846 #endif /* DEBUG_AARCH64 */
1848 for (; alias
; alias
= aarch64_find_next_alias_opcode (alias
))
1850 DEBUG_TRACE ("try %s", alias
->name
);
1851 assert (alias_opcode_p (alias
));
1853 /* An alias can be a pseudo opcode which will never be used in the
1854 disassembly, e.g. BIC logical immediate is such a pseudo opcode
1856 if (pseudo_opcode_p (alias
))
1858 DEBUG_TRACE ("skip pseudo %s", alias
->name
);
1862 if ((inst
->value
& alias
->mask
) != alias
->opcode
)
1864 DEBUG_TRACE ("skip %s as base opcode not match", alias
->name
);
1867 /* No need to do any complicated transformation on operands, if the alias
1868 opcode does not have any operand. */
1869 if (aarch64_num_of_operands (alias
) == 0 && alias
->opcode
== inst
->value
)
1871 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias
->name
);
1872 aarch64_replace_opcode (inst
, alias
);
1875 if (alias
->flags
& F_CONV
)
1878 memcpy (©
, inst
, sizeof (aarch64_inst
));
1879 /* ALIAS is the preference as long as the instruction can be
1880 successfully converted to the form of ALIAS. */
1881 if (convert_to_alias (©
, alias
) == 1)
1883 aarch64_replace_opcode (©
, alias
);
1884 assert (aarch64_match_operands_constraint (©
, NULL
));
1885 DEBUG_TRACE ("succeed with %s via conversion", alias
->name
);
1886 memcpy (inst
, ©
, sizeof (aarch64_inst
));
1892 /* Directly decode the alias opcode. */
1894 memset (&temp
, '\0', sizeof (aarch64_inst
));
1895 if (aarch64_opcode_decode (alias
, inst
->value
, &temp
, 1) == 1)
1897 DEBUG_TRACE ("succeed with %s via direct decoding", alias
->name
);
1898 memcpy (inst
, &temp
, sizeof (aarch64_inst
));
1905 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
1906 fails, which meanes that CODE is not an instruction of OPCODE; otherwise
1909 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
1910 determined and used to disassemble CODE; this is done just before the
1914 aarch64_opcode_decode (const aarch64_opcode
*opcode
, const aarch64_insn code
,
1915 aarch64_inst
*inst
, int noaliases_p
)
1919 DEBUG_TRACE ("enter with %s", opcode
->name
);
1921 assert (opcode
&& inst
);
1923 /* Check the base opcode. */
1924 if ((code
& opcode
->mask
) != (opcode
->opcode
& opcode
->mask
))
1926 DEBUG_TRACE ("base opcode match FAIL");
1931 memset (inst
, '\0', sizeof (aarch64_inst
));
1933 inst
->opcode
= opcode
;
1936 /* Assign operand codes and indexes. */
1937 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1939 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
1941 inst
->operands
[i
].type
= opcode
->operands
[i
];
1942 inst
->operands
[i
].idx
= i
;
1945 /* Call the opcode decoder indicated by flags. */
1946 if (opcode_has_special_coder (opcode
) && do_special_decoding (inst
) == 0)
1948 DEBUG_TRACE ("opcode flag-based decoder FAIL");
1952 /* Call operand decoders. */
1953 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1955 const aarch64_operand
*opnd
;
1956 enum aarch64_opnd type
;
1957 type
= opcode
->operands
[i
];
1958 if (type
== AARCH64_OPND_NIL
)
1960 opnd
= &aarch64_operands
[type
];
1961 if (operand_has_extractor (opnd
)
1962 && (! aarch64_extract_operand (opnd
, &inst
->operands
[i
], code
, inst
)))
1964 DEBUG_TRACE ("operand decoder FAIL at operand %d", i
);
1969 /* Match the qualifiers. */
1970 if (aarch64_match_operands_constraint (inst
, NULL
) == 1)
1972 /* Arriving here, the CODE has been determined as a valid instruction
1973 of OPCODE and *INST has been filled with information of this OPCODE
1974 instruction. Before the return, check if the instruction has any
1975 alias and should be disassembled in the form of its alias instead.
1976 If the answer is yes, *INST will be updated. */
1978 determine_disassembling_preference (inst
);
1979 DEBUG_TRACE ("SUCCESS");
1984 DEBUG_TRACE ("constraint matching FAIL");
1991 /* This does some user-friendly fix-up to *INST. It is currently focus on
1992 the adjustment of qualifiers to help the printed instruction
1993 recognized/understood more easily. */
1996 user_friendly_fixup (aarch64_inst
*inst
)
1998 switch (inst
->opcode
->iclass
)
2001 /* TBNZ Xn|Wn, #uimm6, label
2002 Test and Branch Not Zero: conditionally jumps to label if bit number
2003 uimm6 in register Xn is not zero. The bit number implies the width of
2004 the register, which may be written and should be disassembled as Wn if
2005 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2007 if (inst
->operands
[1].imm
.value
< 32)
2008 inst
->operands
[0].qualifier
= AARCH64_OPND_QLF_W
;
2014 /* Decode INSN and fill in *INST the instruction information. */
2017 disas_aarch64_insn (uint64_t pc ATTRIBUTE_UNUSED
, uint32_t insn
,
2020 const aarch64_opcode
*opcode
= aarch64_opcode_lookup (insn
);
2022 #ifdef DEBUG_AARCH64
2025 const aarch64_opcode
*tmp
= opcode
;
2027 DEBUG_TRACE ("opcode lookup:");
2030 aarch64_verbose (" %s", tmp
->name
);
2031 tmp
= aarch64_find_next_opcode (tmp
);
2034 #endif /* DEBUG_AARCH64 */
2036 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2037 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2038 opcode field and value, apart from the difference that one of them has an
2039 extra field as part of the opcode, but such a field is used for operand
2040 encoding in other opcode(s) ('immh' in the case of the example). */
2041 while (opcode
!= NULL
)
2043 /* But only one opcode can be decoded successfully for, as the
2044 decoding routine will check the constraint carefully. */
2045 if (aarch64_opcode_decode (opcode
, insn
, inst
, no_aliases
) == 1)
2047 opcode
= aarch64_find_next_opcode (opcode
);
2053 /* Print operands. */
2056 print_operands (bfd_vma pc
, const aarch64_opcode
*opcode
,
2057 const aarch64_opnd_info
*opnds
, struct disassemble_info
*info
)
2059 int i
, pcrel_p
, num_printed
;
2060 for (i
= 0, num_printed
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2062 const size_t size
= 128;
2064 /* We regard the opcode operand info more, however we also look into
2065 the inst->operands to support the disassembling of the optional
2067 The two operand code should be the same in all cases, apart from
2068 when the operand can be optional. */
2069 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
2070 || opnds
[i
].type
== AARCH64_OPND_NIL
)
2073 /* Generate the operand string in STR. */
2074 aarch64_print_operand (str
, size
, pc
, opcode
, opnds
, i
, &pcrel_p
,
2077 /* Print the delimiter (taking account of omitted operand(s)). */
2079 (*info
->fprintf_func
) (info
->stream
, "%s",
2080 num_printed
++ == 0 ? "\t" : ", ");
2082 /* Print the operand. */
2084 (*info
->print_address_func
) (info
->target
, info
);
2086 (*info
->fprintf_func
) (info
->stream
, "%s", str
);
2090 /* Print the instruction mnemonic name. */
2093 print_mnemonic_name (const aarch64_inst
*inst
, struct disassemble_info
*info
)
2095 if (inst
->opcode
->flags
& F_COND
)
2097 /* For instructions that are truly conditionally executed, e.g. b.cond,
2098 prepare the full mnemonic name with the corresponding condition
2103 ptr
= strchr (inst
->opcode
->name
, '.');
2104 assert (ptr
&& inst
->cond
);
2105 len
= ptr
- inst
->opcode
->name
;
2107 strncpy (name
, inst
->opcode
->name
, len
);
2109 (*info
->fprintf_func
) (info
->stream
, "%s.%s", name
, inst
->cond
->names
[0]);
2112 (*info
->fprintf_func
) (info
->stream
, "%s", inst
->opcode
->name
);
2115 /* Print the instruction according to *INST. */
2118 print_aarch64_insn (bfd_vma pc
, const aarch64_inst
*inst
,
2119 struct disassemble_info
*info
)
2121 print_mnemonic_name (inst
, info
);
2122 print_operands (pc
, inst
->opcode
, inst
->operands
, info
);
2125 /* Entry-point of the instruction disassembler and printer. */
2128 print_insn_aarch64_word (bfd_vma pc
,
2130 struct disassemble_info
*info
)
2132 static const char *err_msg
[6] =
2135 [-ERR_UND
] = "undefined",
2136 [-ERR_UNP
] = "unpredictable",
2143 info
->insn_info_valid
= 1;
2144 info
->branch_delay_insns
= 0;
2145 info
->data_size
= 0;
2149 if (info
->flags
& INSN_HAS_RELOC
)
2150 /* If the instruction has a reloc associated with it, then
2151 the offset field in the instruction will actually be the
2152 addend for the reloc. (If we are using REL type relocs).
2153 In such cases, we can ignore the pc when computing
2154 addresses, since the addend is not currently pc-relative. */
2157 ret
= disas_aarch64_insn (pc
, word
, &inst
);
2159 if (((word
>> 21) & 0x3ff) == 1)
2161 /* RESERVED for ALES. */
2162 assert (ret
!= ERR_OK
);
2171 /* Handle undefined instructions. */
2172 info
->insn_type
= dis_noninsn
;
2173 (*info
->fprintf_func
) (info
->stream
,".inst\t0x%08x ; %s",
2174 word
, err_msg
[-ret
]);
2177 user_friendly_fixup (&inst
);
2178 print_aarch64_insn (pc
, &inst
, info
);
2185 /* Disallow mapping symbols ($x, $d etc) from
2186 being displayed in symbol relative addresses. */
2189 aarch64_symbol_is_valid (asymbol
* sym
,
2190 struct disassemble_info
* info ATTRIBUTE_UNUSED
)
2197 name
= bfd_asymbol_name (sym
);
2201 || (name
[1] != 'x' && name
[1] != 'd')
2202 || (name
[2] != '\0' && name
[2] != '.'));
2205 /* Print data bytes on INFO->STREAM. */
2208 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED
,
2210 struct disassemble_info
*info
)
2212 switch (info
->bytes_per_chunk
)
2215 info
->fprintf_func (info
->stream
, ".byte\t0x%02x", word
);
2218 info
->fprintf_func (info
->stream
, ".short\t0x%04x", word
);
2221 info
->fprintf_func (info
->stream
, ".word\t0x%08x", word
);
2228 /* Try to infer the code or data type from a symbol.
2229 Returns nonzero if *MAP_TYPE was set. */
2232 get_sym_code_type (struct disassemble_info
*info
, int n
,
2233 enum map_type
*map_type
)
2235 elf_symbol_type
*es
;
2239 es
= *(elf_symbol_type
**)(info
->symtab
+ n
);
2240 type
= ELF_ST_TYPE (es
->internal_elf_sym
.st_info
);
2242 /* If the symbol has function type then use that. */
2243 if (type
== STT_FUNC
)
2245 *map_type
= MAP_INSN
;
2249 /* Check for mapping symbols. */
2250 name
= bfd_asymbol_name(info
->symtab
[n
]);
2252 && (name
[1] == 'x' || name
[1] == 'd')
2253 && (name
[2] == '\0' || name
[2] == '.'))
2255 *map_type
= (name
[1] == 'x' ? MAP_INSN
: MAP_DATA
);
2262 /* Entry-point of the AArch64 disassembler. */
2265 print_insn_aarch64 (bfd_vma pc
,
2266 struct disassemble_info
*info
)
2268 bfd_byte buffer
[INSNLEN
];
2270 void (*printer
) (bfd_vma
, uint32_t, struct disassemble_info
*);
2271 bfd_boolean found
= FALSE
;
2272 unsigned int size
= 4;
2275 if (info
->disassembler_options
)
2277 set_default_aarch64_dis_options (info
);
2279 parse_aarch64_dis_options (info
->disassembler_options
);
2281 /* To avoid repeated parsing of these options, we remove them here. */
2282 info
->disassembler_options
= NULL
;
2285 /* Aarch64 instructions are always little-endian */
2286 info
->endian_code
= BFD_ENDIAN_LITTLE
;
2288 /* First check the full symtab for a mapping symbol, even if there
2289 are no usable non-mapping symbols for this address. */
2290 if (info
->symtab_size
!= 0
2291 && bfd_asymbol_flavour (*info
->symtab
) == bfd_target_elf_flavour
)
2293 enum map_type type
= MAP_INSN
;
2298 if (pc
<= last_mapping_addr
)
2299 last_mapping_sym
= -1;
2301 /* Start scanning at the start of the function, or wherever
2302 we finished last time. */
2303 n
= info
->symtab_pos
+ 1;
2304 if (n
< last_mapping_sym
)
2305 n
= last_mapping_sym
;
2307 /* Scan up to the location being disassembled. */
2308 for (; n
< info
->symtab_size
; n
++)
2310 addr
= bfd_asymbol_value (info
->symtab
[n
]);
2313 if ((info
->section
== NULL
2314 || info
->section
== info
->symtab
[n
]->section
)
2315 && get_sym_code_type (info
, n
, &type
))
2324 n
= info
->symtab_pos
;
2325 if (n
< last_mapping_sym
)
2326 n
= last_mapping_sym
;
2328 /* No mapping symbol found at this address. Look backwards
2329 for a preceeding one. */
2332 if (get_sym_code_type (info
, n
, &type
))
2341 last_mapping_sym
= last_sym
;
2344 /* Look a little bit ahead to see if we should print out
2345 less than four bytes of data. If there's a symbol,
2346 mapping or otherwise, after two bytes then don't
2348 if (last_type
== MAP_DATA
)
2350 size
= 4 - (pc
& 3);
2351 for (n
= last_sym
+ 1; n
< info
->symtab_size
; n
++)
2353 addr
= bfd_asymbol_value (info
->symtab
[n
]);
2356 if (addr
- pc
< size
)
2361 /* If the next symbol is after three bytes, we need to
2362 print only part of the data, so that we can use either
2365 size
= (pc
& 1) ? 1 : 2;
2369 if (last_type
== MAP_DATA
)
2371 /* size was set above. */
2372 info
->bytes_per_chunk
= size
;
2373 info
->display_endian
= info
->endian
;
2374 printer
= print_insn_data
;
2378 info
->bytes_per_chunk
= size
= INSNLEN
;
2379 info
->display_endian
= info
->endian_code
;
2380 printer
= print_insn_aarch64_word
;
2383 status
= (*info
->read_memory_func
) (pc
, buffer
, size
, info
);
2386 (*info
->memory_error_func
) (status
, pc
, info
);
2390 data
= bfd_get_bits (buffer
, size
* 8,
2391 info
->display_endian
== BFD_ENDIAN_BIG
);
2393 (*printer
) (pc
, data
, info
);
/* Describe the -M disassembler options on STREAM.  */
void
print_aarch64_disassembler_options (FILE *stream)
{
  fprintf (stream, _("\n\
The following AARCH64 specific disassembler options are supported for use\n\
with the -M switch (multiple options should be separated by commas):\n"));

  fprintf (stream, _("\n\
  no-aliases         Don't print instruction aliases.\n"));

  fprintf (stream, _("\n\
  aliases            Do print instruction aliases.\n"));

#ifdef DEBUG_AARCH64
  fprintf (stream, _("\n\
  debug_dump         Temp switch for debug trace.\n"));
#endif /* DEBUG_AARCH64 */

  fprintf (stream, _("\n"));
}