1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright 2009-2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
22 #include "bfd_stdint.h"
24 #include "libiberty.h"
26 #include "aarch64-dis.h"
36 /* Cached mapping symbol state. */
43 static enum map_type last_type
;
44 static int last_mapping_sym
= -1;
45 static bfd_vma last_mapping_addr
= 0;
48 static int no_aliases
= 0; /* If set disassemble as most general inst. */
52 set_default_aarch64_dis_options (struct disassemble_info
*info ATTRIBUTE_UNUSED
)
57 parse_aarch64_dis_option (const char *option
, unsigned int len ATTRIBUTE_UNUSED
)
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option
, "no-aliases"))
66 if (CONST_STRNEQ (option
, "aliases"))
73 if (CONST_STRNEQ (option
, "debug_dump"))
78 #endif /* DEBUG_AARCH64 */
81 fprintf (stderr
, _("Unrecognised disassembler option: %s\n"), option
);
85 parse_aarch64_dis_options (const char *options
)
87 const char *option_end
;
92 while (*options
!= '\0')
94 /* Skip empty options. */
101 /* We know that *options is neither NUL or a comma. */
102 option_end
= options
+ 1;
103 while (*option_end
!= ',' && *option_end
!= '\0')
106 parse_aarch64_dis_option (options
, option_end
- options
);
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options
= option_end
;
114 /* Functions doing the instruction disassembling. */
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
120 N.B. the fields are required to be in such an order that the most significant
121 field for VALUE comes the first, e.g. the <index> in
122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
124 the order of H, L, M. */
126 static inline aarch64_insn
127 extract_fields (aarch64_insn code
, aarch64_insn mask
, ...)
130 const aarch64_field
*field
;
131 enum aarch64_field_kind kind
;
135 num
= va_arg (va
, uint32_t);
137 aarch64_insn value
= 0x0;
140 kind
= va_arg (va
, enum aarch64_field_kind
);
141 field
= &fields
[kind
];
142 value
<<= field
->width
;
143 value
|= extract_field (kind
, code
, mask
);
148 /* Sign-extend bit I of VALUE. */
149 static inline int32_t
150 sign_extend (aarch64_insn value
, unsigned i
)
152 uint32_t ret
= value
;
155 if ((value
>> i
) & 0x1)
157 uint32_t val
= (uint32_t)(-1) << i
;
160 return (int32_t) ret
;
163 /* N.B. the following inline helper functions create a dependency on the
164 order of operand qualifier enumerators. */
166 /* Given VALUE, return qualifier for a general purpose register. */
167 static inline enum aarch64_opnd_qualifier
168 get_greg_qualifier_from_value (aarch64_insn value
)
170 enum aarch64_opnd_qualifier qualifier
= AARCH64_OPND_QLF_W
+ value
;
172 && aarch64_get_qualifier_standard_value (qualifier
) == value
);
176 /* Given VALUE, return qualifier for a vector register. */
177 static inline enum aarch64_opnd_qualifier
178 get_vreg_qualifier_from_value (aarch64_insn value
)
180 enum aarch64_opnd_qualifier qualifier
= AARCH64_OPND_QLF_V_8B
+ value
;
183 && aarch64_get_qualifier_standard_value (qualifier
) == value
);
187 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
188 static inline enum aarch64_opnd_qualifier
189 get_sreg_qualifier_from_value (aarch64_insn value
)
191 enum aarch64_opnd_qualifier qualifier
= AARCH64_OPND_QLF_S_B
+ value
;
194 && aarch64_get_qualifier_standard_value (qualifier
) == value
);
198 /* Given the instruction in *INST which is probably half way through the
199 decoding and our caller wants to know the expected qualifier for operand
200 I. Return such a qualifier if we can establish it; otherwise return
201 AARCH64_OPND_QLF_NIL. */
203 static aarch64_opnd_qualifier_t
204 get_expected_qualifier (const aarch64_inst
*inst
, int i
)
206 aarch64_opnd_qualifier_seq_t qualifiers
;
207 /* Should not be called if the qualifier is known. */
208 assert (inst
->operands
[i
].qualifier
== AARCH64_OPND_QLF_NIL
);
209 if (aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
,
211 return qualifiers
[i
];
213 return AARCH64_OPND_QLF_NIL
;
216 /* Operand extractors. */
219 aarch64_ext_regno (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
220 const aarch64_insn code
,
221 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
223 info
->reg
.regno
= extract_field (self
->fields
[0], code
, 0);
227 /* e.g. IC <ic_op>{, <Xt>}. */
229 aarch64_ext_regrt_sysins (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
230 const aarch64_insn code
,
231 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
233 info
->reg
.regno
= extract_field (self
->fields
[0], code
, 0);
234 assert (info
->idx
== 1
235 && (aarch64_get_operand_class (inst
->operands
[0].type
)
236 == AARCH64_OPND_CLASS_SYSTEM
));
237 /* This will make the constraint checking happy and more importantly will
238 help the disassembler determine whether this operand is optional or
240 info
->present
= inst
->operands
[0].sysins_op
->has_xt
;
245 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
247 aarch64_ext_reglane (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
248 const aarch64_insn code
,
249 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
252 info
->reglane
.regno
= extract_field (self
->fields
[0], code
,
255 /* Index and/or type. */
256 if (inst
->opcode
->iclass
== asisdone
257 || inst
->opcode
->iclass
== asimdins
)
259 if (info
->type
== AARCH64_OPND_En
260 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
263 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
264 assert (info
->idx
== 1); /* Vn */
265 aarch64_insn value
= extract_field (FLD_imm4
, code
, 0);
266 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
267 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
268 shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
269 info
->reglane
.index
= value
>> shift
;
273 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
281 aarch64_insn value
= extract_field (FLD_imm5
, code
, 0);
282 while (++pos
<= 3 && (value
& 0x1) == 0)
286 info
->qualifier
= get_sreg_qualifier_from_value (pos
);
287 info
->reglane
.index
= (unsigned) (value
>> 1);
292 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
293 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
295 /* Need information in other operand(s) to help decoding. */
296 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
297 switch (info
->qualifier
)
299 case AARCH64_OPND_QLF_S_H
:
301 info
->reglane
.index
= extract_fields (code
, 0, 3, FLD_H
, FLD_L
,
303 info
->reglane
.regno
&= 0xf;
305 case AARCH64_OPND_QLF_S_S
:
307 info
->reglane
.index
= extract_fields (code
, 0, 2, FLD_H
, FLD_L
);
309 case AARCH64_OPND_QLF_S_D
:
311 info
->reglane
.index
= extract_field (FLD_H
, code
, 0);
322 aarch64_ext_reglist (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
323 const aarch64_insn code
,
324 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
327 info
->reglist
.first_regno
= extract_field (self
->fields
[0], code
, 0);
329 info
->reglist
.num_regs
= extract_field (FLD_len
, code
, 0) + 1;
333 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
335 aarch64_ext_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
336 aarch64_opnd_info
*info
, const aarch64_insn code
,
337 const aarch64_inst
*inst
)
340 /* Number of elements in each structure to be loaded/stored. */
341 unsigned expected_num
= get_opcode_dependent_value (inst
->opcode
);
345 unsigned is_reserved
;
347 unsigned num_elements
;
363 info
->reglist
.first_regno
= extract_field (FLD_Rt
, code
, 0);
365 value
= extract_field (FLD_opcode
, code
, 0);
366 if (expected_num
!= data
[value
].num_elements
|| data
[value
].is_reserved
)
368 info
->reglist
.num_regs
= data
[value
].num_regs
;
373 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
374 lanes instructions. */
376 aarch64_ext_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
377 aarch64_opnd_info
*info
, const aarch64_insn code
,
378 const aarch64_inst
*inst
)
383 info
->reglist
.first_regno
= extract_field (FLD_Rt
, code
, 0);
385 value
= extract_field (FLD_S
, code
, 0);
387 /* Number of registers is equal to the number of elements in
388 each structure to be loaded/stored. */
389 info
->reglist
.num_regs
= get_opcode_dependent_value (inst
->opcode
);
390 assert (info
->reglist
.num_regs
>= 1 && info
->reglist
.num_regs
<= 4);
392 /* Except when it is LD1R. */
393 if (info
->reglist
.num_regs
== 1 && value
== (aarch64_insn
) 1)
394 info
->reglist
.num_regs
= 2;
399 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
400 load/store single element instructions. */
402 aarch64_ext_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
403 aarch64_opnd_info
*info
, const aarch64_insn code
,
404 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
406 aarch64_field field
= {0, 0};
407 aarch64_insn QSsize
; /* fields Q:S:size. */
408 aarch64_insn opcodeh2
; /* opcode<2:1> */
411 info
->reglist
.first_regno
= extract_field (FLD_Rt
, code
, 0);
413 /* Decode the index, opcode<2:1> and size. */
414 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
415 opcodeh2
= extract_field_2 (&field
, code
, 0);
416 QSsize
= extract_fields (code
, 0, 3, FLD_Q
, FLD_S
, FLD_vldst_size
);
420 info
->qualifier
= AARCH64_OPND_QLF_S_B
;
421 /* Index encoded in "Q:S:size". */
422 info
->reglist
.index
= QSsize
;
425 info
->qualifier
= AARCH64_OPND_QLF_S_H
;
426 /* Index encoded in "Q:S:size<1>". */
427 info
->reglist
.index
= QSsize
>> 1;
430 if ((QSsize
& 0x1) == 0)
432 info
->qualifier
= AARCH64_OPND_QLF_S_S
;
433 /* Index encoded in "Q:S". */
434 info
->reglist
.index
= QSsize
>> 2;
438 info
->qualifier
= AARCH64_OPND_QLF_S_D
;
439 /* Index encoded in "Q". */
440 info
->reglist
.index
= QSsize
>> 3;
441 if (extract_field (FLD_S
, code
, 0))
450 info
->reglist
.has_index
= 1;
451 info
->reglist
.num_regs
= 0;
452 /* Number of registers is equal to the number of elements in
453 each structure to be loaded/stored. */
454 info
->reglist
.num_regs
= get_opcode_dependent_value (inst
->opcode
);
455 assert (info
->reglist
.num_regs
>= 1 && info
->reglist
.num_regs
<= 4);
460 /* Decode fields immh:immb and/or Q for e.g.
461 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
462 or SSHR <V><d>, <V><n>, #<shift>. */
465 aarch64_ext_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
466 aarch64_opnd_info
*info
, const aarch64_insn code
,
467 const aarch64_inst
*inst
)
470 aarch64_insn Q
, imm
, immh
;
471 enum aarch64_insn_class iclass
= inst
->opcode
->iclass
;
473 immh
= extract_field (FLD_immh
, code
, 0);
476 imm
= extract_fields (code
, 0, 2, FLD_immh
, FLD_immb
);
478 /* Get highest set bit in immh. */
479 while (--pos
>= 0 && (immh
& 0x8) == 0)
482 assert ((iclass
== asimdshf
|| iclass
== asisdshf
)
483 && (info
->type
== AARCH64_OPND_IMM_VLSR
484 || info
->type
== AARCH64_OPND_IMM_VLSL
));
486 if (iclass
== asimdshf
)
488 Q
= extract_field (FLD_Q
, code
, 0);
490 0000 x SEE AdvSIMD modified immediate
500 get_vreg_qualifier_from_value ((pos
<< 1) | (int) Q
);
503 info
->qualifier
= get_sreg_qualifier_from_value (pos
);
505 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
507 0000 SEE AdvSIMD modified immediate
508 0001 (16-UInt(immh:immb))
509 001x (32-UInt(immh:immb))
510 01xx (64-UInt(immh:immb))
511 1xxx (128-UInt(immh:immb)) */
512 info
->imm
.value
= (16 << pos
) - imm
;
516 0000 SEE AdvSIMD modified immediate
517 0001 (UInt(immh:immb)-8)
518 001x (UInt(immh:immb)-16)
519 01xx (UInt(immh:immb)-32)
520 1xxx (UInt(immh:immb)-64) */
521 info
->imm
.value
= imm
- (8 << pos
);
526 /* Decode shift immediate for e.g. sshr (imm). */
528 aarch64_ext_shll_imm (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
529 aarch64_opnd_info
*info
, const aarch64_insn code
,
530 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
534 val
= extract_field (FLD_size
, code
, 0);
537 case 0: imm
= 8; break;
538 case 1: imm
= 16; break;
539 case 2: imm
= 32; break;
542 info
->imm
.value
= imm
;
546 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
547 value in the field(s) will be extracted as unsigned immediate value. */
549 aarch64_ext_imm (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
550 const aarch64_insn code
,
551 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
554 /* Maximum of two fields to extract. */
555 assert (self
->fields
[2] == FLD_NIL
);
557 if (self
->fields
[1] == FLD_NIL
)
558 imm
= extract_field (self
->fields
[0], code
, 0);
560 /* e.g. TBZ b5:b40. */
561 imm
= extract_fields (code
, 0, 2, self
->fields
[0], self
->fields
[1]);
563 if (info
->type
== AARCH64_OPND_FPIMM
)
566 if (operand_need_sign_extension (self
))
567 imm
= sign_extend (imm
, get_operand_fields_width (self
) - 1);
569 if (operand_need_shift_by_two (self
))
572 if (info
->type
== AARCH64_OPND_ADDR_ADRP
)
575 info
->imm
.value
= imm
;
579 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
581 aarch64_ext_imm_half (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
582 const aarch64_insn code
,
583 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
585 aarch64_ext_imm (self
, info
, code
, inst
);
586 info
->shifter
.kind
= AARCH64_MOD_LSL
;
587 info
->shifter
.amount
= extract_field (FLD_hw
, code
, 0) << 4;
591 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
592 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
594 aarch64_ext_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
595 aarch64_opnd_info
*info
,
596 const aarch64_insn code
,
597 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
600 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
601 aarch64_field field
= {0, 0};
603 assert (info
->idx
== 1);
605 if (info
->type
== AARCH64_OPND_SIMD_FPIMM
)
608 /* a:b:c:d:e:f:g:h */
609 imm
= extract_fields (code
, 0, 2, FLD_abc
, FLD_defgh
);
610 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
612 /* Either MOVI <Dd>, #<imm>
613 or MOVI <Vd>.2D, #<imm>.
614 <imm> is a 64-bit immediate
615 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
616 encoded in "a:b:c:d:e:f:g:h". */
618 unsigned abcdefgh
= imm
;
619 for (imm
= 0ull, i
= 0; i
< 8; i
++)
620 if (((abcdefgh
>> i
) & 0x1) != 0)
621 imm
|= 0xffull
<< (8 * i
);
623 info
->imm
.value
= imm
;
626 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
627 switch (info
->qualifier
)
629 case AARCH64_OPND_QLF_NIL
:
631 info
->shifter
.kind
= AARCH64_MOD_NONE
;
633 case AARCH64_OPND_QLF_LSL
:
635 info
->shifter
.kind
= AARCH64_MOD_LSL
;
636 switch (aarch64_get_qualifier_esize (opnd0_qualifier
))
638 case 4: gen_sub_field (FLD_cmode
, 1, 2, &field
); break; /* per word */
639 case 2: gen_sub_field (FLD_cmode
, 1, 1, &field
); break; /* per half */
640 case 1: gen_sub_field (FLD_cmode
, 1, 0, &field
); break; /* per byte */
641 default: assert (0); return 0;
643 /* 00: 0; 01: 8; 10:16; 11:24. */
644 info
->shifter
.amount
= extract_field_2 (&field
, code
, 0) << 3;
646 case AARCH64_OPND_QLF_MSL
:
648 info
->shifter
.kind
= AARCH64_MOD_MSL
;
649 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
650 info
->shifter
.amount
= extract_field_2 (&field
, code
, 0) ? 16 : 8;
660 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
662 aarch64_ext_fbits (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
663 aarch64_opnd_info
*info
, const aarch64_insn code
,
664 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
666 info
->imm
.value
= 64- extract_field (FLD_scale
, code
, 0);
670 /* Decode arithmetic immediate for e.g.
671 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
673 aarch64_ext_aimm (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
674 aarch64_opnd_info
*info
, const aarch64_insn code
,
675 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
679 info
->shifter
.kind
= AARCH64_MOD_LSL
;
681 value
= extract_field (FLD_shift
, code
, 0);
684 info
->shifter
.amount
= value
? 12 : 0;
685 /* imm12 (unsigned) */
686 info
->imm
.value
= extract_field (FLD_imm12
, code
, 0);
691 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
694 aarch64_ext_limm (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
695 aarch64_opnd_info
*info
, const aarch64_insn code
,
696 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
704 value
= extract_fields (code
, 0, 3, FLD_N
, FLD_immr
, FLD_imms
);
705 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
706 || inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_X
);
707 sf
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
) != 4;
709 /* value is N:immr:imms. */
711 R
= (value
>> 6) & 0x3f;
712 N
= (value
>> 12) & 0x1;
714 if (sf
== 0 && N
== 1)
717 /* The immediate value is S+1 bits to 1, left rotated by SIMDsize - R
718 (in other words, right rotated by R), then replicated. */
722 mask
= 0xffffffffffffffffull
;
728 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size
= 32; break;
729 case 0x20 ... 0x2f: /* 10xxxx */ simd_size
= 16; S
&= 0xf; break;
730 case 0x30 ... 0x37: /* 110xxx */ simd_size
= 8; S
&= 0x7; break;
731 case 0x38 ... 0x3b: /* 1110xx */ simd_size
= 4; S
&= 0x3; break;
732 case 0x3c ... 0x3d: /* 11110x */ simd_size
= 2; S
&= 0x1; break;
735 mask
= (1ull << simd_size
) - 1;
736 /* Top bits are IGNORED. */
739 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
740 if (S
== simd_size
- 1)
742 /* S+1 consecutive bits to 1. */
743 /* NOTE: S can't be 63 due to detection above. */
744 imm
= (1ull << (S
+ 1)) - 1;
745 /* Rotate to the left by simd_size - R. */
747 imm
= ((imm
<< (simd_size
- R
)) & mask
) | (imm
>> R
);
748 /* Replicate the value according to SIMD size. */
751 case 2: imm
= (imm
<< 2) | imm
;
752 case 4: imm
= (imm
<< 4) | imm
;
753 case 8: imm
= (imm
<< 8) | imm
;
754 case 16: imm
= (imm
<< 16) | imm
;
755 case 32: imm
= (imm
<< 32) | imm
;
757 default: assert (0); return 0;
760 info
->imm
.value
= sf
? imm
: imm
& 0xffffffff;
765 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
766 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
768 aarch64_ext_ft (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
769 aarch64_opnd_info
*info
,
770 const aarch64_insn code
, const aarch64_inst
*inst
)
775 info
->reg
.regno
= extract_field (FLD_Rt
, code
, 0);
778 value
= extract_field (FLD_ldst_size
, code
, 0);
779 if (inst
->opcode
->iclass
== ldstpair_indexed
780 || inst
->opcode
->iclass
== ldstnapair_offs
781 || inst
->opcode
->iclass
== ldstpair_off
782 || inst
->opcode
->iclass
== loadlit
)
784 enum aarch64_opnd_qualifier qualifier
;
787 case 0: qualifier
= AARCH64_OPND_QLF_S_S
; break;
788 case 1: qualifier
= AARCH64_OPND_QLF_S_D
; break;
789 case 2: qualifier
= AARCH64_OPND_QLF_S_Q
; break;
792 info
->qualifier
= qualifier
;
797 value
= extract_fields (code
, 0, 2, FLD_opc1
, FLD_ldst_size
);
800 info
->qualifier
= get_sreg_qualifier_from_value (value
);
806 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
808 aarch64_ext_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
809 aarch64_opnd_info
*info
,
811 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
814 info
->addr
.base_regno
= extract_field (FLD_Rn
, code
, 0);
818 /* Decode the address operand for e.g.
819 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
821 aarch64_ext_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
822 aarch64_opnd_info
*info
,
823 aarch64_insn code
, const aarch64_inst
*inst
)
825 aarch64_insn S
, value
;
828 info
->addr
.base_regno
= extract_field (FLD_Rn
, code
, 0);
830 info
->addr
.offset
.regno
= extract_field (FLD_Rm
, code
, 0);
832 value
= extract_field (FLD_option
, code
, 0);
834 aarch64_get_operand_modifier_from_value (value
, TRUE
/* extend_p */);
835 /* Fix-up the shifter kind; although the table-driven approach is
836 efficient, it is slightly inflexible, thus needing this fix-up. */
837 if (info
->shifter
.kind
== AARCH64_MOD_UXTX
)
838 info
->shifter
.kind
= AARCH64_MOD_LSL
;
840 S
= extract_field (FLD_S
, code
, 0);
843 info
->shifter
.amount
= 0;
844 info
->shifter
.amount_present
= 0;
849 /* Need information in other operand(s) to help achieve the decoding
851 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
852 /* Get the size of the data element that is accessed, which may be
853 different from that of the source register size, e.g. in strb/ldrb. */
854 size
= aarch64_get_qualifier_esize (info
->qualifier
);
855 info
->shifter
.amount
= get_logsz (size
);
856 info
->shifter
.amount_present
= 1;
862 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
864 aarch64_ext_addr_simm (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
865 aarch64_insn code
, const aarch64_inst
*inst
)
868 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
871 info
->addr
.base_regno
= extract_field (FLD_Rn
, code
, 0);
872 /* simm (imm9 or imm7) */
873 imm
= extract_field (self
->fields
[0], code
, 0);
874 info
->addr
.offset
.imm
= sign_extend (imm
, fields
[self
->fields
[0]].width
- 1);
875 if (self
->fields
[0] == FLD_imm7
)
876 /* scaled immediate in ld/st pair instructions. */
877 info
->addr
.offset
.imm
*= aarch64_get_qualifier_esize (info
->qualifier
);
879 if (inst
->opcode
->iclass
== ldst_unscaled
880 || inst
->opcode
->iclass
== ldstnapair_offs
881 || inst
->opcode
->iclass
== ldstpair_off
882 || inst
->opcode
->iclass
== ldst_unpriv
)
883 info
->addr
.writeback
= 0;
886 /* pre/post- index */
887 info
->addr
.writeback
= 1;
888 if (extract_field (self
->fields
[1], code
, 0) == 1)
889 info
->addr
.preind
= 1;
891 info
->addr
.postind
= 1;
897 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
899 aarch64_ext_addr_uimm12 (const aarch64_operand
*self
, aarch64_opnd_info
*info
,
901 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
904 info
->qualifier
= get_expected_qualifier (inst
, info
->idx
);
905 shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
907 info
->addr
.base_regno
= extract_field (self
->fields
[0], code
, 0);
909 info
->addr
.offset
.imm
= extract_field (self
->fields
[1], code
, 0) << shift
;
913 /* Decode the address operand for e.g.
914 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
916 aarch64_ext_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
917 aarch64_opnd_info
*info
,
918 aarch64_insn code
, const aarch64_inst
*inst
)
920 /* The opcode dependent area stores the number of elements in
921 each structure to be loaded/stored. */
922 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
925 info
->addr
.base_regno
= extract_field (FLD_Rn
, code
, 0);
927 info
->addr
.offset
.regno
= extract_field (FLD_Rm
, code
, 0);
928 if (info
->addr
.offset
.regno
== 31)
930 if (inst
->opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
931 /* Special handling of loading single structure to all lane. */
932 info
->addr
.offset
.imm
= (is_ld1r
? 1
933 : inst
->operands
[0].reglist
.num_regs
)
934 * aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
936 info
->addr
.offset
.imm
= inst
->operands
[0].reglist
.num_regs
937 * aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
)
938 * aarch64_get_qualifier_nelem (inst
->operands
[0].qualifier
);
941 info
->addr
.offset
.is_reg
= 1;
942 info
->addr
.writeback
= 1;
947 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
949 aarch64_ext_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
950 aarch64_opnd_info
*info
,
951 aarch64_insn code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
955 value
= extract_field (FLD_cond
, code
, 0);
956 info
->cond
= get_cond_from_value (value
);
960 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
962 aarch64_ext_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
963 aarch64_opnd_info
*info
,
965 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
967 /* op0:op1:CRn:CRm:op2 */
968 info
->sysreg
= extract_fields (code
, 0, 5, FLD_op0
, FLD_op1
, FLD_CRn
,
973 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
975 aarch64_ext_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
976 aarch64_opnd_info
*info
, aarch64_insn code
,
977 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
981 info
->pstatefield
= extract_fields (code
, 0, 2, FLD_op1
, FLD_op2
);
982 for (i
= 0; aarch64_pstatefields
[i
].name
!= NULL
; ++i
)
983 if (aarch64_pstatefields
[i
].value
== (aarch64_insn
)info
->pstatefield
)
985 /* Reserved value in <pstatefield>. */
989 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
991 aarch64_ext_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
992 aarch64_opnd_info
*info
,
994 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
998 const aarch64_sys_ins_reg
*sysins_ops
;
999 /* op0:op1:CRn:CRm:op2 */
1000 value
= extract_fields (code
, 0, 5,
1001 FLD_op0
, FLD_op1
, FLD_CRn
,
1006 case AARCH64_OPND_SYSREG_AT
: sysins_ops
= aarch64_sys_regs_at
; break;
1007 case AARCH64_OPND_SYSREG_DC
: sysins_ops
= aarch64_sys_regs_dc
; break;
1008 case AARCH64_OPND_SYSREG_IC
: sysins_ops
= aarch64_sys_regs_ic
; break;
1009 case AARCH64_OPND_SYSREG_TLBI
: sysins_ops
= aarch64_sys_regs_tlbi
; break;
1010 default: assert (0); return 0;
1013 for (i
= 0; sysins_ops
[i
].template != NULL
; ++i
)
1014 if (sysins_ops
[i
].value
== value
)
1016 info
->sysins_op
= sysins_ops
+ i
;
1017 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1018 info
->sysins_op
->template,
1019 (unsigned)info
->sysins_op
->value
,
1020 info
->sysins_op
->has_xt
, i
);
1027 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1030 aarch64_ext_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1031 aarch64_opnd_info
*info
,
1033 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1036 info
->barrier
= aarch64_barrier_options
+ extract_field (FLD_CRm
, code
, 0);
1040 /* Decode the prefetch operation option operand for e.g.
1041 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1044 aarch64_ext_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1045 aarch64_opnd_info
*info
,
1046 aarch64_insn code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1049 info
->prfop
= aarch64_prfops
+ extract_field (FLD_Rt
, code
, 0);
1053 /* Decode the extended register operand for e.g.
1054 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1056 aarch64_ext_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1057 aarch64_opnd_info
*info
,
1059 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1064 info
->reg
.regno
= extract_field (FLD_Rm
, code
, 0);
1066 value
= extract_field (FLD_option
, code
, 0);
1067 info
->shifter
.kind
=
1068 aarch64_get_operand_modifier_from_value (value
, TRUE
/* extend_p */);
1070 info
->shifter
.amount
= extract_field (FLD_imm3
, code
, 0);
1072 /* This makes the constraint checking happy. */
1073 info
->shifter
.operator_present
= 1;
1075 /* Assume inst->operands[0].qualifier has been resolved. */
1076 assert (inst
->operands
[0].qualifier
!= AARCH64_OPND_QLF_NIL
);
1077 info
->qualifier
= AARCH64_OPND_QLF_W
;
1078 if (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_X
1079 && (info
->shifter
.kind
== AARCH64_MOD_UXTX
1080 || info
->shifter
.kind
== AARCH64_MOD_SXTX
))
1081 info
->qualifier
= AARCH64_OPND_QLF_X
;
1086 /* Decode the shifted register operand for e.g.
1087 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1089 aarch64_ext_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1090 aarch64_opnd_info
*info
,
1092 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1097 info
->reg
.regno
= extract_field (FLD_Rm
, code
, 0);
1099 value
= extract_field (FLD_shift
, code
, 0);
1100 info
->shifter
.kind
=
1101 aarch64_get_operand_modifier_from_value (value
, FALSE
/* extend_p */);
1102 if (info
->shifter
.kind
== AARCH64_MOD_ROR
1103 && inst
->opcode
->iclass
!= log_shift
)
1104 /* ROR is not available for the shifted register operand in arithmetic
1108 info
->shifter
.amount
= extract_field (FLD_imm6
, code
, 0);
1110 /* This makes the constraint checking happy. */
1111 info
->shifter
.operator_present
= 1;
1116 /* Bitfields that are commonly used to encode certain operands' information
1117 may be partially used as part of the base opcode in some instructions.
1118 For example, the bit 1 of the field 'size' in
1119 FCVTXN <Vb><d>, <Va><n>
1120 is actually part of the base opcode, while only size<0> is available
1121 for encoding the register type. Another example is the AdvSIMD
1122 instruction ORR (register), in which the field 'size' is also used for
1123 the base opcode, leaving only the field 'Q' available to encode the
1124 vector register arrangement specifier '8B' or '16B'.
1126 This function tries to deduce the qualifier from the value of partially
1127 constrained field(s). Given the VALUE of such a field or fields, the
1128 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1129 operand encoding), the function returns the matching qualifier or
1130 AARCH64_OPND_QLF_NIL if nothing matches.
1132 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1133 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1134 may end with AARCH64_OPND_QLF_NIL. */
1136 static enum aarch64_opnd_qualifier
1137 get_qualifier_from_partial_encoding (aarch64_insn value
,
1138 const enum aarch64_opnd_qualifier
* \
1143 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value
, (int)mask
);
1144 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
1146 aarch64_insn standard_value
;
1147 if (candidates
[i
] == AARCH64_OPND_QLF_NIL
)
1149 standard_value
= aarch64_get_qualifier_standard_value (candidates
[i
]);
1150 if ((standard_value
& mask
) == (value
& mask
))
1151 return candidates
[i
];
1153 return AARCH64_OPND_QLF_NIL
;
1156 /* Given a list of qualifier sequences, return all possible valid qualifiers
1157 for operand IDX in QUALIFIERS.
1158 Assume QUALIFIERS is an array whose length is large enough. */
1161 get_operand_possible_qualifiers (int idx
,
1162 const aarch64_opnd_qualifier_seq_t
*list
,
1163 enum aarch64_opnd_qualifier
*qualifiers
)
1166 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
1167 if ((qualifiers
[i
] = list
[i
][idx
]) == AARCH64_OPND_QLF_NIL
)
1171 /* Decode the size Q field for e.g. SHADD.
1172 We tag one operand with the qualifer according to the code;
1173 whether the qualifier is valid for this opcode or not, it is the
1174 duty of the semantic checking. */
1177 decode_sizeq (aarch64_inst
*inst
)
1180 enum aarch64_opnd_qualifier qualifier
;
1182 aarch64_insn value
, mask
;
1183 enum aarch64_field_kind fld_sz
;
1184 enum aarch64_opnd_qualifier candidates
[AARCH64_MAX_QLF_SEQ_NUM
];
1186 if (inst
->opcode
->iclass
== asisdlse
1187 || inst
->opcode
->iclass
== asisdlsep
1188 || inst
->opcode
->iclass
== asisdlso
1189 || inst
->opcode
->iclass
== asisdlsop
)
1190 fld_sz
= FLD_vldst_size
;
1195 value
= extract_fields (code
, inst
->opcode
->mask
, 2, fld_sz
, FLD_Q
);
1196 /* Obtain the info that which bits of fields Q and size are actually
1197 available for operand encoding. Opcodes like FMAXNM and FMLA have
1198 size[1] unavailable. */
1199 mask
= extract_fields (~inst
->opcode
->mask
, 0, 2, fld_sz
, FLD_Q
);
1201 /* The index of the operand we are going to tag a qualifier and the qualifer
1202 itself are reasoned from the value of the size and Q fields and the
1203 possible valid qualifier lists. */
1204 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1205 DEBUG_TRACE ("key idx: %d", idx
);
1207 /* For most related instruciton, size:Q are fully available for operand
1211 inst
->operands
[idx
].qualifier
= get_vreg_qualifier_from_value (value
);
1215 get_operand_possible_qualifiers (idx
, inst
->opcode
->qualifiers_list
,
1217 #ifdef DEBUG_AARCH64
1221 for (i
= 0; candidates
[i
] != AARCH64_OPND_QLF_NIL
1222 && i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
1223 DEBUG_TRACE ("qualifier %d: %s", i
,
1224 aarch64_get_qualifier_name(candidates
[i
]));
1225 DEBUG_TRACE ("%d, %d", (int)value
, (int)mask
);
1227 #endif /* DEBUG_AARCH64 */
1229 qualifier
= get_qualifier_from_partial_encoding (value
, candidates
, mask
);
1231 if (qualifier
== AARCH64_OPND_QLF_NIL
)
1234 inst
->operands
[idx
].qualifier
= qualifier
;
1238 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1239 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1242 decode_asimd_fcvt (aarch64_inst
*inst
)
1244 aarch64_field field
= {0, 0};
1246 enum aarch64_opnd_qualifier qualifier
;
1248 gen_sub_field (FLD_size
, 0, 1, &field
);
1249 value
= extract_field_2 (&field
, inst
->value
, 0);
1250 qualifier
= value
== 0 ? AARCH64_OPND_QLF_V_4S
1251 : AARCH64_OPND_QLF_V_2D
;
1252 switch (inst
->opcode
->op
)
1256 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1257 inst
->operands
[1].qualifier
= qualifier
;
1261 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1262 inst
->operands
[0].qualifier
= qualifier
;
1272 /* Decode size[0], i.e. bit 22, for
1273 e.g. FCVTXN <Vb><d>, <Va><n>. */
1276 decode_asisd_fcvtxn (aarch64_inst
*inst
)
1278 aarch64_field field
= {0, 0};
1279 gen_sub_field (FLD_size
, 0, 1, &field
);
1280 if (!extract_field_2 (&field
, inst
->value
, 0))
1282 inst
->operands
[0].qualifier
= AARCH64_OPND_QLF_S_S
;
1286 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1288 decode_fcvt (aarch64_inst
*inst
)
1290 enum aarch64_opnd_qualifier qualifier
;
1292 const aarch64_field field
= {15, 2};
1295 value
= extract_field_2 (&field
, inst
->value
, 0);
1298 case 0: qualifier
= AARCH64_OPND_QLF_S_S
; break;
1299 case 1: qualifier
= AARCH64_OPND_QLF_S_D
; break;
1300 case 3: qualifier
= AARCH64_OPND_QLF_S_H
; break;
1303 inst
->operands
[0].qualifier
= qualifier
;
1308 /* Do miscellaneous decodings that are not common enough to be driven by
1312 do_misc_decoding (aarch64_inst
*inst
)
1314 switch (inst
->opcode
->op
)
1317 return decode_fcvt (inst
);
1322 return decode_asimd_fcvt (inst
);
1324 return decode_asisd_fcvtxn (inst
);
1330 /* Opcodes that have fields shared by multiple operands are usually flagged
1331 with flags. In this function, we detect such flags, decode the related
1332 field(s) and store the information in one of the related operands. The
1333 'one' operand is not any operand but one of the operands that can
1334 accommadate all the information that has been decoded. */
1337 do_special_decoding (aarch64_inst
*inst
)
1341 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1342 if (inst
->opcode
->flags
& F_COND
)
1344 value
= extract_field (FLD_cond2
, inst
->value
, 0);
1345 inst
->cond
= get_cond_from_value (value
);
1348 if (inst
->opcode
->flags
& F_SF
)
1350 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1351 value
= extract_field (FLD_sf
, inst
->value
, 0);
1352 inst
->operands
[idx
].qualifier
= get_greg_qualifier_from_value (value
);
1353 if ((inst
->opcode
->flags
& F_N
)
1354 && extract_field (FLD_N
, inst
->value
, 0) != value
)
1357 /* size:Q fields. */
1358 if (inst
->opcode
->flags
& F_SIZEQ
)
1359 return decode_sizeq (inst
);
1361 if (inst
->opcode
->flags
& F_FPTYPE
)
1363 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
1364 value
= extract_field (FLD_type
, inst
->value
, 0);
1367 case 0: inst
->operands
[idx
].qualifier
= AARCH64_OPND_QLF_S_S
; break;
1368 case 1: inst
->operands
[idx
].qualifier
= AARCH64_OPND_QLF_S_D
; break;
1369 case 3: inst
->operands
[idx
].qualifier
= AARCH64_OPND_QLF_S_H
; break;
1374 if (inst
->opcode
->flags
& F_SSIZE
)
1376 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1377 of the base opcode. */
1379 enum aarch64_opnd_qualifier candidates
[AARCH64_MAX_QLF_SEQ_NUM
];
1380 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
1381 value
= extract_field (FLD_size
, inst
->value
, inst
->opcode
->mask
);
1382 mask
= extract_field (FLD_size
, ~inst
->opcode
->mask
, 0);
1383 /* For most related instruciton, the 'size' field is fully available for
1384 operand encoding. */
1386 inst
->operands
[idx
].qualifier
= get_sreg_qualifier_from_value (value
);
1389 get_operand_possible_qualifiers (idx
, inst
->opcode
->qualifiers_list
,
1391 inst
->operands
[idx
].qualifier
1392 = get_qualifier_from_partial_encoding (value
, candidates
, mask
);
1396 if (inst
->opcode
->flags
& F_T
)
1398 /* Num of consecutive '0's on the right side of imm5<3:0>. */
1401 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1402 == AARCH64_OPND_CLASS_SIMD_REG
);
1413 val
= extract_field (FLD_imm5
, inst
->value
, 0);
1414 while ((val
& 0x1) == 0 && ++num
<= 3)
1418 Q
= (unsigned) extract_field (FLD_Q
, inst
->value
, inst
->opcode
->mask
);
1419 inst
->operands
[0].qualifier
=
1420 get_vreg_qualifier_from_value ((num
<< 1) | Q
);
1423 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
1425 /* Use Rt to encode in the case of e.g.
1426 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1427 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
1430 /* Otherwise use the result operand, which has to be a integer
1432 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1433 == AARCH64_OPND_CLASS_INT_REG
);
1436 assert (idx
== 0 || idx
== 1);
1437 value
= extract_field (FLD_Q
, inst
->value
, 0);
1438 inst
->operands
[idx
].qualifier
= get_greg_qualifier_from_value (value
);
1441 if (inst
->opcode
->flags
& F_LDS_SIZE
)
1443 aarch64_field field
= {0, 0};
1444 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1445 == AARCH64_OPND_CLASS_INT_REG
);
1446 gen_sub_field (FLD_opc
, 0, 1, &field
);
1447 value
= extract_field_2 (&field
, inst
->value
, 0);
1448 inst
->operands
[0].qualifier
1449 = value
? AARCH64_OPND_QLF_W
: AARCH64_OPND_QLF_X
;
1452 /* Miscellaneous decoding; done as the last step. */
1453 if (inst
->opcode
->flags
& F_MISC
)
1454 return do_misc_decoding (inst
);
1459 /* Converters converting a real opcode instruction to its alias form. */
1461 /* ROR <Wd>, <Ws>, #<shift>
1463 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1465 convert_extr_to_ror (aarch64_inst
*inst
)
1467 if (inst
->operands
[1].reg
.regno
== inst
->operands
[2].reg
.regno
)
1469 copy_operand_info (inst
, 2, 3);
1470 inst
->operands
[3].type
= AARCH64_OPND_NIL
;
1476 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1478 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1480 convert_shll_to_xtl (aarch64_inst
*inst
)
1482 if (inst
->operands
[2].imm
.value
== 0)
1484 inst
->operands
[2].type
= AARCH64_OPND_NIL
;
1491 UBFM <Xd>, <Xn>, #<shift>, #63.
1493 LSR <Xd>, <Xn>, #<shift>. */
1495 convert_bfm_to_sr (aarch64_inst
*inst
)
1499 imms
= inst
->operands
[3].imm
.value
;
1500 val
= inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
1503 inst
->operands
[3].type
= AARCH64_OPND_NIL
;
1510 /* Convert MOV to ORR. */
1512 convert_orr_to_mov (aarch64_inst
*inst
)
1514 /* MOV <Vd>.<T>, <Vn>.<T>
1516 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1517 if (inst
->operands
[1].reg
.regno
== inst
->operands
[2].reg
.regno
)
1519 inst
->operands
[2].type
= AARCH64_OPND_NIL
;
1525 /* When <imms> >= <immr>, the instruction written:
1526 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1528 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1531 convert_bfm_to_bfx (aarch64_inst
*inst
)
1535 immr
= inst
->operands
[2].imm
.value
;
1536 imms
= inst
->operands
[3].imm
.value
;
1540 inst
->operands
[2].imm
.value
= lsb
;
1541 inst
->operands
[3].imm
.value
= imms
+ 1 - lsb
;
1542 /* The two opcodes have different qualifiers for
1543 the immediate operands; reset to help the checking. */
1544 reset_operand_qualifier (inst
, 2);
1545 reset_operand_qualifier (inst
, 3);
1552 /* When <imms> < <immr>, the instruction written:
1553 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1555 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1558 convert_bfm_to_bfi (aarch64_inst
*inst
)
1560 int64_t immr
, imms
, val
;
1562 immr
= inst
->operands
[2].imm
.value
;
1563 imms
= inst
->operands
[3].imm
.value
;
1564 val
= inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 32 : 64;
1567 inst
->operands
[2].imm
.value
= (val
- immr
) & (val
- 1);
1568 inst
->operands
[3].imm
.value
= imms
+ 1;
1569 /* The two opcodes have different qualifiers for
1570 the immediate operands; reset to help the checking. */
1571 reset_operand_qualifier (inst
, 2);
1572 reset_operand_qualifier (inst
, 3);
1579 /* The instruction written:
1580 LSL <Xd>, <Xn>, #<shift>
1582 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1585 convert_ubfm_to_lsl (aarch64_inst
*inst
)
1587 int64_t immr
= inst
->operands
[2].imm
.value
;
1588 int64_t imms
= inst
->operands
[3].imm
.value
;
1590 = inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
1592 if ((immr
== 0 && imms
== val
) || immr
== imms
+ 1)
1594 inst
->operands
[3].type
= AARCH64_OPND_NIL
;
1595 inst
->operands
[2].imm
.value
= val
- imms
;
1602 /* CINC <Wd>, <Wn>, <cond>
1604 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
1605 where <cond> is not AL or NV. */
1608 convert_from_csel (aarch64_inst
*inst
)
1610 if (inst
->operands
[1].reg
.regno
== inst
->operands
[2].reg
.regno
1611 && (inst
->operands
[3].cond
->value
& 0xe) != 0xe)
1613 copy_operand_info (inst
, 2, 3);
1614 inst
->operands
[2].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1615 inst
->operands
[3].type
= AARCH64_OPND_NIL
;
1621 /* CSET <Wd>, <cond>
1623 CSINC <Wd>, WZR, WZR, invert(<cond>)
1624 where <cond> is not AL or NV. */
1627 convert_csinc_to_cset (aarch64_inst
*inst
)
1629 if (inst
->operands
[1].reg
.regno
== 0x1f
1630 && inst
->operands
[2].reg
.regno
== 0x1f
1631 && (inst
->operands
[3].cond
->value
& 0xe) != 0xe)
1633 copy_operand_info (inst
, 1, 3);
1634 inst
->operands
[1].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1635 inst
->operands
[3].type
= AARCH64_OPND_NIL
;
1636 inst
->operands
[2].type
= AARCH64_OPND_NIL
;
1644 MOVZ <Wd>, #<imm16>, LSL #<shift>.
1646 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1647 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1648 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1649 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1650 machine-instruction mnemonic must be used. */
1653 convert_movewide_to_mov (aarch64_inst
*inst
)
1655 uint64_t value
= inst
->operands
[1].imm
.value
;
1656 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
1657 if (value
== 0 && inst
->operands
[1].shifter
.amount
!= 0)
1659 inst
->operands
[1].type
= AARCH64_OPND_IMM_MOV
;
1660 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_NONE
;
1661 value
<<= inst
->operands
[1].shifter
.amount
;
1662 /* As an alias convertor, it has to be clear that the INST->OPCODE
1663 is the opcode of the real instruction. */
1664 if (inst
->opcode
->op
== OP_MOVN
)
1666 int is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
1668 /* A MOVN has an immediate that could be encoded by MOVZ. */
1669 if (aarch64_wide_constant_p (value
, is32
, NULL
) == TRUE
)
1672 inst
->operands
[1].imm
.value
= value
;
1673 inst
->operands
[1].shifter
.amount
= 0;
1679 ORR <Wd>, WZR, #<imm>.
1681 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1682 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1683 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1684 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1685 machine-instruction mnemonic must be used. */
1688 convert_movebitmask_to_mov (aarch64_inst
*inst
)
1693 /* Should have been assured by the base opcode value. */
1694 assert (inst
->operands
[1].reg
.regno
== 0x1f);
1695 copy_operand_info (inst
, 1, 2);
1696 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
1697 inst
->operands
[1].type
= AARCH64_OPND_IMM_MOV
;
1698 value
= inst
->operands
[1].imm
.value
;
1699 /* ORR has an immediate that could be generated by a MOVZ or MOVN
1701 if (inst
->operands
[0].reg
.regno
!= 0x1f
1702 && (aarch64_wide_constant_p (value
, is32
, NULL
) == TRUE
1703 || aarch64_wide_constant_p (~value
, is32
, NULL
) == TRUE
))
1706 inst
->operands
[2].type
= AARCH64_OPND_NIL
;
1710 /* Some alias opcodes are disassembled by being converted from their real-form.
1711 N.B. INST->OPCODE is the real opcode rather than the alias. */
1714 convert_to_alias (aarch64_inst
*inst
, const aarch64_opcode
*alias
)
1720 return convert_bfm_to_sr (inst
);
1722 return convert_ubfm_to_lsl (inst
);
1726 return convert_from_csel (inst
);
1729 return convert_csinc_to_cset (inst
);
1733 return convert_bfm_to_bfx (inst
);
1737 return convert_bfm_to_bfi (inst
);
1739 return convert_orr_to_mov (inst
);
1740 case OP_MOV_IMM_WIDE
:
1741 case OP_MOV_IMM_WIDEN
:
1742 return convert_movewide_to_mov (inst
);
1743 case OP_MOV_IMM_LOG
:
1744 return convert_movebitmask_to_mov (inst
);
1746 return convert_extr_to_ror (inst
);
1751 return convert_shll_to_xtl (inst
);
1757 static int aarch64_opcode_decode (const aarch64_opcode
*, const aarch64_insn
,
1758 aarch64_inst
*, int);
1760 /* Given the instruction information in *INST, check if the instruction has
1761 any alias form that can be used to represent *INST. If the answer is yes,
1762 update *INST to be in the form of the determined alias. */
1764 /* In the opcode description table, the following flags are used in opcode
1765 entries to help establish the relations between the real and alias opcodes:
1767 F_ALIAS: opcode is an alias
1768 F_HAS_ALIAS: opcode has alias(es)
1771 F_P3: Disassembly preference priority 1-3 (the larger the
1772 higher). If nothing is specified, it is the priority
1773 0 by default, i.e. the lowest priority.
1775 Although the relation between the machine and the alias instructions are not
1776 explicitly described, it can be easily determined from the base opcode
1777 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1778 description entries:
1780 The mask of an alias opcode must be equal to or a super-set (i.e. more
1781 constrained) of that of the aliased opcode; so is the base opcode value.
1783 if (opcode_has_alias (real) && alias_opcode_p (opcode)
1784 && (opcode->mask & real->mask) == real->mask
1785 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
1786 then OPCODE is an alias of, and only of, the REAL instruction
1788 The alias relationship is forced flat-structured to keep related algorithm
1789 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
1791 During the disassembling, the decoding decision tree (in
1792 opcodes/aarch64-dis-2.c) always returns an machine instruction opcode entry;
1793 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
1794 not specified), the disassembler will check whether there is any alias
1795 instruction exists for this real instruction. If there is, the disassembler
1796 will try to disassemble the 32-bit binary again using the alias's rule, or
1797 try to convert the IR to the form of the alias. In the case of the multiple
1798 aliases, the aliases are tried one by one from the highest priority
 1799    (currently the flag F_P3) to the lowest priority (no priority flag), and the
 1800    first one that succeeds is adopted.
1802 You may ask why there is a need for the conversion of IR from one form to
1803 another in handling certain aliases. This is because on one hand it avoids
1804 adding more operand code to handle unusual encoding/decoding; on other
1805 hand, during the disassembling, the conversion is an effective approach to
1806 check the condition of an alias (as an alias may be adopted only if certain
1807 conditions are met).
1809 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1810 aarch64_opcode_table and generated aarch64_find_alias_opcode and
1811 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
1814 determine_disassembling_preference (struct aarch64_inst
*inst
)
1816 const aarch64_opcode
*opcode
;
1817 const aarch64_opcode
*alias
;
1819 opcode
= inst
->opcode
;
1821 /* This opcode does not have an alias, so use itself. */
1822 if (opcode_has_alias (opcode
) == FALSE
)
1825 alias
= aarch64_find_alias_opcode (opcode
);
1828 #ifdef DEBUG_AARCH64
1831 const aarch64_opcode
*tmp
= alias
;
1832 printf ("#### LIST orderd: ");
1835 printf ("%s, ", tmp
->name
);
1836 tmp
= aarch64_find_next_alias_opcode (tmp
);
1840 #endif /* DEBUG_AARCH64 */
1842 for (; alias
; alias
= aarch64_find_next_alias_opcode (alias
))
1844 DEBUG_TRACE ("try %s", alias
->name
);
1845 assert (alias_opcode_p (alias
));
1847 /* An alias can be a pseudo opcode which will never be used in the
1848 disassembly, e.g. BIC logical immediate is such a pseudo opcode
1850 if (pseudo_opcode_p (alias
))
1852 DEBUG_TRACE ("skip pseudo %s", alias
->name
);
1856 if ((inst
->value
& alias
->mask
) != alias
->opcode
)
1858 DEBUG_TRACE ("skip %s as base opcode not match", alias
->name
);
1861 /* No need to do any complicated transformation on operands, if the alias
1862 opcode does not have any operand. */
1863 if (aarch64_num_of_operands (alias
) == 0 && alias
->opcode
== inst
->value
)
1865 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias
->name
);
1866 aarch64_replace_opcode (inst
, alias
);
1869 if (alias
->flags
& F_CONV
)
1872 memcpy (©
, inst
, sizeof (aarch64_inst
));
1873 /* ALIAS is the preference as long as the instruction can be
1874 successfully converted to the form of ALIAS. */
1875 if (convert_to_alias (©
, alias
) == 1)
1877 aarch64_replace_opcode (©
, alias
);
1878 assert (aarch64_match_operands_constraint (©
, NULL
));
1879 DEBUG_TRACE ("succeed with %s via conversion", alias
->name
);
1880 memcpy (inst
, ©
, sizeof (aarch64_inst
));
1886 /* Directly decode the alias opcode. */
1888 memset (&temp
, '\0', sizeof (aarch64_inst
));
1889 if (aarch64_opcode_decode (alias
, inst
->value
, &temp
, 1) == 1)
1891 DEBUG_TRACE ("succeed with %s via direct decoding", alias
->name
);
1892 memcpy (inst
, &temp
, sizeof (aarch64_inst
));
1899 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
1900 fails, which meanes that CODE is not an instruction of OPCODE; otherwise
1903 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
1904 determined and used to disassemble CODE; this is done just before the
1908 aarch64_opcode_decode (const aarch64_opcode
*opcode
, const aarch64_insn code
,
1909 aarch64_inst
*inst
, int noaliases_p
)
1913 DEBUG_TRACE ("enter with %s", opcode
->name
);
1915 assert (opcode
&& inst
);
1917 /* Check the base opcode. */
1918 if ((code
& opcode
->mask
) != (opcode
->opcode
& opcode
->mask
))
1920 DEBUG_TRACE ("base opcode match FAIL");
1925 memset (inst
, '\0', sizeof (aarch64_inst
));
1927 inst
->opcode
= opcode
;
1930 /* Assign operand codes and indexes. */
1931 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1933 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
1935 inst
->operands
[i
].type
= opcode
->operands
[i
];
1936 inst
->operands
[i
].idx
= i
;
1939 /* Call the opcode decoder indicated by flags. */
1940 if (opcode_has_special_coder (opcode
) && do_special_decoding (inst
) == 0)
1942 DEBUG_TRACE ("opcode flag-based decoder FAIL");
1946 /* Call operand decoders. */
1947 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1949 const aarch64_operand
*opnd
;
1950 enum aarch64_opnd type
;
1951 type
= opcode
->operands
[i
];
1952 if (type
== AARCH64_OPND_NIL
)
1954 opnd
= &aarch64_operands
[type
];
1955 if (operand_has_extractor (opnd
)
1956 && (! aarch64_extract_operand (opnd
, &inst
->operands
[i
], code
, inst
)))
1958 DEBUG_TRACE ("operand decoder FAIL at operand %d", i
);
1963 /* Match the qualifiers. */
1964 if (aarch64_match_operands_constraint (inst
, NULL
) == 1)
1966 /* Arriving here, the CODE has been determined as a valid instruction
1967 of OPCODE and *INST has been filled with information of this OPCODE
1968 instruction. Before the return, check if the instruction has any
1969 alias and should be disassembled in the form of its alias instead.
1970 If the answer is yes, *INST will be updated. */
1972 determine_disassembling_preference (inst
);
1973 DEBUG_TRACE ("SUCCESS");
1978 DEBUG_TRACE ("constraint matching FAIL");
1985 /* This does some user-friendly fix-up to *INST. It is currently focus on
1986 the adjustment of qualifiers to help the printed instruction
1987 recognized/understood more easily. */
1990 user_friendly_fixup (aarch64_inst
*inst
)
1992 switch (inst
->opcode
->iclass
)
1995 /* TBNZ Xn|Wn, #uimm6, label
1996 Test and Branch Not Zero: conditionally jumps to label if bit number
1997 uimm6 in register Xn is not zero. The bit number implies the width of
1998 the register, which may be written and should be disassembled as Wn if
1999 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2001 if (inst
->operands
[1].imm
.value
< 32)
2002 inst
->operands
[0].qualifier
= AARCH64_OPND_QLF_W
;
2008 /* Decode INSN and fill in *INST the instruction information. */
2011 disas_aarch64_insn (uint64_t pc ATTRIBUTE_UNUSED
, uint32_t insn
,
2014 const aarch64_opcode
*opcode
= aarch64_opcode_lookup (insn
);
2016 #ifdef DEBUG_AARCH64
2019 const aarch64_opcode
*tmp
= opcode
;
2021 DEBUG_TRACE ("opcode lookup:");
2024 aarch64_verbose (" %s", tmp
->name
);
2025 tmp
= aarch64_find_next_opcode (tmp
);
2028 #endif /* DEBUG_AARCH64 */
2030 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2031 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2032 opcode field and value, apart from the difference that one of them has an
2033 extra field as part of the opcode, but such a field is used for operand
2034 encoding in other opcode(s) ('immh' in the case of the example). */
2035 while (opcode
!= NULL
)
2037 /* But only one opcode can be decoded successfully for, as the
2038 decoding routine will check the constraint carefully. */
2039 if (aarch64_opcode_decode (opcode
, insn
, inst
, no_aliases
) == 1)
2041 opcode
= aarch64_find_next_opcode (opcode
);
2047 /* Print operands. */
2050 print_operands (bfd_vma pc
, const aarch64_opcode
*opcode
,
2051 const aarch64_opnd_info
*opnds
, struct disassemble_info
*info
)
2053 int i
, pcrel_p
, num_printed
;
2054 for (i
= 0, num_printed
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2056 const size_t size
= 128;
2058 /* We regard the opcode operand info more, however we also look into
2059 the inst->operands to support the disassembling of the optional
2061 The two operand code should be the same in all cases, apart from
2062 when the operand can be optional. */
2063 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
2064 || opnds
[i
].type
== AARCH64_OPND_NIL
)
2067 /* Generate the operand string in STR. */
2068 aarch64_print_operand (str
, size
, pc
, opcode
, opnds
, i
, &pcrel_p
,
2071 /* Print the delimiter (taking account of omitted operand(s)). */
2073 (*info
->fprintf_func
) (info
->stream
, "%s",
2074 num_printed
++ == 0 ? "\t" : ", ");
2076 /* Print the operand. */
2078 (*info
->print_address_func
) (info
->target
, info
);
2080 (*info
->fprintf_func
) (info
->stream
, "%s", str
);
2084 /* Print the instruction mnemonic name. */
2087 print_mnemonic_name (const aarch64_inst
*inst
, struct disassemble_info
*info
)
2089 if (inst
->opcode
->flags
& F_COND
)
2091 /* For instructions that are truly conditionally executed, e.g. b.cond,
2092 prepare the full mnemonic name with the corresponding condition
2097 ptr
= strchr (inst
->opcode
->name
, '.');
2098 assert (ptr
&& inst
->cond
);
2099 len
= ptr
- inst
->opcode
->name
;
2101 strncpy (name
, inst
->opcode
->name
, len
);
2103 (*info
->fprintf_func
) (info
->stream
, "%s.%s", name
, inst
->cond
->names
[0]);
2106 (*info
->fprintf_func
) (info
->stream
, "%s", inst
->opcode
->name
);
2109 /* Print the instruction according to *INST. */
2112 print_aarch64_insn (bfd_vma pc
, const aarch64_inst
*inst
,
2113 struct disassemble_info
*info
)
2115 print_mnemonic_name (inst
, info
);
2116 print_operands (pc
, inst
->opcode
, inst
->operands
, info
);
2119 /* Entry-point of the instruction disassembler and printer. */
2122 print_insn_aarch64_word (bfd_vma pc
,
2124 struct disassemble_info
*info
)
2126 static const char *err_msg
[6] =
2129 [-ERR_UND
] = "undefined",
2130 [-ERR_UNP
] = "unpredictable",
2137 info
->insn_info_valid
= 1;
2138 info
->branch_delay_insns
= 0;
2139 info
->data_size
= 0;
2143 if (info
->flags
& INSN_HAS_RELOC
)
2144 /* If the instruction has a reloc associated with it, then
2145 the offset field in the instruction will actually be the
2146 addend for the reloc. (If we are using REL type relocs).
2147 In such cases, we can ignore the pc when computing
2148 addresses, since the addend is not currently pc-relative. */
2151 ret
= disas_aarch64_insn (pc
, word
, &inst
);
2153 if (((word
>> 21) & 0x3ff) == 1)
2155 /* RESERVED for ALES. */
2156 assert (ret
!= ERR_OK
);
2165 /* Handle undefined instructions. */
2166 info
->insn_type
= dis_noninsn
;
2167 (*info
->fprintf_func
) (info
->stream
,".inst\t0x%08x ; %s",
2168 word
, err_msg
[-ret
]);
2171 user_friendly_fixup (&inst
);
2172 print_aarch64_insn (pc
, &inst
, info
);
2179 /* Disallow mapping symbols ($x, $d etc) from
2180 being displayed in symbol relative addresses. */
2183 aarch64_symbol_is_valid (asymbol
* sym
,
2184 struct disassemble_info
* info ATTRIBUTE_UNUSED
)
2191 name
= bfd_asymbol_name (sym
);
2195 || (name
[1] != 'x' && name
[1] != 'd')
2196 || (name
[2] != '\0' && name
[2] != '.'));
2199 /* Print data bytes on INFO->STREAM. */
2202 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED
,
2204 struct disassemble_info
*info
)
2206 switch (info
->bytes_per_chunk
)
2209 info
->fprintf_func (info
->stream
, ".byte\t0x%02x", word
);
2212 info
->fprintf_func (info
->stream
, ".short\t0x%04x", word
);
2215 info
->fprintf_func (info
->stream
, ".word\t0x%08x", word
);
2222 /* Try to infer the code or data type from a symbol.
2223 Returns nonzero if *MAP_TYPE was set. */
2226 get_sym_code_type (struct disassemble_info
*info
, int n
,
2227 enum map_type
*map_type
)
2229 elf_symbol_type
*es
;
2233 es
= *(elf_symbol_type
**)(info
->symtab
+ n
);
2234 type
= ELF_ST_TYPE (es
->internal_elf_sym
.st_info
);
2236 /* If the symbol has function type then use that. */
2237 if (type
== STT_FUNC
)
2239 *map_type
= MAP_INSN
;
2243 /* Check for mapping symbols. */
2244 name
= bfd_asymbol_name(info
->symtab
[n
]);
2246 && (name
[1] == 'x' || name
[1] == 'd')
2247 && (name
[2] == '\0' || name
[2] == '.'))
2249 *map_type
= (name
[1] == 'x' ? MAP_INSN
: MAP_DATA
);
2256 /* Entry-point of the AArch64 disassembler. */
2259 print_insn_aarch64 (bfd_vma pc
,
2260 struct disassemble_info
*info
)
2262 bfd_byte buffer
[INSNLEN
];
2264 void (*printer
) (bfd_vma
, uint32_t, struct disassemble_info
*);
2265 bfd_boolean found
= FALSE
;
2266 unsigned int size
= 4;
2269 if (info
->disassembler_options
)
2271 set_default_aarch64_dis_options (info
);
2273 parse_aarch64_dis_options (info
->disassembler_options
);
2275 /* To avoid repeated parsing of these options, we remove them here. */
2276 info
->disassembler_options
= NULL
;
2279 /* Aarch64 instructions are always little-endian */
2280 info
->endian_code
= BFD_ENDIAN_LITTLE
;
2282 /* First check the full symtab for a mapping symbol, even if there
2283 are no usable non-mapping symbols for this address. */
2284 if (info
->symtab_size
!= 0
2285 && bfd_asymbol_flavour (*info
->symtab
) == bfd_target_elf_flavour
)
2287 enum map_type type
= MAP_INSN
;
2292 if (pc
<= last_mapping_addr
)
2293 last_mapping_sym
= -1;
2295 /* Start scanning at the start of the function, or wherever
2296 we finished last time. */
2297 n
= info
->symtab_pos
+ 1;
2298 if (n
< last_mapping_sym
)
2299 n
= last_mapping_sym
;
2301 /* Scan up to the location being disassembled. */
2302 for (; n
< info
->symtab_size
; n
++)
2304 addr
= bfd_asymbol_value (info
->symtab
[n
]);
2307 if ((info
->section
== NULL
2308 || info
->section
== info
->symtab
[n
]->section
)
2309 && get_sym_code_type (info
, n
, &type
))
2318 n
= info
->symtab_pos
;
2319 if (n
< last_mapping_sym
)
2320 n
= last_mapping_sym
;
2322 /* No mapping symbol found at this address. Look backwards
2323 for a preceeding one. */
2326 if (get_sym_code_type (info
, n
, &type
))
2335 last_mapping_sym
= last_sym
;
2338 /* Look a little bit ahead to see if we should print out
2339 less than four bytes of data. If there's a symbol,
2340 mapping or otherwise, after two bytes then don't
2342 if (last_type
== MAP_DATA
)
2344 size
= 4 - (pc
& 3);
2345 for (n
= last_sym
+ 1; n
< info
->symtab_size
; n
++)
2347 addr
= bfd_asymbol_value (info
->symtab
[n
]);
2350 if (addr
- pc
< size
)
2355 /* If the next symbol is after three bytes, we need to
2356 print only part of the data, so that we can use either
2359 size
= (pc
& 1) ? 1 : 2;
2363 if (last_type
== MAP_DATA
)
2365 /* size was set above. */
2366 info
->bytes_per_chunk
= size
;
2367 info
->display_endian
= info
->endian
;
2368 printer
= print_insn_data
;
2372 info
->bytes_per_chunk
= size
= INSNLEN
;
2373 info
->display_endian
= info
->endian_code
;
2374 printer
= print_insn_aarch64_word
;
2377 status
= (*info
->read_memory_func
) (pc
, buffer
, size
, info
);
2380 (*info
->memory_error_func
) (status
, pc
, info
);
2384 data
= bfd_get_bits (buffer
, size
* 8,
2385 info
->display_endian
== BFD_ENDIAN_BIG
);
2387 (*printer
) (pc
, data
, info
);
/* Describe the -M disassembler options on STREAM.  */
void
print_aarch64_disassembler_options (FILE *stream)
{
  fprintf (stream, _("\n\
The following AARCH64 specific disassembler options are supported for use\n\
with the -M switch (multiple options should be separated by commas):\n"));

  fprintf (stream, _("\n\
  no-aliases         Don't print instruction aliases.\n"));

  fprintf (stream, _("\n\
  aliases            Do print instruction aliases.\n"));

#ifdef DEBUG_AARCH64
  fprintf (stream, _("\n\
  debug_dump         Temp switch for debug trace.\n"));
#endif /* DEBUG_AARCH64 */

  fprintf (stream, _("\n"));
}