1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
31 #include "aarch64-opc.h"
34 int debug_dump
= FALSE
;
35 #endif /* DEBUG_AARCH64 */
37 /* Helper functions to determine which operand to be used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
43 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
44 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
51 return ((qualifier
>= AARCH64_OPND_QLF_S_B
52 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
62 DP_VECTOR_ACROSS_LANES
,
/* Map each data pattern (enum data_pattern) to the index of the operand
   that carries the significant size:Q information for that pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
82 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers
[0] == qualifiers
[1]
87 && vector_qualifier_p (qualifiers
[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers
[0])
89 == aarch64_get_qualifier_esize (qualifiers
[1]))
90 && (aarch64_get_qualifier_esize (qualifiers
[0])
91 == aarch64_get_qualifier_esize (qualifiers
[2])))
92 return DP_VECTOR_3SAME
;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
96 if (vector_qualifier_p (qualifiers
[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers
[0])
99 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
100 return DP_VECTOR_LONG
;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers
[0] == qualifiers
[1]
103 && vector_qualifier_p (qualifiers
[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers
[0])
106 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers
[0])
108 == aarch64_get_qualifier_esize (qualifiers
[1])))
109 return DP_VECTOR_WIDE
;
111 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers
[1]) == TRUE
115 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
116 return DP_VECTOR_ACROSS_LANES
;
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time when we need to select an operand. We can
126 either buffer the caculated the result or statically generate the data,
127 however, it is not obvious that the optimization will bring significant
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
134 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
137 const aarch64_field fields
[] =
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
196 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
197 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
198 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
199 { 31, 1 }, /* b5: in the test bit and branch instructions. */
200 { 19, 5 }, /* b40: in the test bit and branch instructions. */
201 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
202 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
203 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
204 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
205 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
206 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
207 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
208 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
209 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
210 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
211 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
212 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
213 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
214 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
215 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
216 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
217 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
220 enum aarch64_operand_class
221 aarch64_get_operand_class (enum aarch64_opnd type
)
223 return aarch64_operands
[type
].op_class
;
227 aarch64_get_operand_name (enum aarch64_opnd type
)
229 return aarch64_operands
[type
].name
;
232 /* Get operand description string.
233 This is usually for the diagnosis purpose. */
235 aarch64_get_operand_desc (enum aarch64_opnd type
)
237 return aarch64_operands
[type
].desc
;
240 /* Table of all conditional affixes. */
241 const aarch64_cond aarch64_conds
[16] =
246 {{"cc", "lo", "ul"}, 0x3},
262 get_cond_from_value (aarch64_insn value
)
265 return &aarch64_conds
[(unsigned int) value
];
269 get_inverted_cond (const aarch64_cond
*cond
)
271 return &aarch64_conds
[cond
->value
^ 0x1];
274 /* Table describing the operand extension/shifting operators; indexed by
275 enum aarch64_modifier_kind.
277 The value column provides the most common values for encoding modifiers,
278 which enables table-driven encoding/decoding for the modifiers. */
279 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
298 enum aarch64_modifier_kind
299 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
301 return desc
- aarch64_operand_modifiers
;
305 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
307 return aarch64_operand_modifiers
[kind
].value
;
310 enum aarch64_modifier_kind
311 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
312 bfd_boolean extend_p
)
314 if (extend_p
== TRUE
)
315 return AARCH64_MOD_UXTB
+ value
;
317 return AARCH64_MOD_LSL
- value
;
321 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
323 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
327 static inline bfd_boolean
328 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
330 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
334 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
354 /* Table describing the operands supported by the aliases of the HINT
357 The name column is the operand that is accepted for the alias. The value
358 column is the hint number of the alias. The list of operands is terminated
359 by NULL in the name column. */
361 const struct aarch64_name_value_pair aarch64_hint_options
[] =
363 { "csync", 0x11 }, /* PSB CSYNC. */
367 /* op -> op: load = 0 instruction = 1 store = 2
369 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
370 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
371 const struct aarch64_name_value_pair aarch64_prfops
[32] =
373 { "pldl1keep", B(0, 1, 0) },
374 { "pldl1strm", B(0, 1, 1) },
375 { "pldl2keep", B(0, 2, 0) },
376 { "pldl2strm", B(0, 2, 1) },
377 { "pldl3keep", B(0, 3, 0) },
378 { "pldl3strm", B(0, 3, 1) },
381 { "plil1keep", B(1, 1, 0) },
382 { "plil1strm", B(1, 1, 1) },
383 { "plil2keep", B(1, 2, 0) },
384 { "plil2strm", B(1, 2, 1) },
385 { "plil3keep", B(1, 3, 0) },
386 { "plil3strm", B(1, 3, 1) },
389 { "pstl1keep", B(2, 1, 0) },
390 { "pstl1strm", B(2, 1, 1) },
391 { "pstl2keep", B(2, 2, 0) },
392 { "pstl2strm", B(2, 2, 1) },
393 { "pstl3keep", B(2, 3, 0) },
394 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */

static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}

/* Return 1 iff VALUE is a multiple of ALIGN (ALIGN must be a power
   of two).  */

static inline int
value_aligned_p (int64_t value, int align)
{
  return ((value & (align - 1)) == 0) ? 1 : 0;
}

/* A signed value fits in a field.  Return 1 iff VALUE is representable
   as a WIDTH-bit two's-complement number.  */

static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}

/* An unsigned value fits in a field.  Return 1 iff VALUE is representable
   as a WIDTH-bit unsigned number.  */

static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
450 /* Return 1 if OPERAND is SP or WSP. */
452 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
454 return ((aarch64_get_operand_class (operand
->type
)
455 == AARCH64_OPND_CLASS_INT_REG
)
456 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
457 && operand
->reg
.regno
== 31);
460 /* Return 1 if OPERAND is XZR or WZP. */
462 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
464 return ((aarch64_get_operand_class (operand
->type
)
465 == AARCH64_OPND_CLASS_INT_REG
)
466 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
467 && operand
->reg
.regno
== 31);
470 /* Return true if the operand *OPERAND that has the operand code
471 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
472 qualified by the qualifier TARGET. */
475 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
476 aarch64_opnd_qualifier_t target
)
478 switch (operand
->qualifier
)
480 case AARCH64_OPND_QLF_W
:
481 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
484 case AARCH64_OPND_QLF_X
:
485 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
488 case AARCH64_OPND_QLF_WSP
:
489 if (target
== AARCH64_OPND_QLF_W
490 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
493 case AARCH64_OPND_QLF_SP
:
494 if (target
== AARCH64_OPND_QLF_X
495 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
505 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
506 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
508 Return NIL if more than one expected qualifiers are found. */
510 aarch64_opnd_qualifier_t
511 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
513 const aarch64_opnd_qualifier_t known_qlf
,
520 When the known qualifier is NIL, we have to assume that there is only
521 one qualifier sequence in the *QSEQ_LIST and return the corresponding
522 qualifier directly. One scenario is that for instruction
523 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
524 which has only one possible valid qualifier sequence
526 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
527 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
529 Because the qualifier NIL has dual roles in the qualifier sequence:
530 it can mean no qualifier for the operand, or the qualifer sequence is
531 not in use (when all qualifiers in the sequence are NILs), we have to
532 handle this special case here. */
533 if (known_qlf
== AARCH64_OPND_NIL
)
535 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
536 return qseq_list
[0][idx
];
539 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
541 if (qseq_list
[i
][known_idx
] == known_qlf
)
544 /* More than one sequences are found to have KNOWN_QLF at
546 return AARCH64_OPND_NIL
;
551 return qseq_list
[saved_i
][idx
];
554 enum operand_qualifier_kind
562 /* Operand qualifier description. */
563 struct operand_qualifier_data
565 /* The usage of the three data fields depends on the qualifier kind. */
572 enum operand_qualifier_kind kind
;
575 /* Indexed by the operand qualifier enumerators. */
576 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
578 {0, 0, 0, "NIL", OQK_NIL
},
580 /* Operand variant qualifiers.
582 element size, number of elements and common value for encoding. */
584 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
585 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
586 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
587 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
589 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
590 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
591 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
592 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
593 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
595 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
596 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
597 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
598 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
599 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
600 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
601 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
602 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
603 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
604 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
606 /* Qualifiers constraining the value range.
608 Lower bound, higher bound, unused. */
610 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
611 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
612 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
613 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
614 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
615 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
617 /* Qualifiers for miscellaneous purpose.
619 unused, unused and unused. */
624 {0, 0, 0, "retrieving", 0},
627 static inline bfd_boolean
628 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
630 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
634 static inline bfd_boolean
635 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
637 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
642 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
644 return aarch64_opnd_qualifiers
[qualifier
].desc
;
647 /* Given an operand qualifier, return the expected data element size
648 of a qualified operand. */
650 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
652 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
653 return aarch64_opnd_qualifiers
[qualifier
].data0
;
657 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
659 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
660 return aarch64_opnd_qualifiers
[qualifier
].data1
;
664 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
666 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
667 return aarch64_opnd_qualifiers
[qualifier
].data2
;
671 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
673 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
674 return aarch64_opnd_qualifiers
[qualifier
].data0
;
678 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
680 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
681 return aarch64_opnd_qualifiers
[qualifier
].data1
;
686 aarch64_verbose (const char *str
, ...)
697 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
701 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
702 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
707 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
708 const aarch64_opnd_qualifier_t
*qualifier
)
711 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
713 aarch64_verbose ("dump_match_qualifiers:");
714 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
715 curr
[i
] = opnd
[i
].qualifier
;
716 dump_qualifier_sequence (curr
);
717 aarch64_verbose ("against");
718 dump_qualifier_sequence (qualifier
);
720 #endif /* DEBUG_AARCH64 */
722 /* TODO improve this, we can have an extra field at the runtime to
723 store the number of operands rather than calculating it every time. */
726 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
729 const enum aarch64_opnd
*opnds
= opcode
->operands
;
730 while (opnds
[i
++] != AARCH64_OPND_NIL
)
733 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
737 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
738 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
740 N.B. on the entry, it is very likely that only some operands in *INST
741 have had their qualifiers been established.
743 If STOP_AT is not -1, the function will only try to match
744 the qualifier sequence for operands before and including the operand
745 of index STOP_AT; and on success *RET will only be filled with the first
746 (STOP_AT+1) qualifiers.
748 A couple examples of the matching algorithm:
756 Apart from serving the main encoding routine, this can also be called
757 during or after the operand decoding. */
760 aarch64_find_best_match (const aarch64_inst
*inst
,
761 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
762 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
766 const aarch64_opnd_qualifier_t
*qualifiers
;
768 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
771 DEBUG_TRACE ("SUCCEED: no operand");
775 if (stop_at
< 0 || stop_at
>= num_opnds
)
776 stop_at
= num_opnds
- 1;
778 /* For each pattern. */
779 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
782 qualifiers
= *qualifiers_list
;
784 /* Start as positive. */
787 DEBUG_TRACE ("%d", i
);
790 dump_match_qualifiers (inst
->operands
, qualifiers
);
793 /* Most opcodes has much fewer patterns in the list.
794 First NIL qualifier indicates the end in the list. */
795 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
797 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
803 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
805 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
807 /* Either the operand does not have qualifier, or the qualifier
808 for the operand needs to be deduced from the qualifier
810 In the latter case, any constraint checking related with
811 the obtained qualifier should be done later in
812 operand_general_constraint_met_p. */
815 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
817 /* Unless the target qualifier can also qualify the operand
818 (which has already had a non-nil qualifier), non-equal
819 qualifiers are generally un-matched. */
820 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
829 continue; /* Equal qualifiers are certainly matched. */
832 /* Qualifiers established. */
839 /* Fill the result in *RET. */
841 qualifiers
= *qualifiers_list
;
843 DEBUG_TRACE ("complete qualifiers using list %d", i
);
846 dump_qualifier_sequence (qualifiers
);
849 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
850 ret
[j
] = *qualifiers
;
851 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
852 ret
[j
] = AARCH64_OPND_QLF_NIL
;
854 DEBUG_TRACE ("SUCCESS");
858 DEBUG_TRACE ("FAIL");
862 /* Operand qualifier matching and resolving.
864 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
865 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
867 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
871 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
874 aarch64_opnd_qualifier_seq_t qualifiers
;
876 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
879 DEBUG_TRACE ("matching FAIL");
883 if (inst
->opcode
->flags
& F_STRICT
)
885 /* Require an exact qualifier match, even for NIL qualifiers. */
886 nops
= aarch64_num_of_operands (inst
->opcode
);
887 for (i
= 0; i
< nops
; ++i
)
888 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
892 /* Update the qualifiers. */
893 if (update_p
== TRUE
)
894 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
896 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
898 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
899 "update %s with %s for operand %d",
900 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
901 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
902 inst
->operands
[i
].qualifier
= qualifiers
[i
];
905 DEBUG_TRACE ("matching SUCCESS");
909 /* Return TRUE if VALUE is a wide constant that can be moved into a general
912 IS32 indicates whether value is a 32-bit immediate or not.
913 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
914 amount will be returned in *SHIFT_AMOUNT. */
917 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
921 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
925 /* Allow all zeros or all ones in top 32-bits, so that
926 32-bit constant expressions like ~0x80000000 are
928 uint64_t ext
= value
;
929 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
930 /* Immediate out of range. */
932 value
&= (int64_t) 0xffffffff;
935 /* first, try movz then movn */
937 if ((value
& ((int64_t) 0xffff << 0)) == value
)
939 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
941 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
943 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
948 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
952 if (shift_amount
!= NULL
)
953 *shift_amount
= amount
;
955 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
960 /* Build the accepted values for immediate logical SIMD instructions.
962 The standard encodings of the immediate value are:
963 N imms immr SIMD size R S
964 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
965 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
966 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
967 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
968 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
969 0 11110s 00000r 2 UInt(r) UInt(s)
970 where all-ones value of S is reserved.
972 Let's call E the SIMD size.
974 The immediate value is: S+1 bits '1' rotated to the right by R.
976 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
977 (remember S != E - 1). */
979 #define TOTAL_IMM_NB 5334
984 aarch64_insn encoding
;
987 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
990 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
992 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
993 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
995 if (imm1
->imm
< imm2
->imm
)
997 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R             S
   1         ssssss     rrrrrr      64        rrrrrr        ssssss
   0         0sssss     0rrrrr      32        rrrrr         sssss
   0         10ssss     00rrrr      16        rrrr          ssss
   0         110sss     000rrr      8         rrr           sss
   0         1110ss     0000rr      4         rr            ss
   0         11110s     00000r      2         r             s  */

static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1017 build_immediate_table (void)
1019 uint32_t log_e
, e
, s
, r
, s_mask
;
1025 for (log_e
= 1; log_e
<= 6; log_e
++)
1027 /* Get element size. */
1032 mask
= 0xffffffffffffffffull
;
1038 mask
= (1ull << e
) - 1;
1040 1 ((1 << 4) - 1) << 2 = 111100
1041 2 ((1 << 3) - 1) << 3 = 111000
1042 3 ((1 << 2) - 1) << 4 = 110000
1043 4 ((1 << 1) - 1) << 5 = 100000
1044 5 ((1 << 0) - 1) << 6 = 000000 */
1045 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1047 for (s
= 0; s
< e
- 1; s
++)
1048 for (r
= 0; r
< e
; r
++)
1050 /* s+1 consecutive bits to 1 (s < 63) */
1051 imm
= (1ull << (s
+ 1)) - 1;
1052 /* rotate right by r */
1054 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1055 /* replicate the constant depending on SIMD size */
1058 case 1: imm
= (imm
<< 2) | imm
;
1059 case 2: imm
= (imm
<< 4) | imm
;
1060 case 3: imm
= (imm
<< 8) | imm
;
1061 case 4: imm
= (imm
<< 16) | imm
;
1062 case 5: imm
= (imm
<< 32) | imm
;
1066 simd_immediates
[nb_imms
].imm
= imm
;
1067 simd_immediates
[nb_imms
].encoding
=
1068 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1072 assert (nb_imms
== TOTAL_IMM_NB
);
1073 qsort(simd_immediates
, nb_imms
,
1074 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1077 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1078 be accepted by logical (immediate) instructions
1079 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1081 ESIZE is the number of bytes in the decoded immediate value.
1082 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1083 VALUE will be returned in *ENCODING. */
1086 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1088 simd_imm_encoding imm_enc
;
1089 const simd_imm_encoding
*imm_encoding
;
1090 static bfd_boolean initialized
= FALSE
;
1094 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), is32: %d", value
,
1097 if (initialized
== FALSE
)
1099 build_immediate_table ();
1103 /* Allow all zeros or all ones in top bits, so that
1104 constant expressions like ~1 are permitted. */
1105 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1106 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1109 /* Replicate to a full 64-bit value. */
1111 for (i
= esize
* 8; i
< 64; i
*= 2)
1112 value
|= (value
<< i
);
1114 imm_enc
.imm
= value
;
1115 imm_encoding
= (const simd_imm_encoding
*)
1116 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1117 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1118 if (imm_encoding
== NULL
)
1120 DEBUG_TRACE ("exit with FALSE");
1123 if (encoding
!= NULL
)
1124 *encoding
= imm_encoding
->encoding
;
1125 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */

int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros cannot come from
	   an expanded imm8.  */
	return -1;
    }
  return ret;
}
1151 /* Utility inline functions for operand_general_constraint_met_p. */
1154 set_error (aarch64_operand_error
*mismatch_detail
,
1155 enum aarch64_operand_error_kind kind
, int idx
,
1158 if (mismatch_detail
== NULL
)
1160 mismatch_detail
->kind
= kind
;
1161 mismatch_detail
->index
= idx
;
1162 mismatch_detail
->error
= error
;
1166 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1169 if (mismatch_detail
== NULL
)
1171 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1175 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1176 int idx
, int lower_bound
, int upper_bound
,
1179 if (mismatch_detail
== NULL
)
1181 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1182 mismatch_detail
->data
[0] = lower_bound
;
1183 mismatch_detail
->data
[1] = upper_bound
;
1187 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1188 int idx
, int lower_bound
, int upper_bound
)
1190 if (mismatch_detail
== NULL
)
1192 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1193 _("immediate value"));
1197 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1198 int idx
, int lower_bound
, int upper_bound
)
1200 if (mismatch_detail
== NULL
)
1202 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1203 _("immediate offset"));
1207 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1208 int idx
, int lower_bound
, int upper_bound
)
1210 if (mismatch_detail
== NULL
)
1212 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1213 _("register number"));
1217 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1218 int idx
, int lower_bound
, int upper_bound
)
1220 if (mismatch_detail
== NULL
)
1222 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1223 _("register element index"));
1227 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1228 int idx
, int lower_bound
, int upper_bound
)
1230 if (mismatch_detail
== NULL
)
1232 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1237 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1240 if (mismatch_detail
== NULL
)
1242 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1243 mismatch_detail
->data
[0] = alignment
;
1247 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1250 if (mismatch_detail
== NULL
)
1252 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1253 mismatch_detail
->data
[0] = expected_num
;
1257 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1260 if (mismatch_detail
== NULL
)
1262 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1265 /* General constraint checking based on operand code.
1267 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1268 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1270 This function has to be called after the qualifiers for all operands
1273 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1274 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1275 of error message during the disassembling where error message is not
1276 wanted. We avoid the dynamic construction of strings of error messages
1277 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1278 use a combination of error code, static string and some integer data to
1279 represent an error. */
1282 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1283 enum aarch64_opnd type
,
1284 const aarch64_opcode
*opcode
,
1285 aarch64_operand_error
*mismatch_detail
)
1290 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1291 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1293 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1295 switch (aarch64_operands
[type
].op_class
)
1297 case AARCH64_OPND_CLASS_INT_REG
:
1298 /* Check pair reg constraints for cas* instructions. */
1299 if (type
== AARCH64_OPND_PAIRREG
)
1301 assert (idx
== 1 || idx
== 3);
1302 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1304 set_syntax_error (mismatch_detail
, idx
- 1,
1305 _("reg pair must start from even reg"));
1308 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1310 set_syntax_error (mismatch_detail
, idx
,
1311 _("reg pair must be contiguous"));
1317 /* <Xt> may be optional in some IC and TLBI instructions. */
1318 if (type
== AARCH64_OPND_Rt_SYS
)
1320 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1321 == AARCH64_OPND_CLASS_SYSTEM
));
1322 if (opnds
[1].present
1323 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1325 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1328 if (!opnds
[1].present
1329 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1331 set_other_error (mismatch_detail
, idx
, _("missing register"));
1337 case AARCH64_OPND_QLF_WSP
:
1338 case AARCH64_OPND_QLF_SP
:
1339 if (!aarch64_stack_pointer_p (opnd
))
1341 set_other_error (mismatch_detail
, idx
,
1342 _("stack pointer register expected"));
1351 case AARCH64_OPND_CLASS_SVE_REG
:
1354 case AARCH64_OPND_SVE_Zn_INDEX
:
1355 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1356 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1358 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1364 case AARCH64_OPND_SVE_ZnxN
:
1365 case AARCH64_OPND_SVE_ZtxN
:
1366 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1368 set_other_error (mismatch_detail
, idx
,
1369 _("invalid register list"));
1379 case AARCH64_OPND_CLASS_PRED_REG
:
1380 if (opnd
->reg
.regno
>= 8
1381 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1383 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1388 case AARCH64_OPND_CLASS_COND
:
1389 if (type
== AARCH64_OPND_COND1
1390 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1392 /* Not allow AL or NV. */
1393 set_syntax_error (mismatch_detail
, idx
, NULL
);
1397 case AARCH64_OPND_CLASS_ADDRESS
:
1398 /* Check writeback. */
1399 switch (opcode
->iclass
)
1403 case ldstnapair_offs
:
1406 if (opnd
->addr
.writeback
== 1)
1408 set_syntax_error (mismatch_detail
, idx
,
1409 _("unexpected address writeback"));
1414 case ldstpair_indexed
:
1417 if (opnd
->addr
.writeback
== 0)
1419 set_syntax_error (mismatch_detail
, idx
,
1420 _("address writeback expected"));
1425 assert (opnd
->addr
.writeback
== 0);
1430 case AARCH64_OPND_ADDR_SIMM7
:
1431 /* Scaled signed 7 bits immediate offset. */
1432 /* Get the size of the data element that is accessed, which may be
1433 different from that of the source register size,
1434 e.g. in strb/ldrb. */
1435 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1436 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1438 set_offset_out_of_range_error (mismatch_detail
, idx
,
1439 -64 * size
, 63 * size
);
1442 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1444 set_unaligned_error (mismatch_detail
, idx
, size
);
1448 case AARCH64_OPND_ADDR_SIMM9
:
1449 /* Unscaled signed 9 bits immediate offset. */
1450 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1452 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1457 case AARCH64_OPND_ADDR_SIMM9_2
:
1458 /* Unscaled signed 9 bits immediate offset, which has to be negative
1460 size
= aarch64_get_qualifier_esize (qualifier
);
1461 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1462 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1463 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1465 set_other_error (mismatch_detail
, idx
,
1466 _("negative or unaligned offset expected"));
1469 case AARCH64_OPND_SIMD_ADDR_POST
:
1470 /* AdvSIMD load/store multiple structures, post-index. */
1472 if (opnd
->addr
.offset
.is_reg
)
1474 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1478 set_other_error (mismatch_detail
, idx
,
1479 _("invalid register offset"));
1485 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1486 unsigned num_bytes
; /* total number of bytes transferred. */
1487 /* The opcode dependent area stores the number of elements in
1488 each structure to be loaded/stored. */
1489 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1490 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1491 /* Special handling of loading single structure to all lane. */
1492 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1493 * aarch64_get_qualifier_esize (prev
->qualifier
);
1495 num_bytes
= prev
->reglist
.num_regs
1496 * aarch64_get_qualifier_esize (prev
->qualifier
)
1497 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1498 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1500 set_other_error (mismatch_detail
, idx
,
1501 _("invalid post-increment amount"));
1507 case AARCH64_OPND_ADDR_REGOFF
:
1508 /* Get the size of the data element that is accessed, which may be
1509 different from that of the source register size,
1510 e.g. in strb/ldrb. */
1511 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1512 /* It is either no shift or shift by the binary logarithm of SIZE. */
1513 if (opnd
->shifter
.amount
!= 0
1514 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1516 set_other_error (mismatch_detail
, idx
,
1517 _("invalid shift amount"));
1520 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1522 switch (opnd
->shifter
.kind
)
1524 case AARCH64_MOD_UXTW
:
1525 case AARCH64_MOD_LSL
:
1526 case AARCH64_MOD_SXTW
:
1527 case AARCH64_MOD_SXTX
: break;
1529 set_other_error (mismatch_detail
, idx
,
1530 _("invalid extend/shift operator"));
1535 case AARCH64_OPND_ADDR_UIMM12
:
1536 imm
= opnd
->addr
.offset
.imm
;
1537 /* Get the size of the data element that is accessed, which may be
1538 different from that of the source register size,
1539 e.g. in strb/ldrb. */
1540 size
= aarch64_get_qualifier_esize (qualifier
);
1541 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1543 set_offset_out_of_range_error (mismatch_detail
, idx
,
1547 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1549 set_unaligned_error (mismatch_detail
, idx
, size
);
1554 case AARCH64_OPND_ADDR_PCREL14
:
1555 case AARCH64_OPND_ADDR_PCREL19
:
1556 case AARCH64_OPND_ADDR_PCREL21
:
1557 case AARCH64_OPND_ADDR_PCREL26
:
1558 imm
= opnd
->imm
.value
;
1559 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1561 /* The offset value in a PC-relative branch instruction is alway
1562 4-byte aligned and is encoded without the lowest 2 bits. */
1563 if (!value_aligned_p (imm
, 4))
1565 set_unaligned_error (mismatch_detail
, idx
, 4);
1568 /* Right shift by 2 so that we can carry out the following check
1572 size
= get_operand_fields_width (get_operand_from_code (type
));
1573 if (!value_fit_signed_field_p (imm
, size
))
1575 set_other_error (mismatch_detail
, idx
,
1576 _("immediate out of range"));
1586 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1587 if (type
== AARCH64_OPND_LEt
)
1589 /* Get the upper bound for the element index. */
1590 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1591 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1593 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1597 /* The opcode dependent area stores the number of elements in
1598 each structure to be loaded/stored. */
1599 num
= get_opcode_dependent_value (opcode
);
1602 case AARCH64_OPND_LVt
:
1603 assert (num
>= 1 && num
<= 4);
1604 /* Unless LD1/ST1, the number of registers should be equal to that
1605 of the structure elements. */
1606 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1608 set_reg_list_error (mismatch_detail
, idx
, num
);
1612 case AARCH64_OPND_LVt_AL
:
1613 case AARCH64_OPND_LEt
:
1614 assert (num
>= 1 && num
<= 4);
1615 /* The number of registers should be equal to that of the structure
1617 if (opnd
->reglist
.num_regs
!= num
)
1619 set_reg_list_error (mismatch_detail
, idx
, num
);
1628 case AARCH64_OPND_CLASS_IMMEDIATE
:
1629 /* Constraint check on immediate operand. */
1630 imm
= opnd
->imm
.value
;
1631 /* E.g. imm_0_31 constrains value to be 0..31. */
1632 if (qualifier_value_in_range_constraint_p (qualifier
)
1633 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1634 get_upper_bound (qualifier
)))
1636 set_imm_out_of_range_error (mismatch_detail
, idx
,
1637 get_lower_bound (qualifier
),
1638 get_upper_bound (qualifier
));
1644 case AARCH64_OPND_AIMM
:
1645 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1647 set_other_error (mismatch_detail
, idx
,
1648 _("invalid shift operator"));
1651 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1653 set_other_error (mismatch_detail
, idx
,
1654 _("shift amount expected to be 0 or 12"));
1657 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1659 set_other_error (mismatch_detail
, idx
,
1660 _("immediate out of range"));
1665 case AARCH64_OPND_HALF
:
1666 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1667 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1669 set_other_error (mismatch_detail
, idx
,
1670 _("invalid shift operator"));
1673 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1674 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
1676 set_other_error (mismatch_detail
, idx
,
1677 _("shift amount should be a multiple of 16"));
1680 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
1682 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
1686 if (opnd
->imm
.value
< 0)
1688 set_other_error (mismatch_detail
, idx
,
1689 _("negative immediate value not allowed"));
1692 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
1694 set_other_error (mismatch_detail
, idx
,
1695 _("immediate out of range"));
1700 case AARCH64_OPND_IMM_MOV
:
1702 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1703 imm
= opnd
->imm
.value
;
1707 case OP_MOV_IMM_WIDEN
:
1709 /* Fall through... */
1710 case OP_MOV_IMM_WIDE
:
1711 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
1713 set_other_error (mismatch_detail
, idx
,
1714 _("immediate out of range"));
1718 case OP_MOV_IMM_LOG
:
1719 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
1721 set_other_error (mismatch_detail
, idx
,
1722 _("immediate out of range"));
1733 case AARCH64_OPND_NZCV
:
1734 case AARCH64_OPND_CCMP_IMM
:
1735 case AARCH64_OPND_EXCEPTION
:
1736 case AARCH64_OPND_UIMM4
:
1737 case AARCH64_OPND_UIMM7
:
1738 case AARCH64_OPND_UIMM3_OP1
:
1739 case AARCH64_OPND_UIMM3_OP2
:
1740 size
= get_operand_fields_width (get_operand_from_code (type
));
1742 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
1744 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1750 case AARCH64_OPND_WIDTH
:
1751 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
1752 && opnds
[0].type
== AARCH64_OPND_Rd
);
1753 size
= get_upper_bound (qualifier
);
1754 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
1755 /* lsb+width <= reg.size */
1757 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
1758 size
- opnds
[idx
-1].imm
.value
);
1763 case AARCH64_OPND_LIMM
:
1765 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1766 uint64_t uimm
= opnd
->imm
.value
;
1767 if (opcode
->op
== OP_BIC
)
1769 if (aarch64_logical_immediate_p (uimm
, esize
, NULL
) == FALSE
)
1771 set_other_error (mismatch_detail
, idx
,
1772 _("immediate out of range"));
1778 case AARCH64_OPND_IMM0
:
1779 case AARCH64_OPND_FPIMM0
:
1780 if (opnd
->imm
.value
!= 0)
1782 set_other_error (mismatch_detail
, idx
,
1783 _("immediate zero expected"));
1788 case AARCH64_OPND_SHLL_IMM
:
1790 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
1791 if (opnd
->imm
.value
!= size
)
1793 set_other_error (mismatch_detail
, idx
,
1794 _("invalid shift amount"));
1799 case AARCH64_OPND_IMM_VLSL
:
1800 size
= aarch64_get_qualifier_esize (qualifier
);
1801 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
1803 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1809 case AARCH64_OPND_IMM_VLSR
:
1810 size
= aarch64_get_qualifier_esize (qualifier
);
1811 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
1813 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
1818 case AARCH64_OPND_SIMD_IMM
:
1819 case AARCH64_OPND_SIMD_IMM_SFT
:
1820 /* Qualifier check. */
1823 case AARCH64_OPND_QLF_LSL
:
1824 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1826 set_other_error (mismatch_detail
, idx
,
1827 _("invalid shift operator"));
1831 case AARCH64_OPND_QLF_MSL
:
1832 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
1834 set_other_error (mismatch_detail
, idx
,
1835 _("invalid shift operator"));
1839 case AARCH64_OPND_QLF_NIL
:
1840 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1842 set_other_error (mismatch_detail
, idx
,
1843 _("shift is not permitted"));
1851 /* Is the immediate valid? */
1853 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
1855 /* uimm8 or simm8 */
1856 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
1858 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
1862 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
1865 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1866 ffffffffgggggggghhhhhhhh'. */
1867 set_other_error (mismatch_detail
, idx
,
1868 _("invalid value for immediate"));
1871 /* Is the shift amount valid? */
1872 switch (opnd
->shifter
.kind
)
1874 case AARCH64_MOD_LSL
:
1875 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1876 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
1878 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
1882 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
1884 set_unaligned_error (mismatch_detail
, idx
, 8);
1888 case AARCH64_MOD_MSL
:
1889 /* Only 8 and 16 are valid shift amount. */
1890 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
1892 set_other_error (mismatch_detail
, idx
,
1893 _("shift amount expected to be 0 or 16"));
1898 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1900 set_other_error (mismatch_detail
, idx
,
1901 _("invalid shift operator"));
1908 case AARCH64_OPND_FPIMM
:
1909 case AARCH64_OPND_SIMD_FPIMM
:
1910 if (opnd
->imm
.is_fp
== 0)
1912 set_other_error (mismatch_detail
, idx
,
1913 _("floating-point immediate expected"));
1916 /* The value is expected to be an 8-bit floating-point constant with
1917 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1918 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1920 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
1922 set_other_error (mismatch_detail
, idx
,
1923 _("immediate out of range"));
1926 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1928 set_other_error (mismatch_detail
, idx
,
1929 _("invalid shift operator"));
1939 case AARCH64_OPND_CLASS_CP_REG
:
1940 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1941 valid range: C0 - C15. */
1942 if (opnd
->reg
.regno
> 15)
1944 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
1949 case AARCH64_OPND_CLASS_SYSTEM
:
1952 case AARCH64_OPND_PSTATEFIELD
:
1953 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
1956 The immediate must be #0 or #1. */
1957 if ((opnd
->pstatefield
== 0x03 /* UAO. */
1958 || opnd
->pstatefield
== 0x04) /* PAN. */
1959 && opnds
[1].imm
.value
> 1)
1961 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
1964 /* MSR SPSel, #uimm4
1965 Uses uimm4 as a control value to select the stack pointer: if
1966 bit 0 is set it selects the current exception level's stack
1967 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1968 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1969 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
1971 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
1980 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
1981 /* Get the upper bound for the element index. */
1982 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1983 /* Index out-of-range. */
1984 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
1986 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1989 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1990 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
1991 number is encoded in "size:M:Rm":
1997 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
1998 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2000 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2005 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2006 assert (idx
== 1 || idx
== 2);
2009 case AARCH64_OPND_Rm_EXT
:
2010 if (aarch64_extend_operator_p (opnd
->shifter
.kind
) == FALSE
2011 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2013 set_other_error (mismatch_detail
, idx
,
2014 _("extend operator expected"));
2017 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2018 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2019 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2021 if (!aarch64_stack_pointer_p (opnds
+ 0)
2022 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2024 if (!opnd
->shifter
.operator_present
)
2026 set_other_error (mismatch_detail
, idx
,
2027 _("missing extend operator"));
2030 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2032 set_other_error (mismatch_detail
, idx
,
2033 _("'LSL' operator not allowed"));
2037 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2038 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2039 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2041 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2044 /* In the 64-bit form, the final register operand is written as Wm
2045 for all but the (possibly omitted) UXTX/LSL and SXTX
2047 N.B. GAS allows X register to be used with any operator as a
2048 programming convenience. */
2049 if (qualifier
== AARCH64_OPND_QLF_X
2050 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2051 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2052 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2054 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2059 case AARCH64_OPND_Rm_SFT
:
2060 /* ROR is not available to the shifted register operand in
2061 arithmetic instructions. */
2062 if (aarch64_shift_operator_p (opnd
->shifter
.kind
) == FALSE
)
2064 set_other_error (mismatch_detail
, idx
,
2065 _("shift operator expected"));
2068 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2069 && opcode
->iclass
!= log_shift
)
2071 set_other_error (mismatch_detail
, idx
,
2072 _("'ROR' operator not allowed"));
2075 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2076 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2078 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2095 /* Main entrypoint for the operand constraint checking.
2097 Return 1 if operands of *INST meet the constraint applied by the operand
2098 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2099 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2100 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2101 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2102 error kind when it is notified that an instruction does not pass the check).
2104 Un-determined operand qualifiers may get established during the process. */
2107 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2108 aarch64_operand_error
*mismatch_detail
)
2112 DEBUG_TRACE ("enter");
2114 /* Check for cases where a source register needs to be the same as the
2115 destination register. Do this before matching qualifiers since if
2116 an instruction has both invalid tying and invalid qualifiers,
2117 the error about qualifiers would suggest several alternative
2118 instructions that also have invalid tying. */
2119 i
= inst
->opcode
->tied_operand
;
2120 if (i
> 0 && (inst
->operands
[0].reg
.regno
!= inst
->operands
[i
].reg
.regno
))
2122 if (mismatch_detail
)
2124 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
2125 mismatch_detail
->index
= i
;
2126 mismatch_detail
->error
= NULL
;
2131 /* Match operands' qualifier.
2132 *INST has already had qualifier establish for some, if not all, of
2133 its operands; we need to find out whether these established
2134 qualifiers match one of the qualifier sequence in
2135 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2136 with the corresponding qualifier in such a sequence.
2137 Only basic operand constraint checking is done here; the more thorough
2138 constraint checking will carried out by operand_general_constraint_met_p,
2139 which has be to called after this in order to get all of the operands'
2140 qualifiers established. */
2141 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2143 DEBUG_TRACE ("FAIL on operand qualifier matching");
2144 if (mismatch_detail
)
2146 /* Return an error type to indicate that it is the qualifier
2147 matching failure; we don't care about which operand as there
2148 are enough information in the opcode table to reproduce it. */
2149 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2150 mismatch_detail
->index
= -1;
2151 mismatch_detail
->error
= NULL
;
2156 /* Match operands' constraint. */
2157 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2159 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2160 if (type
== AARCH64_OPND_NIL
)
2162 if (inst
->operands
[i
].skip
)
2164 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2167 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2168 inst
->opcode
, mismatch_detail
) == 0)
2170 DEBUG_TRACE ("FAIL on operand %d", i
);
2175 DEBUG_TRACE ("PASS");
2180 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2181 Also updates the TYPE of each INST->OPERANDS with the corresponding
2182 value of OPCODE->OPERANDS.
2184 Note that some operand qualifiers may need to be manually cleared by
2185 the caller before it further calls the aarch64_opcode_encode; by
2186 doing this, it helps the qualifier matching facilities work
2189 const aarch64_opcode
*
2190 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2193 const aarch64_opcode
*old
= inst
->opcode
;
2195 inst
->opcode
= opcode
;
2197 /* Update the operand types. */
2198 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2200 inst
->operands
[i
].type
= opcode
->operands
[i
];
2201 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2205 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2211 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2214 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2215 if (operands
[i
] == operand
)
2217 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Integer register name table, indexed as:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R32
#undef R64
};
2242 /* Return the integer register name.
2243 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2245 static inline const char *
2246 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2248 const int has_zr
= sp_reg_p
? 0 : 1;
2249 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2250 return int_reg
[has_zr
][is_64
][regno
];
2253 /* Like get_int_reg_name, but IS_64 is always 1. */
2255 static inline const char *
2256 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2258 const int has_zr
= sp_reg_p
? 0 : 1;
2259 return int_reg
[has_zr
][1][regno
];
2262 /* Get the name of the integer offset register in OPND, using the shift type
2263 to decide whether it's a word or doubleword. */
2265 static inline const char *
2266 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
2268 switch (opnd
->shifter
.kind
)
2270 case AARCH64_MOD_UXTW
:
2271 case AARCH64_MOD_SXTW
:
2272 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
2274 case AARCH64_MOD_LSL
:
2275 case AARCH64_MOD_SXTX
:
2276 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
2283 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2347 /* Produce the string representation of the register list operand *OPND
2348 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2349 the register name that comes before the register number, such as "v". */
2351 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
2354 const int num_regs
= opnd
->reglist
.num_regs
;
2355 const int first_reg
= opnd
->reglist
.first_regno
;
2356 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2357 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2358 char tb
[8]; /* Temporary buffer. */
2360 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2361 assert (num_regs
>= 1 && num_regs
<= 4);
2363 /* Prepare the index if any. */
2364 if (opnd
->reglist
.has_index
)
2365 snprintf (tb
, 8, "[%" PRIi64
"]", opnd
->reglist
.index
);
2369 /* The hyphenated form is preferred for disassembly if there are
2370 more than two registers in the list, and the register numbers
2371 are monotonically increasing in increments of one. */
2372 if (num_regs
> 2 && last_reg
> first_reg
)
2373 snprintf (buf
, size
, "{%s%d.%s-%s%d.%s}%s", prefix
, first_reg
, qlf_name
,
2374 prefix
, last_reg
, qlf_name
, tb
);
2377 const int reg0
= first_reg
;
2378 const int reg1
= (first_reg
+ 1) & 0x1f;
2379 const int reg2
= (first_reg
+ 2) & 0x1f;
2380 const int reg3
= (first_reg
+ 3) & 0x1f;
2385 snprintf (buf
, size
, "{%s%d.%s}%s", prefix
, reg0
, qlf_name
, tb
);
2388 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s}%s", prefix
, reg0
, qlf_name
,
2389 prefix
, reg1
, qlf_name
, tb
);
2392 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2393 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2394 prefix
, reg2
, qlf_name
, tb
);
2397 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2398 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2399 prefix
, reg2
, qlf_name
, prefix
, reg3
, qlf_name
, tb
);
2405 /* Print the register+immediate address in OPND to BUF, which has SIZE
2406 characters. BASE is the name of the base register. */
2409 print_immediate_offset_address (char *buf
, size_t size
,
2410 const aarch64_opnd_info
*opnd
,
2413 if (opnd
->addr
.writeback
)
2415 if (opnd
->addr
.preind
)
2416 snprintf (buf
, size
, "[%s,#%d]!", base
, opnd
->addr
.offset
.imm
);
2418 snprintf (buf
, size
, "[%s],#%d", base
, opnd
->addr
.offset
.imm
);
2422 if (opnd
->addr
.offset
.imm
)
2423 snprintf (buf
, size
, "[%s,#%d]", base
, opnd
->addr
.offset
.imm
);
2425 snprintf (buf
, size
, "[%s]", base
);
2429 /* Produce the string representation of the register offset address operand
2430 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2431 the names of the base and offset registers. */
2433 print_register_offset_address (char *buf
, size_t size
,
2434 const aarch64_opnd_info
*opnd
,
2435 const char *base
, const char *offset
)
2437 char tb
[16]; /* Temporary buffer. */
2438 bfd_boolean print_extend_p
= TRUE
;
2439 bfd_boolean print_amount_p
= TRUE
;
2440 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
2442 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
2443 || !opnd
->shifter
.amount_present
))
2445 /* Not print the shift/extend amount when the amount is zero and
2446 when it is not the special case of 8-bit load/store instruction. */
2447 print_amount_p
= FALSE
;
2448 /* Likewise, no need to print the shift operator LSL in such a
2450 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2451 print_extend_p
= FALSE
;
2454 /* Prepare for the extend/shift. */
2458 snprintf (tb
, sizeof (tb
), ",%s #%d", shift_name
, opnd
->shifter
.amount
);
2460 snprintf (tb
, sizeof (tb
), ",%s", shift_name
);
2465 snprintf (buf
, size
, "[%s,%s%s]", base
, offset
, tb
);
2468 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2469 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2470 PC, PCREL_P and ADDRESS are used to pass in and return information about
2471 the PC-relative address calculation, where the PC value is passed in
2472 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2473 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2474 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2476 The function serves both the disassembler and the assembler diagnostics
2477 issuer, which is the reason why it lives in this file. */
2480 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
2481 const aarch64_opcode
*opcode
,
2482 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
2486 const char *name
= NULL
;
2487 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
2488 enum aarch64_modifier_kind kind
;
2497 case AARCH64_OPND_Rd
:
2498 case AARCH64_OPND_Rn
:
2499 case AARCH64_OPND_Rm
:
2500 case AARCH64_OPND_Rt
:
2501 case AARCH64_OPND_Rt2
:
2502 case AARCH64_OPND_Rs
:
2503 case AARCH64_OPND_Ra
:
2504 case AARCH64_OPND_Rt_SYS
:
2505 case AARCH64_OPND_PAIRREG
:
2506 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2507 the <ic_op>, therefore we we use opnd->present to override the
2508 generic optional-ness information. */
2509 if (opnd
->type
== AARCH64_OPND_Rt_SYS
&& !opnd
->present
)
2511 /* Omit the operand, e.g. RET. */
2512 if (optional_operand_p (opcode
, idx
)
2513 && opnd
->reg
.regno
== get_optional_operand_default_value (opcode
))
2515 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2516 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2517 snprintf (buf
, size
, "%s",
2518 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2521 case AARCH64_OPND_Rd_SP
:
2522 case AARCH64_OPND_Rn_SP
:
2523 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2524 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
2525 || opnd
->qualifier
== AARCH64_OPND_QLF_X
2526 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
2527 snprintf (buf
, size
, "%s",
2528 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
2531 case AARCH64_OPND_Rm_EXT
:
2532 kind
= opnd
->shifter
.kind
;
2533 assert (idx
== 1 || idx
== 2);
2534 if ((aarch64_stack_pointer_p (opnds
)
2535 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
2536 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
2537 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
2538 && kind
== AARCH64_MOD_UXTW
)
2539 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
2540 && kind
== AARCH64_MOD_UXTX
)))
2542 /* 'LSL' is the preferred form in this case. */
2543 kind
= AARCH64_MOD_LSL
;
2544 if (opnd
->shifter
.amount
== 0)
2546 /* Shifter omitted. */
2547 snprintf (buf
, size
, "%s",
2548 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2552 if (opnd
->shifter
.amount
)
2553 snprintf (buf
, size
, "%s, %s #%d",
2554 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2555 aarch64_operand_modifiers
[kind
].name
,
2556 opnd
->shifter
.amount
);
2558 snprintf (buf
, size
, "%s, %s",
2559 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2560 aarch64_operand_modifiers
[kind
].name
);
2563 case AARCH64_OPND_Rm_SFT
:
2564 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2565 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2566 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2567 snprintf (buf
, size
, "%s",
2568 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2570 snprintf (buf
, size
, "%s, %s #%d",
2571 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2572 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2573 opnd
->shifter
.amount
);
2576 case AARCH64_OPND_Fd
:
2577 case AARCH64_OPND_Fn
:
2578 case AARCH64_OPND_Fm
:
2579 case AARCH64_OPND_Fa
:
2580 case AARCH64_OPND_Ft
:
2581 case AARCH64_OPND_Ft2
:
2582 case AARCH64_OPND_Sd
:
2583 case AARCH64_OPND_Sn
:
2584 case AARCH64_OPND_Sm
:
2585 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
2589 case AARCH64_OPND_Vd
:
2590 case AARCH64_OPND_Vn
:
2591 case AARCH64_OPND_Vm
:
2592 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
2593 aarch64_get_qualifier_name (opnd
->qualifier
));
2596 case AARCH64_OPND_Ed
:
2597 case AARCH64_OPND_En
:
2598 case AARCH64_OPND_Em
:
2599 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
2600 aarch64_get_qualifier_name (opnd
->qualifier
),
2601 opnd
->reglane
.index
);
2604 case AARCH64_OPND_VdD1
:
2605 case AARCH64_OPND_VnD1
:
2606 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
2609 case AARCH64_OPND_LVn
:
2610 case AARCH64_OPND_LVt
:
2611 case AARCH64_OPND_LVt_AL
:
2612 case AARCH64_OPND_LEt
:
2613 print_register_list (buf
, size
, opnd
, "v");
2616 case AARCH64_OPND_SVE_Pd
:
2617 case AARCH64_OPND_SVE_Pg3
:
2618 case AARCH64_OPND_SVE_Pg4_5
:
2619 case AARCH64_OPND_SVE_Pg4_10
:
2620 case AARCH64_OPND_SVE_Pg4_16
:
2621 case AARCH64_OPND_SVE_Pm
:
2622 case AARCH64_OPND_SVE_Pn
:
2623 case AARCH64_OPND_SVE_Pt
:
2624 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
2625 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
2627 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
2628 aarch64_get_qualifier_name (opnd
->qualifier
));
2631 case AARCH64_OPND_SVE_Za_5
:
2632 case AARCH64_OPND_SVE_Za_16
:
2633 case AARCH64_OPND_SVE_Zd
:
2634 case AARCH64_OPND_SVE_Zm_5
:
2635 case AARCH64_OPND_SVE_Zm_16
:
2636 case AARCH64_OPND_SVE_Zn
:
2637 case AARCH64_OPND_SVE_Zt
:
2638 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
2639 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
2641 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
2642 aarch64_get_qualifier_name (opnd
->qualifier
));
2645 case AARCH64_OPND_SVE_ZnxN
:
2646 case AARCH64_OPND_SVE_ZtxN
:
2647 print_register_list (buf
, size
, opnd
, "z");
2650 case AARCH64_OPND_SVE_Zn_INDEX
:
2651 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
2652 aarch64_get_qualifier_name (opnd
->qualifier
),
2653 opnd
->reglane
.index
);
2656 case AARCH64_OPND_Cn
:
2657 case AARCH64_OPND_Cm
:
2658 snprintf (buf
, size
, "C%d", opnd
->reg
.regno
);
2661 case AARCH64_OPND_IDX
:
2662 case AARCH64_OPND_IMM
:
2663 case AARCH64_OPND_WIDTH
:
2664 case AARCH64_OPND_UIMM3_OP1
:
2665 case AARCH64_OPND_UIMM3_OP2
:
2666 case AARCH64_OPND_BIT_NUM
:
2667 case AARCH64_OPND_IMM_VLSL
:
2668 case AARCH64_OPND_IMM_VLSR
:
2669 case AARCH64_OPND_SHLL_IMM
:
2670 case AARCH64_OPND_IMM0
:
2671 case AARCH64_OPND_IMMR
:
2672 case AARCH64_OPND_IMMS
:
2673 case AARCH64_OPND_FBITS
:
2674 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
2677 case AARCH64_OPND_IMM_MOV
:
2678 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
2680 case 4: /* e.g. MOV Wd, #<imm32>. */
2682 int imm32
= opnd
->imm
.value
;
2683 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
2686 case 8: /* e.g. MOV Xd, #<imm64>. */
2687 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
2688 opnd
->imm
.value
, opnd
->imm
.value
);
2690 default: assert (0);
2694 case AARCH64_OPND_FPIMM0
:
2695 snprintf (buf
, size
, "#0.0");
2698 case AARCH64_OPND_LIMM
:
2699 case AARCH64_OPND_AIMM
:
2700 case AARCH64_OPND_HALF
:
2701 if (opnd
->shifter
.amount
)
2702 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%d", opnd
->imm
.value
,
2703 opnd
->shifter
.amount
);
2705 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
2708 case AARCH64_OPND_SIMD_IMM
:
2709 case AARCH64_OPND_SIMD_IMM_SFT
:
2710 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2711 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
2712 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
2714 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%d", opnd
->imm
.value
,
2715 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2716 opnd
->shifter
.amount
);
2719 case AARCH64_OPND_FPIMM
:
2720 case AARCH64_OPND_SIMD_FPIMM
:
2721 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
2723 case 2: /* e.g. FMOV <Hd>, #<imm>. */
2726 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
2727 snprintf (buf
, size
, "#%.18e", c
.f
);
2730 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2733 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
2734 snprintf (buf
, size
, "#%.18e", c
.f
);
2737 case 8: /* e.g. FMOV <Sd>, #<imm>. */
2740 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
2741 snprintf (buf
, size
, "#%.18e", c
.d
);
2744 default: assert (0);
2748 case AARCH64_OPND_CCMP_IMM
:
2749 case AARCH64_OPND_NZCV
:
2750 case AARCH64_OPND_EXCEPTION
:
2751 case AARCH64_OPND_UIMM4
:
2752 case AARCH64_OPND_UIMM7
:
2753 if (optional_operand_p (opcode
, idx
) == TRUE
2754 && (opnd
->imm
.value
==
2755 (int64_t) get_optional_operand_default_value (opcode
)))
2756 /* Omit the operand, e.g. DCPS1. */
2758 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
2761 case AARCH64_OPND_COND
:
2762 case AARCH64_OPND_COND1
:
2763 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
2766 case AARCH64_OPND_ADDR_ADRP
:
2767 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
2773 /* This is not necessary during the disassembling, as print_address_func
2774 in the disassemble_info will take care of the printing. But some
2775 other callers may be still interested in getting the string in *STR,
2776 so here we do snprintf regardless. */
2777 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
2780 case AARCH64_OPND_ADDR_PCREL14
:
2781 case AARCH64_OPND_ADDR_PCREL19
:
2782 case AARCH64_OPND_ADDR_PCREL21
:
2783 case AARCH64_OPND_ADDR_PCREL26
:
2784 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
2789 /* This is not necessary during the disassembling, as print_address_func
2790 in the disassemble_info will take care of the printing. But some
2791 other callers may be still interested in getting the string in *STR,
2792 so here we do snprintf regardless. */
2793 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
2796 case AARCH64_OPND_ADDR_SIMPLE
:
2797 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
2798 case AARCH64_OPND_SIMD_ADDR_POST
:
2799 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2800 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
2802 if (opnd
->addr
.offset
.is_reg
)
2803 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
2805 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
2808 snprintf (buf
, size
, "[%s]", name
);
2811 case AARCH64_OPND_ADDR_REGOFF
:
2812 print_register_offset_address
2813 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
2814 get_offset_int_reg_name (opnd
));
2817 case AARCH64_OPND_ADDR_SIMM7
:
2818 case AARCH64_OPND_ADDR_SIMM9
:
2819 case AARCH64_OPND_ADDR_SIMM9_2
:
2820 print_immediate_offset_address
2821 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
2824 case AARCH64_OPND_ADDR_UIMM12
:
2825 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2826 if (opnd
->addr
.offset
.imm
)
2827 snprintf (buf
, size
, "[%s,#%d]", name
, opnd
->addr
.offset
.imm
);
2829 snprintf (buf
, size
, "[%s]", name
);
2832 case AARCH64_OPND_SYSREG
:
2833 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
2834 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
2835 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
]))
2837 if (aarch64_sys_regs
[i
].name
)
2838 snprintf (buf
, size
, "%s", aarch64_sys_regs
[i
].name
);
2841 /* Implementation defined system register. */
2842 unsigned int value
= opnd
->sysreg
;
2843 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
2844 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
2849 case AARCH64_OPND_PSTATEFIELD
:
2850 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
2851 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
2853 assert (aarch64_pstatefields
[i
].name
);
2854 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
2857 case AARCH64_OPND_SYSREG_AT
:
2858 case AARCH64_OPND_SYSREG_DC
:
2859 case AARCH64_OPND_SYSREG_IC
:
2860 case AARCH64_OPND_SYSREG_TLBI
:
2861 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
2864 case AARCH64_OPND_BARRIER
:
2865 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
2868 case AARCH64_OPND_BARRIER_ISB
:
2869 /* Operand can be omitted, e.g. in DCPS1. */
2870 if (! optional_operand_p (opcode
, idx
)
2871 || (opnd
->barrier
->value
2872 != get_optional_operand_default_value (opcode
)))
2873 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
2876 case AARCH64_OPND_PRFOP
:
2877 if (opnd
->prfop
->name
!= NULL
)
2878 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
2880 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
2883 case AARCH64_OPND_BARRIER_PSB
:
2884 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
2892 #define CPENC(op0,op1,crn,crm,op2) \
2893 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2894 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
2895 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2896 /* for 3.9.10 System Instructions */
2897 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
2919 #define F_DEPRECATED 0x1 /* Deprecated system register. */
2924 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
2929 #define F_HASXT 0x4 /* System instruction register <Xt>
2933 /* TODO there are two more issues need to be resolved
2934 1. handle read-only and write-only system registers
2935 2. handle cpu-implementation-defined system registers. */
2936 const aarch64_sys_reg aarch64_sys_regs
[] =
2938 { "spsr_el1", CPEN_(0,C0
,0), 0 }, /* = spsr_svc */
2939 { "spsr_el12", CPEN_ (5, C0
, 0), F_ARCHEXT
},
2940 { "elr_el1", CPEN_(0,C0
,1), 0 },
2941 { "elr_el12", CPEN_ (5, C0
, 1), F_ARCHEXT
},
2942 { "sp_el0", CPEN_(0,C1
,0), 0 },
2943 { "spsel", CPEN_(0,C2
,0), 0 },
2944 { "daif", CPEN_(3,C2
,1), 0 },
2945 { "currentel", CPEN_(0,C2
,2), 0 }, /* RO */
2946 { "pan", CPEN_(0,C2
,3), F_ARCHEXT
},
2947 { "uao", CPEN_ (0, C2
, 4), F_ARCHEXT
},
2948 { "nzcv", CPEN_(3,C2
,0), 0 },
2949 { "fpcr", CPEN_(3,C4
,0), 0 },
2950 { "fpsr", CPEN_(3,C4
,1), 0 },
2951 { "dspsr_el0", CPEN_(3,C5
,0), 0 },
2952 { "dlr_el0", CPEN_(3,C5
,1), 0 },
2953 { "spsr_el2", CPEN_(4,C0
,0), 0 }, /* = spsr_hyp */
2954 { "elr_el2", CPEN_(4,C0
,1), 0 },
2955 { "sp_el1", CPEN_(4,C1
,0), 0 },
2956 { "spsr_irq", CPEN_(4,C3
,0), 0 },
2957 { "spsr_abt", CPEN_(4,C3
,1), 0 },
2958 { "spsr_und", CPEN_(4,C3
,2), 0 },
2959 { "spsr_fiq", CPEN_(4,C3
,3), 0 },
2960 { "spsr_el3", CPEN_(6,C0
,0), 0 },
2961 { "elr_el3", CPEN_(6,C0
,1), 0 },
2962 { "sp_el2", CPEN_(6,C1
,0), 0 },
2963 { "spsr_svc", CPEN_(0,C0
,0), F_DEPRECATED
}, /* = spsr_el1 */
2964 { "spsr_hyp", CPEN_(4,C0
,0), F_DEPRECATED
}, /* = spsr_el2 */
2965 { "midr_el1", CPENC(3,0,C0
,C0
,0), 0 }, /* RO */
2966 { "ctr_el0", CPENC(3,3,C0
,C0
,1), 0 }, /* RO */
2967 { "mpidr_el1", CPENC(3,0,C0
,C0
,5), 0 }, /* RO */
2968 { "revidr_el1", CPENC(3,0,C0
,C0
,6), 0 }, /* RO */
2969 { "aidr_el1", CPENC(3,1,C0
,C0
,7), 0 }, /* RO */
2970 { "dczid_el0", CPENC(3,3,C0
,C0
,7), 0 }, /* RO */
2971 { "id_dfr0_el1", CPENC(3,0,C0
,C1
,2), 0 }, /* RO */
2972 { "id_pfr0_el1", CPENC(3,0,C0
,C1
,0), 0 }, /* RO */
2973 { "id_pfr1_el1", CPENC(3,0,C0
,C1
,1), 0 }, /* RO */
2974 { "id_afr0_el1", CPENC(3,0,C0
,C1
,3), 0 }, /* RO */
2975 { "id_mmfr0_el1", CPENC(3,0,C0
,C1
,4), 0 }, /* RO */
2976 { "id_mmfr1_el1", CPENC(3,0,C0
,C1
,5), 0 }, /* RO */
2977 { "id_mmfr2_el1", CPENC(3,0,C0
,C1
,6), 0 }, /* RO */
2978 { "id_mmfr3_el1", CPENC(3,0,C0
,C1
,7), 0 }, /* RO */
2979 { "id_mmfr4_el1", CPENC(3,0,C0
,C2
,6), 0 }, /* RO */
2980 { "id_isar0_el1", CPENC(3,0,C0
,C2
,0), 0 }, /* RO */
2981 { "id_isar1_el1", CPENC(3,0,C0
,C2
,1), 0 }, /* RO */
2982 { "id_isar2_el1", CPENC(3,0,C0
,C2
,2), 0 }, /* RO */
2983 { "id_isar3_el1", CPENC(3,0,C0
,C2
,3), 0 }, /* RO */
2984 { "id_isar4_el1", CPENC(3,0,C0
,C2
,4), 0 }, /* RO */
2985 { "id_isar5_el1", CPENC(3,0,C0
,C2
,5), 0 }, /* RO */
2986 { "mvfr0_el1", CPENC(3,0,C0
,C3
,0), 0 }, /* RO */
2987 { "mvfr1_el1", CPENC(3,0,C0
,C3
,1), 0 }, /* RO */
2988 { "mvfr2_el1", CPENC(3,0,C0
,C3
,2), 0 }, /* RO */
2989 { "ccsidr_el1", CPENC(3,1,C0
,C0
,0), 0 }, /* RO */
2990 { "id_aa64pfr0_el1", CPENC(3,0,C0
,C4
,0), 0 }, /* RO */
2991 { "id_aa64pfr1_el1", CPENC(3,0,C0
,C4
,1), 0 }, /* RO */
2992 { "id_aa64dfr0_el1", CPENC(3,0,C0
,C5
,0), 0 }, /* RO */
2993 { "id_aa64dfr1_el1", CPENC(3,0,C0
,C5
,1), 0 }, /* RO */
2994 { "id_aa64isar0_el1", CPENC(3,0,C0
,C6
,0), 0 }, /* RO */
2995 { "id_aa64isar1_el1", CPENC(3,0,C0
,C6
,1), 0 }, /* RO */
2996 { "id_aa64mmfr0_el1", CPENC(3,0,C0
,C7
,0), 0 }, /* RO */
2997 { "id_aa64mmfr1_el1", CPENC(3,0,C0
,C7
,1), 0 }, /* RO */
2998 { "id_aa64mmfr2_el1", CPENC (3, 0, C0
, C7
, 2), F_ARCHEXT
}, /* RO */
2999 { "id_aa64afr0_el1", CPENC(3,0,C0
,C5
,4), 0 }, /* RO */
3000 { "id_aa64afr1_el1", CPENC(3,0,C0
,C5
,5), 0 }, /* RO */
3001 { "clidr_el1", CPENC(3,1,C0
,C0
,1), 0 }, /* RO */
3002 { "csselr_el1", CPENC(3,2,C0
,C0
,0), 0 }, /* RO */
3003 { "vpidr_el2", CPENC(3,4,C0
,C0
,0), 0 },
3004 { "vmpidr_el2", CPENC(3,4,C0
,C0
,5), 0 },
3005 { "sctlr_el1", CPENC(3,0,C1
,C0
,0), 0 },
3006 { "sctlr_el2", CPENC(3,4,C1
,C0
,0), 0 },
3007 { "sctlr_el3", CPENC(3,6,C1
,C0
,0), 0 },
3008 { "sctlr_el12", CPENC (3, 5, C1
, C0
, 0), F_ARCHEXT
},
3009 { "actlr_el1", CPENC(3,0,C1
,C0
,1), 0 },
3010 { "actlr_el2", CPENC(3,4,C1
,C0
,1), 0 },
3011 { "actlr_el3", CPENC(3,6,C1
,C0
,1), 0 },
3012 { "cpacr_el1", CPENC(3,0,C1
,C0
,2), 0 },
3013 { "cpacr_el12", CPENC (3, 5, C1
, C0
, 2), F_ARCHEXT
},
3014 { "cptr_el2", CPENC(3,4,C1
,C1
,2), 0 },
3015 { "cptr_el3", CPENC(3,6,C1
,C1
,2), 0 },
3016 { "scr_el3", CPENC(3,6,C1
,C1
,0), 0 },
3017 { "hcr_el2", CPENC(3,4,C1
,C1
,0), 0 },
3018 { "mdcr_el2", CPENC(3,4,C1
,C1
,1), 0 },
3019 { "mdcr_el3", CPENC(3,6,C1
,C3
,1), 0 },
3020 { "hstr_el2", CPENC(3,4,C1
,C1
,3), 0 },
3021 { "hacr_el2", CPENC(3,4,C1
,C1
,7), 0 },
3022 { "ttbr0_el1", CPENC(3,0,C2
,C0
,0), 0 },
3023 { "ttbr1_el1", CPENC(3,0,C2
,C0
,1), 0 },
3024 { "ttbr0_el2", CPENC(3,4,C2
,C0
,0), 0 },
3025 { "ttbr1_el2", CPENC (3, 4, C2
, C0
, 1), F_ARCHEXT
},
3026 { "ttbr0_el3", CPENC(3,6,C2
,C0
,0), 0 },
3027 { "ttbr0_el12", CPENC (3, 5, C2
, C0
, 0), F_ARCHEXT
},
3028 { "ttbr1_el12", CPENC (3, 5, C2
, C0
, 1), F_ARCHEXT
},
3029 { "vttbr_el2", CPENC(3,4,C2
,C1
,0), 0 },
3030 { "tcr_el1", CPENC(3,0,C2
,C0
,2), 0 },
3031 { "tcr_el2", CPENC(3,4,C2
,C0
,2), 0 },
3032 { "tcr_el3", CPENC(3,6,C2
,C0
,2), 0 },
3033 { "tcr_el12", CPENC (3, 5, C2
, C0
, 2), F_ARCHEXT
},
3034 { "vtcr_el2", CPENC(3,4,C2
,C1
,2), 0 },
3035 { "afsr0_el1", CPENC(3,0,C5
,C1
,0), 0 },
3036 { "afsr1_el1", CPENC(3,0,C5
,C1
,1), 0 },
3037 { "afsr0_el2", CPENC(3,4,C5
,C1
,0), 0 },
3038 { "afsr1_el2", CPENC(3,4,C5
,C1
,1), 0 },
3039 { "afsr0_el3", CPENC(3,6,C5
,C1
,0), 0 },
3040 { "afsr0_el12", CPENC (3, 5, C5
, C1
, 0), F_ARCHEXT
},
3041 { "afsr1_el3", CPENC(3,6,C5
,C1
,1), 0 },
3042 { "afsr1_el12", CPENC (3, 5, C5
, C1
, 1), F_ARCHEXT
},
3043 { "esr_el1", CPENC(3,0,C5
,C2
,0), 0 },
3044 { "esr_el2", CPENC(3,4,C5
,C2
,0), 0 },
3045 { "esr_el3", CPENC(3,6,C5
,C2
,0), 0 },
3046 { "esr_el12", CPENC (3, 5, C5
, C2
, 0), F_ARCHEXT
},
3047 { "vsesr_el2", CPENC (3, 4, C5
, C2
, 3), F_ARCHEXT
}, /* RO */
3048 { "fpexc32_el2", CPENC(3,4,C5
,C3
,0), 0 },
3049 { "erridr_el1", CPENC (3, 0, C5
, C3
, 0), F_ARCHEXT
}, /* RO */
3050 { "errselr_el1", CPENC (3, 0, C5
, C3
, 1), F_ARCHEXT
},
3051 { "erxfr_el1", CPENC (3, 0, C5
, C4
, 0), F_ARCHEXT
}, /* RO */
3052 { "erxctlr_el1", CPENC (3, 0, C5
, C4
, 1), F_ARCHEXT
},
3053 { "erxstatus_el1", CPENC (3, 0, C5
, C4
, 2), F_ARCHEXT
},
3054 { "erxaddr_el1", CPENC (3, 0, C5
, C4
, 3), F_ARCHEXT
},
3055 { "erxmisc0_el1", CPENC (3, 0, C5
, C5
, 0), F_ARCHEXT
},
3056 { "erxmisc1_el1", CPENC (3, 0, C5
, C5
, 1), F_ARCHEXT
},
3057 { "far_el1", CPENC(3,0,C6
,C0
,0), 0 },
3058 { "far_el2", CPENC(3,4,C6
,C0
,0), 0 },
3059 { "far_el3", CPENC(3,6,C6
,C0
,0), 0 },
3060 { "far_el12", CPENC (3, 5, C6
, C0
, 0), F_ARCHEXT
},
3061 { "hpfar_el2", CPENC(3,4,C6
,C0
,4), 0 },
3062 { "par_el1", CPENC(3,0,C7
,C4
,0), 0 },
3063 { "mair_el1", CPENC(3,0,C10
,C2
,0), 0 },
3064 { "mair_el2", CPENC(3,4,C10
,C2
,0), 0 },
3065 { "mair_el3", CPENC(3,6,C10
,C2
,0), 0 },
3066 { "mair_el12", CPENC (3, 5, C10
, C2
, 0), F_ARCHEXT
},
3067 { "amair_el1", CPENC(3,0,C10
,C3
,0), 0 },
3068 { "amair_el2", CPENC(3,4,C10
,C3
,0), 0 },
3069 { "amair_el3", CPENC(3,6,C10
,C3
,0), 0 },
3070 { "amair_el12", CPENC (3, 5, C10
, C3
, 0), F_ARCHEXT
},
3071 { "vbar_el1", CPENC(3,0,C12
,C0
,0), 0 },
3072 { "vbar_el2", CPENC(3,4,C12
,C0
,0), 0 },
3073 { "vbar_el3", CPENC(3,6,C12
,C0
,0), 0 },
3074 { "vbar_el12", CPENC (3, 5, C12
, C0
, 0), F_ARCHEXT
},
3075 { "rvbar_el1", CPENC(3,0,C12
,C0
,1), 0 }, /* RO */
3076 { "rvbar_el2", CPENC(3,4,C12
,C0
,1), 0 }, /* RO */
3077 { "rvbar_el3", CPENC(3,6,C12
,C0
,1), 0 }, /* RO */
3078 { "rmr_el1", CPENC(3,0,C12
,C0
,2), 0 },
3079 { "rmr_el2", CPENC(3,4,C12
,C0
,2), 0 },
3080 { "rmr_el3", CPENC(3,6,C12
,C0
,2), 0 },
3081 { "isr_el1", CPENC(3,0,C12
,C1
,0), 0 }, /* RO */
3082 { "disr_el1", CPENC (3, 0, C12
, C1
, 1), F_ARCHEXT
},
3083 { "vdisr_el2", CPENC (3, 4, C12
, C1
, 1), F_ARCHEXT
},
3084 { "contextidr_el1", CPENC(3,0,C13
,C0
,1), 0 },
3085 { "contextidr_el2", CPENC (3, 4, C13
, C0
, 1), F_ARCHEXT
},
3086 { "contextidr_el12", CPENC (3, 5, C13
, C0
, 1), F_ARCHEXT
},
3087 { "tpidr_el0", CPENC(3,3,C13
,C0
,2), 0 },
3088 { "tpidrro_el0", CPENC(3,3,C13
,C0
,3), 0 }, /* RO */
3089 { "tpidr_el1", CPENC(3,0,C13
,C0
,4), 0 },
3090 { "tpidr_el2", CPENC(3,4,C13
,C0
,2), 0 },
3091 { "tpidr_el3", CPENC(3,6,C13
,C0
,2), 0 },
3092 { "teecr32_el1", CPENC(2,2,C0
, C0
,0), 0 }, /* See section 3.9.7.1 */
3093 { "cntfrq_el0", CPENC(3,3,C14
,C0
,0), 0 }, /* RO */
3094 { "cntpct_el0", CPENC(3,3,C14
,C0
,1), 0 }, /* RO */
3095 { "cntvct_el0", CPENC(3,3,C14
,C0
,2), 0 }, /* RO */
3096 { "cntvoff_el2", CPENC(3,4,C14
,C0
,3), 0 },
3097 { "cntkctl_el1", CPENC(3,0,C14
,C1
,0), 0 },
3098 { "cntkctl_el12", CPENC (3, 5, C14
, C1
, 0), F_ARCHEXT
},
3099 { "cnthctl_el2", CPENC(3,4,C14
,C1
,0), 0 },
3100 { "cntp_tval_el0", CPENC(3,3,C14
,C2
,0), 0 },
3101 { "cntp_tval_el02", CPENC (3, 5, C14
, C2
, 0), F_ARCHEXT
},
3102 { "cntp_ctl_el0", CPENC(3,3,C14
,C2
,1), 0 },
3103 { "cntp_ctl_el02", CPENC (3, 5, C14
, C2
, 1), F_ARCHEXT
},
3104 { "cntp_cval_el0", CPENC(3,3,C14
,C2
,2), 0 },
3105 { "cntp_cval_el02", CPENC (3, 5, C14
, C2
, 2), F_ARCHEXT
},
3106 { "cntv_tval_el0", CPENC(3,3,C14
,C3
,0), 0 },
3107 { "cntv_tval_el02", CPENC (3, 5, C14
, C3
, 0), F_ARCHEXT
},
3108 { "cntv_ctl_el0", CPENC(3,3,C14
,C3
,1), 0 },
3109 { "cntv_ctl_el02", CPENC (3, 5, C14
, C3
, 1), F_ARCHEXT
},
3110 { "cntv_cval_el0", CPENC(3,3,C14
,C3
,2), 0 },
3111 { "cntv_cval_el02", CPENC (3, 5, C14
, C3
, 2), F_ARCHEXT
},
3112 { "cnthp_tval_el2", CPENC(3,4,C14
,C2
,0), 0 },
3113 { "cnthp_ctl_el2", CPENC(3,4,C14
,C2
,1), 0 },
3114 { "cnthp_cval_el2", CPENC(3,4,C14
,C2
,2), 0 },
3115 { "cntps_tval_el1", CPENC(3,7,C14
,C2
,0), 0 },
3116 { "cntps_ctl_el1", CPENC(3,7,C14
,C2
,1), 0 },
3117 { "cntps_cval_el1", CPENC(3,7,C14
,C2
,2), 0 },
3118 { "cnthv_tval_el2", CPENC (3, 4, C14
, C3
, 0), F_ARCHEXT
},
3119 { "cnthv_ctl_el2", CPENC (3, 4, C14
, C3
, 1), F_ARCHEXT
},
3120 { "cnthv_cval_el2", CPENC (3, 4, C14
, C3
, 2), F_ARCHEXT
},
3121 { "dacr32_el2", CPENC(3,4,C3
,C0
,0), 0 },
3122 { "ifsr32_el2", CPENC(3,4,C5
,C0
,1), 0 },
3123 { "teehbr32_el1", CPENC(2,2,C1
,C0
,0), 0 },
3124 { "sder32_el3", CPENC(3,6,C1
,C1
,1), 0 },
3125 { "mdscr_el1", CPENC(2,0,C0
, C2
, 2), 0 },
3126 { "mdccsr_el0", CPENC(2,3,C0
, C1
, 0), 0 }, /* r */
3127 { "mdccint_el1", CPENC(2,0,C0
, C2
, 0), 0 },
3128 { "dbgdtr_el0", CPENC(2,3,C0
, C4
, 0), 0 },
3129 { "dbgdtrrx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* r */
3130 { "dbgdtrtx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* w */
3131 { "osdtrrx_el1", CPENC(2,0,C0
, C0
, 2), 0 }, /* r */
3132 { "osdtrtx_el1", CPENC(2,0,C0
, C3
, 2), 0 }, /* w */
3133 { "oseccr_el1", CPENC(2,0,C0
, C6
, 2), 0 },
3134 { "dbgvcr32_el2", CPENC(2,4,C0
, C7
, 0), 0 },
3135 { "dbgbvr0_el1", CPENC(2,0,C0
, C0
, 4), 0 },
3136 { "dbgbvr1_el1", CPENC(2,0,C0
, C1
, 4), 0 },
3137 { "dbgbvr2_el1", CPENC(2,0,C0
, C2
, 4), 0 },
3138 { "dbgbvr3_el1", CPENC(2,0,C0
, C3
, 4), 0 },
3139 { "dbgbvr4_el1", CPENC(2,0,C0
, C4
, 4), 0 },
3140 { "dbgbvr5_el1", CPENC(2,0,C0
, C5
, 4), 0 },
3141 { "dbgbvr6_el1", CPENC(2,0,C0
, C6
, 4), 0 },
3142 { "dbgbvr7_el1", CPENC(2,0,C0
, C7
, 4), 0 },
3143 { "dbgbvr8_el1", CPENC(2,0,C0
, C8
, 4), 0 },
3144 { "dbgbvr9_el1", CPENC(2,0,C0
, C9
, 4), 0 },
3145 { "dbgbvr10_el1", CPENC(2,0,C0
, C10
,4), 0 },
3146 { "dbgbvr11_el1", CPENC(2,0,C0
, C11
,4), 0 },
3147 { "dbgbvr12_el1", CPENC(2,0,C0
, C12
,4), 0 },
3148 { "dbgbvr13_el1", CPENC(2,0,C0
, C13
,4), 0 },
3149 { "dbgbvr14_el1", CPENC(2,0,C0
, C14
,4), 0 },
3150 { "dbgbvr15_el1", CPENC(2,0,C0
, C15
,4), 0 },
3151 { "dbgbcr0_el1", CPENC(2,0,C0
, C0
, 5), 0 },
3152 { "dbgbcr1_el1", CPENC(2,0,C0
, C1
, 5), 0 },
3153 { "dbgbcr2_el1", CPENC(2,0,C0
, C2
, 5), 0 },
3154 { "dbgbcr3_el1", CPENC(2,0,C0
, C3
, 5), 0 },
3155 { "dbgbcr4_el1", CPENC(2,0,C0
, C4
, 5), 0 },
3156 { "dbgbcr5_el1", CPENC(2,0,C0
, C5
, 5), 0 },
3157 { "dbgbcr6_el1", CPENC(2,0,C0
, C6
, 5), 0 },
3158 { "dbgbcr7_el1", CPENC(2,0,C0
, C7
, 5), 0 },
3159 { "dbgbcr8_el1", CPENC(2,0,C0
, C8
, 5), 0 },
3160 { "dbgbcr9_el1", CPENC(2,0,C0
, C9
, 5), 0 },
3161 { "dbgbcr10_el1", CPENC(2,0,C0
, C10
,5), 0 },
3162 { "dbgbcr11_el1", CPENC(2,0,C0
, C11
,5), 0 },
3163 { "dbgbcr12_el1", CPENC(2,0,C0
, C12
,5), 0 },
3164 { "dbgbcr13_el1", CPENC(2,0,C0
, C13
,5), 0 },
3165 { "dbgbcr14_el1", CPENC(2,0,C0
, C14
,5), 0 },
3166 { "dbgbcr15_el1", CPENC(2,0,C0
, C15
,5), 0 },
3167 { "dbgwvr0_el1", CPENC(2,0,C0
, C0
, 6), 0 },
3168 { "dbgwvr1_el1", CPENC(2,0,C0
, C1
, 6), 0 },
3169 { "dbgwvr2_el1", CPENC(2,0,C0
, C2
, 6), 0 },
3170 { "dbgwvr3_el1", CPENC(2,0,C0
, C3
, 6), 0 },
3171 { "dbgwvr4_el1", CPENC(2,0,C0
, C4
, 6), 0 },
3172 { "dbgwvr5_el1", CPENC(2,0,C0
, C5
, 6), 0 },
3173 { "dbgwvr6_el1", CPENC(2,0,C0
, C6
, 6), 0 },
3174 { "dbgwvr7_el1", CPENC(2,0,C0
, C7
, 6), 0 },
3175 { "dbgwvr8_el1", CPENC(2,0,C0
, C8
, 6), 0 },
3176 { "dbgwvr9_el1", CPENC(2,0,C0
, C9
, 6), 0 },
3177 { "dbgwvr10_el1", CPENC(2,0,C0
, C10
,6), 0 },
3178 { "dbgwvr11_el1", CPENC(2,0,C0
, C11
,6), 0 },
3179 { "dbgwvr12_el1", CPENC(2,0,C0
, C12
,6), 0 },
3180 { "dbgwvr13_el1", CPENC(2,0,C0
, C13
,6), 0 },
3181 { "dbgwvr14_el1", CPENC(2,0,C0
, C14
,6), 0 },
3182 { "dbgwvr15_el1", CPENC(2,0,C0
, C15
,6), 0 },
3183 { "dbgwcr0_el1", CPENC(2,0,C0
, C0
, 7), 0 },
3184 { "dbgwcr1_el1", CPENC(2,0,C0
, C1
, 7), 0 },
3185 { "dbgwcr2_el1", CPENC(2,0,C0
, C2
, 7), 0 },
3186 { "dbgwcr3_el1", CPENC(2,0,C0
, C3
, 7), 0 },
3187 { "dbgwcr4_el1", CPENC(2,0,C0
, C4
, 7), 0 },
3188 { "dbgwcr5_el1", CPENC(2,0,C0
, C5
, 7), 0 },
3189 { "dbgwcr6_el1", CPENC(2,0,C0
, C6
, 7), 0 },
3190 { "dbgwcr7_el1", CPENC(2,0,C0
, C7
, 7), 0 },
3191 { "dbgwcr8_el1", CPENC(2,0,C0
, C8
, 7), 0 },
3192 { "dbgwcr9_el1", CPENC(2,0,C0
, C9
, 7), 0 },
3193 { "dbgwcr10_el1", CPENC(2,0,C0
, C10
,7), 0 },
3194 { "dbgwcr11_el1", CPENC(2,0,C0
, C11
,7), 0 },
3195 { "dbgwcr12_el1", CPENC(2,0,C0
, C12
,7), 0 },
3196 { "dbgwcr13_el1", CPENC(2,0,C0
, C13
,7), 0 },
3197 { "dbgwcr14_el1", CPENC(2,0,C0
, C14
,7), 0 },
3198 { "dbgwcr15_el1", CPENC(2,0,C0
, C15
,7), 0 },
3199 { "mdrar_el1", CPENC(2,0,C1
, C0
, 0), 0 }, /* r */
3200 { "oslar_el1", CPENC(2,0,C1
, C0
, 4), 0 }, /* w */
3201 { "oslsr_el1", CPENC(2,0,C1
, C1
, 4), 0 }, /* r */
3202 { "osdlr_el1", CPENC(2,0,C1
, C3
, 4), 0 },
3203 { "dbgprcr_el1", CPENC(2,0,C1
, C4
, 4), 0 },
3204 { "dbgclaimset_el1", CPENC(2,0,C7
, C8
, 6), 0 },
3205 { "dbgclaimclr_el1", CPENC(2,0,C7
, C9
, 6), 0 },
3206 { "dbgauthstatus_el1", CPENC(2,0,C7
, C14
,6), 0 }, /* r */
3207 { "pmblimitr_el1", CPENC (3, 0, C9
, C10
, 0), F_ARCHEXT
}, /* rw */
3208 { "pmbptr_el1", CPENC (3, 0, C9
, C10
, 1), F_ARCHEXT
}, /* rw */
3209 { "pmbsr_el1", CPENC (3, 0, C9
, C10
, 3), F_ARCHEXT
}, /* rw */
3210 { "pmbidr_el1", CPENC (3, 0, C9
, C10
, 7), F_ARCHEXT
}, /* ro */
3211 { "pmscr_el1", CPENC (3, 0, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3212 { "pmsicr_el1", CPENC (3, 0, C9
, C9
, 2), F_ARCHEXT
}, /* rw */
3213 { "pmsirr_el1", CPENC (3, 0, C9
, C9
, 3), F_ARCHEXT
}, /* rw */
3214 { "pmsfcr_el1", CPENC (3, 0, C9
, C9
, 4), F_ARCHEXT
}, /* rw */
3215 { "pmsevfr_el1", CPENC (3, 0, C9
, C9
, 5), F_ARCHEXT
}, /* rw */
3216 { "pmslatfr_el1", CPENC (3, 0, C9
, C9
, 6), F_ARCHEXT
}, /* rw */
3217 { "pmsidr_el1", CPENC (3, 0, C9
, C9
, 7), F_ARCHEXT
}, /* ro */
3218 { "pmscr_el2", CPENC (3, 4, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3219 { "pmscr_el12", CPENC (3, 5, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3220 { "pmcr_el0", CPENC(3,3,C9
,C12
, 0), 0 },
3221 { "pmcntenset_el0", CPENC(3,3,C9
,C12
, 1), 0 },
3222 { "pmcntenclr_el0", CPENC(3,3,C9
,C12
, 2), 0 },
3223 { "pmovsclr_el0", CPENC(3,3,C9
,C12
, 3), 0 },
3224 { "pmswinc_el0", CPENC(3,3,C9
,C12
, 4), 0 }, /* w */
3225 { "pmselr_el0", CPENC(3,3,C9
,C12
, 5), 0 },
3226 { "pmceid0_el0", CPENC(3,3,C9
,C12
, 6), 0 }, /* r */
3227 { "pmceid1_el0", CPENC(3,3,C9
,C12
, 7), 0 }, /* r */
3228 { "pmccntr_el0", CPENC(3,3,C9
,C13
, 0), 0 },
3229 { "pmxevtyper_el0", CPENC(3,3,C9
,C13
, 1), 0 },
3230 { "pmxevcntr_el0", CPENC(3,3,C9
,C13
, 2), 0 },
3231 { "pmuserenr_el0", CPENC(3,3,C9
,C14
, 0), 0 },
3232 { "pmintenset_el1", CPENC(3,0,C9
,C14
, 1), 0 },
3233 { "pmintenclr_el1", CPENC(3,0,C9
,C14
, 2), 0 },
3234 { "pmovsset_el0", CPENC(3,3,C9
,C14
, 3), 0 },
3235 { "pmevcntr0_el0", CPENC(3,3,C14
,C8
, 0), 0 },
3236 { "pmevcntr1_el0", CPENC(3,3,C14
,C8
, 1), 0 },
3237 { "pmevcntr2_el0", CPENC(3,3,C14
,C8
, 2), 0 },
3238 { "pmevcntr3_el0", CPENC(3,3,C14
,C8
, 3), 0 },
3239 { "pmevcntr4_el0", CPENC(3,3,C14
,C8
, 4), 0 },
3240 { "pmevcntr5_el0", CPENC(3,3,C14
,C8
, 5), 0 },
3241 { "pmevcntr6_el0", CPENC(3,3,C14
,C8
, 6), 0 },
3242 { "pmevcntr7_el0", CPENC(3,3,C14
,C8
, 7), 0 },
3243 { "pmevcntr8_el0", CPENC(3,3,C14
,C9
, 0), 0 },
3244 { "pmevcntr9_el0", CPENC(3,3,C14
,C9
, 1), 0 },
3245 { "pmevcntr10_el0", CPENC(3,3,C14
,C9
, 2), 0 },
3246 { "pmevcntr11_el0", CPENC(3,3,C14
,C9
, 3), 0 },
3247 { "pmevcntr12_el0", CPENC(3,3,C14
,C9
, 4), 0 },
3248 { "pmevcntr13_el0", CPENC(3,3,C14
,C9
, 5), 0 },
3249 { "pmevcntr14_el0", CPENC(3,3,C14
,C9
, 6), 0 },
3250 { "pmevcntr15_el0", CPENC(3,3,C14
,C9
, 7), 0 },
3251 { "pmevcntr16_el0", CPENC(3,3,C14
,C10
,0), 0 },
3252 { "pmevcntr17_el0", CPENC(3,3,C14
,C10
,1), 0 },
3253 { "pmevcntr18_el0", CPENC(3,3,C14
,C10
,2), 0 },
3254 { "pmevcntr19_el0", CPENC(3,3,C14
,C10
,3), 0 },
3255 { "pmevcntr20_el0", CPENC(3,3,C14
,C10
,4), 0 },
3256 { "pmevcntr21_el0", CPENC(3,3,C14
,C10
,5), 0 },
3257 { "pmevcntr22_el0", CPENC(3,3,C14
,C10
,6), 0 },
3258 { "pmevcntr23_el0", CPENC(3,3,C14
,C10
,7), 0 },
3259 { "pmevcntr24_el0", CPENC(3,3,C14
,C11
,0), 0 },
3260 { "pmevcntr25_el0", CPENC(3,3,C14
,C11
,1), 0 },
3261 { "pmevcntr26_el0", CPENC(3,3,C14
,C11
,2), 0 },
3262 { "pmevcntr27_el0", CPENC(3,3,C14
,C11
,3), 0 },
3263 { "pmevcntr28_el0", CPENC(3,3,C14
,C11
,4), 0 },
3264 { "pmevcntr29_el0", CPENC(3,3,C14
,C11
,5), 0 },
3265 { "pmevcntr30_el0", CPENC(3,3,C14
,C11
,6), 0 },
3266 { "pmevtyper0_el0", CPENC(3,3,C14
,C12
,0), 0 },
3267 { "pmevtyper1_el0", CPENC(3,3,C14
,C12
,1), 0 },
3268 { "pmevtyper2_el0", CPENC(3,3,C14
,C12
,2), 0 },
3269 { "pmevtyper3_el0", CPENC(3,3,C14
,C12
,3), 0 },
3270 { "pmevtyper4_el0", CPENC(3,3,C14
,C12
,4), 0 },
3271 { "pmevtyper5_el0", CPENC(3,3,C14
,C12
,5), 0 },
3272 { "pmevtyper6_el0", CPENC(3,3,C14
,C12
,6), 0 },
3273 { "pmevtyper7_el0", CPENC(3,3,C14
,C12
,7), 0 },
3274 { "pmevtyper8_el0", CPENC(3,3,C14
,C13
,0), 0 },
3275 { "pmevtyper9_el0", CPENC(3,3,C14
,C13
,1), 0 },
3276 { "pmevtyper10_el0", CPENC(3,3,C14
,C13
,2), 0 },
3277 { "pmevtyper11_el0", CPENC(3,3,C14
,C13
,3), 0 },
3278 { "pmevtyper12_el0", CPENC(3,3,C14
,C13
,4), 0 },
3279 { "pmevtyper13_el0", CPENC(3,3,C14
,C13
,5), 0 },
3280 { "pmevtyper14_el0", CPENC(3,3,C14
,C13
,6), 0 },
3281 { "pmevtyper15_el0", CPENC(3,3,C14
,C13
,7), 0 },
3282 { "pmevtyper16_el0", CPENC(3,3,C14
,C14
,0), 0 },
3283 { "pmevtyper17_el0", CPENC(3,3,C14
,C14
,1), 0 },
3284 { "pmevtyper18_el0", CPENC(3,3,C14
,C14
,2), 0 },
3285 { "pmevtyper19_el0", CPENC(3,3,C14
,C14
,3), 0 },
3286 { "pmevtyper20_el0", CPENC(3,3,C14
,C14
,4), 0 },
3287 { "pmevtyper21_el0", CPENC(3,3,C14
,C14
,5), 0 },
3288 { "pmevtyper22_el0", CPENC(3,3,C14
,C14
,6), 0 },
3289 { "pmevtyper23_el0", CPENC(3,3,C14
,C14
,7), 0 },
3290 { "pmevtyper24_el0", CPENC(3,3,C14
,C15
,0), 0 },
3291 { "pmevtyper25_el0", CPENC(3,3,C14
,C15
,1), 0 },
3292 { "pmevtyper26_el0", CPENC(3,3,C14
,C15
,2), 0 },
3293 { "pmevtyper27_el0", CPENC(3,3,C14
,C15
,3), 0 },
3294 { "pmevtyper28_el0", CPENC(3,3,C14
,C15
,4), 0 },
3295 { "pmevtyper29_el0", CPENC(3,3,C14
,C15
,5), 0 },
3296 { "pmevtyper30_el0", CPENC(3,3,C14
,C15
,6), 0 },
3297 { "pmccfiltr_el0", CPENC(3,3,C14
,C15
,7), 0 },
3298 { 0, CPENC(0,0,0,0,0), 0 },
3302 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
3304 return (reg
->flags
& F_DEPRECATED
) != 0;
3308 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
3309 const aarch64_sys_reg
*reg
)
3311 if (!(reg
->flags
& F_ARCHEXT
))
3314 /* PAN. Values are from aarch64_sys_regs. */
3315 if (reg
->value
== CPEN_(0,C2
,3)
3316 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3319 /* Virtualization host extensions: system registers. */
3320 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
3321 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
3322 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
3323 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
3324 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
3325 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3328 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3329 if ((reg
->value
== CPEN_ (5, C0
, 0)
3330 || reg
->value
== CPEN_ (5, C0
, 1)
3331 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
3332 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
3333 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
3334 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
3335 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
3336 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
3337 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
3338 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
3339 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
3340 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
3341 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
3342 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
3343 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
3344 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
3345 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3348 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3349 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
3350 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
3351 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
3352 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
3353 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
3354 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
3355 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3358 /* ARMv8.2 features. */
3360 /* ID_AA64MMFR2_EL1. */
3361 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
3362 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3366 if (reg
->value
== CPEN_ (0, C2
, 4)
3367 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3370 /* RAS extension. */
3372 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
3373 ERXMISC0_EL1 AND ERXMISC1_EL1. */
3374 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
3375 || reg
->value
== CPENC (3, 0, C5
, C3
, 1)
3376 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
3377 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
3378 || reg
->value
== CPENC (3, 0, C5
, C4
, 0)
3379 || reg
->value
== CPENC (3, 0, C5
, C4
, 1)
3380 || reg
->value
== CPENC (3, 0, C5
, C4
, 2)
3381 || reg
->value
== CPENC (3, 0, C5
, C4
, 3)
3382 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
3383 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
3384 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3387 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3388 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
3389 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
3390 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
3391 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3394 /* Statistical Profiling extension. */
3395 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
3396 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
3397 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
3398 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
3399 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
3400 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
3401 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
3402 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
3403 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
3404 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
3405 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
3406 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
3407 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
3408 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
3414 const aarch64_sys_reg aarch64_pstatefields
[] =
3416 { "spsel", 0x05, 0 },
3417 { "daifset", 0x1e, 0 },
3418 { "daifclr", 0x1f, 0 },
3419 { "pan", 0x04, F_ARCHEXT
},
3420 { "uao", 0x03, F_ARCHEXT
},
3421 { 0, CPENC(0,0,0,0,0), 0 },
3425 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
3426 const aarch64_sys_reg
*reg
)
3428 if (!(reg
->flags
& F_ARCHEXT
))
3431 /* PAN. Values are from aarch64_pstatefields. */
3432 if (reg
->value
== 0x04
3433 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3436 /* UAO. Values are from aarch64_pstatefields. */
3437 if (reg
->value
== 0x03
3438 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3444 const aarch64_sys_ins_reg aarch64_sys_regs_ic
[] =
3446 { "ialluis", CPENS(0,C7
,C1
,0), 0 },
3447 { "iallu", CPENS(0,C7
,C5
,0), 0 },
3448 { "ivau", CPENS (3, C7
, C5
, 1), F_HASXT
},
3449 { 0, CPENS(0,0,0,0), 0 }
3452 const aarch64_sys_ins_reg aarch64_sys_regs_dc
[] =
3454 { "zva", CPENS (3, C7
, C4
, 1), F_HASXT
},
3455 { "ivac", CPENS (0, C7
, C6
, 1), F_HASXT
},
3456 { "isw", CPENS (0, C7
, C6
, 2), F_HASXT
},
3457 { "cvac", CPENS (3, C7
, C10
, 1), F_HASXT
},
3458 { "csw", CPENS (0, C7
, C10
, 2), F_HASXT
},
3459 { "cvau", CPENS (3, C7
, C11
, 1), F_HASXT
},
3460 { "cvap", CPENS (3, C7
, C12
, 1), F_HASXT
| F_ARCHEXT
},
3461 { "civac", CPENS (3, C7
, C14
, 1), F_HASXT
},
3462 { "cisw", CPENS (0, C7
, C14
, 2), F_HASXT
},
3463 { 0, CPENS(0,0,0,0), 0 }
3466 const aarch64_sys_ins_reg aarch64_sys_regs_at
[] =
3468 { "s1e1r", CPENS (0, C7
, C8
, 0), F_HASXT
},
3469 { "s1e1w", CPENS (0, C7
, C8
, 1), F_HASXT
},
3470 { "s1e0r", CPENS (0, C7
, C8
, 2), F_HASXT
},
3471 { "s1e0w", CPENS (0, C7
, C8
, 3), F_HASXT
},
3472 { "s12e1r", CPENS (4, C7
, C8
, 4), F_HASXT
},
3473 { "s12e1w", CPENS (4, C7
, C8
, 5), F_HASXT
},
3474 { "s12e0r", CPENS (4, C7
, C8
, 6), F_HASXT
},
3475 { "s12e0w", CPENS (4, C7
, C8
, 7), F_HASXT
},
3476 { "s1e2r", CPENS (4, C7
, C8
, 0), F_HASXT
},
3477 { "s1e2w", CPENS (4, C7
, C8
, 1), F_HASXT
},
3478 { "s1e3r", CPENS (6, C7
, C8
, 0), F_HASXT
},
3479 { "s1e3w", CPENS (6, C7
, C8
, 1), F_HASXT
},
3480 { "s1e1rp", CPENS (0, C7
, C9
, 0), F_HASXT
| F_ARCHEXT
},
3481 { "s1e1wp", CPENS (0, C7
, C9
, 1), F_HASXT
| F_ARCHEXT
},
3482 { 0, CPENS(0,0,0,0), 0 }
3485 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi
[] =
3487 { "vmalle1", CPENS(0,C8
,C7
,0), 0 },
3488 { "vae1", CPENS (0, C8
, C7
, 1), F_HASXT
},
3489 { "aside1", CPENS (0, C8
, C7
, 2), F_HASXT
},
3490 { "vaae1", CPENS (0, C8
, C7
, 3), F_HASXT
},
3491 { "vmalle1is", CPENS(0,C8
,C3
,0), 0 },
3492 { "vae1is", CPENS (0, C8
, C3
, 1), F_HASXT
},
3493 { "aside1is", CPENS (0, C8
, C3
, 2), F_HASXT
},
3494 { "vaae1is", CPENS (0, C8
, C3
, 3), F_HASXT
},
3495 { "ipas2e1is", CPENS (4, C8
, C0
, 1), F_HASXT
},
3496 { "ipas2le1is",CPENS (4, C8
, C0
, 5), F_HASXT
},
3497 { "ipas2e1", CPENS (4, C8
, C4
, 1), F_HASXT
},
3498 { "ipas2le1", CPENS (4, C8
, C4
, 5), F_HASXT
},
3499 { "vae2", CPENS (4, C8
, C7
, 1), F_HASXT
},
3500 { "vae2is", CPENS (4, C8
, C3
, 1), F_HASXT
},
3501 { "vmalls12e1",CPENS(4,C8
,C7
,6), 0 },
3502 { "vmalls12e1is",CPENS(4,C8
,C3
,6), 0 },
3503 { "vae3", CPENS (6, C8
, C7
, 1), F_HASXT
},
3504 { "vae3is", CPENS (6, C8
, C3
, 1), F_HASXT
},
3505 { "alle2", CPENS(4,C8
,C7
,0), 0 },
3506 { "alle2is", CPENS(4,C8
,C3
,0), 0 },
3507 { "alle1", CPENS(4,C8
,C7
,4), 0 },
3508 { "alle1is", CPENS(4,C8
,C3
,4), 0 },
3509 { "alle3", CPENS(6,C8
,C7
,0), 0 },
3510 { "alle3is", CPENS(6,C8
,C3
,0), 0 },
3511 { "vale1is", CPENS (0, C8
, C3
, 5), F_HASXT
},
3512 { "vale2is", CPENS (4, C8
, C3
, 5), F_HASXT
},
3513 { "vale3is", CPENS (6, C8
, C3
, 5), F_HASXT
},
3514 { "vaale1is", CPENS (0, C8
, C3
, 7), F_HASXT
},
3515 { "vale1", CPENS (0, C8
, C7
, 5), F_HASXT
},
3516 { "vale2", CPENS (4, C8
, C7
, 5), F_HASXT
},
3517 { "vale3", CPENS (6, C8
, C7
, 5), F_HASXT
},
3518 { "vaale1", CPENS (0, C8
, C7
, 7), F_HASXT
},
3519 { 0, CPENS(0,0,0,0), 0 }
3523 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
3525 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
3529 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
3530 const aarch64_sys_ins_reg
*reg
)
3532 if (!(reg
->flags
& F_ARCHEXT
))
3535 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3536 if (reg
->value
== CPENS (3, C7
, C12
, 1)
3537 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3540 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3541 if ((reg
->value
== CPENS (0, C7
, C9
, 0)
3542 || reg
->value
== CPENS (0, C7
, C9
, 1))
3543 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
/* Extract bit number BT of instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field INSN[HI:LO].  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
3570 verify_ldpsw (const struct aarch64_opcode
* opcode ATTRIBUTE_UNUSED
,
3571 const aarch64_insn insn
)
3573 int t
= BITS (insn
, 4, 0);
3574 int n
= BITS (insn
, 9, 5);
3575 int t2
= BITS (insn
, 14, 10);
3579 /* Write back enabled. */
3580 if ((t
== n
|| t2
== n
) && n
!= 31)
3594 /* Include the opcode description table as well as the operand description
3596 #define VERIFIER(x) verify_##x
3597 #include "aarch64-tbl.h"