/* aarch64-opc.c -- AArch64 opcode support.
   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
#include "sysdep.h"
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <inttypes.h>

#include "opintl.h"
#include "libiberty.h"

#include "aarch64-opc.h"
#ifdef DEBUG_AARCH64
/* Run-time flag enabling the verbose dumping helpers below; off by
   default, set by the library user when debugging encode/decode.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0, 0, 0, 0, 0, 0, 0, 0,
  /* 24-31.  */
  0, 0, 0, 0, 0, "mul4", "mul3", "all"
};
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
108 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
109 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
116 return ((qualifier
>= AARCH64_OPND_QLF_S_B
117 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
/* Operand-qualifier data patterns recognised by get_data_pattern; each
   selects which operand carries the significant size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};
/* Indexed by enum data_pattern; gives, for each pattern, the index of the
   operand whose qualifier is significant for the size:Q encoding.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
147 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers
[0] == qualifiers
[1]
152 && vector_qualifier_p (qualifiers
[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers
[0])
154 == aarch64_get_qualifier_esize (qualifiers
[1]))
155 && (aarch64_get_qualifier_esize (qualifiers
[0])
156 == aarch64_get_qualifier_esize (qualifiers
[2])))
157 return DP_VECTOR_3SAME
;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers
[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers
[0])
164 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
165 return DP_VECTOR_LONG
;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers
[0] == qualifiers
[1]
168 && vector_qualifier_p (qualifiers
[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers
[0])
171 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers
[0])
173 == aarch64_get_qualifier_esize (qualifiers
[1])))
174 return DP_VECTOR_WIDE
;
176 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers
[1]) == TRUE
180 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
181 return DP_VECTOR_ACROSS_LANES
;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
199 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
202 const aarch64_field fields
[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
244 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
245 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
246 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
247 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
248 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
249 { 5, 14 }, /* imm14: in test bit and branch instructions. */
250 { 5, 16 }, /* imm16: in exception instructions. */
251 { 0, 26 }, /* imm26: in unconditional branch instructions. */
252 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
253 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
254 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
255 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
256 { 22, 1 }, /* N: in logical (immediate) instructions. */
257 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
258 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
259 { 31, 1 }, /* sf: in integer data processing instructions. */
260 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
261 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
262 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
263 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
264 { 31, 1 }, /* b5: in the test bit and branch instructions. */
265 { 19, 5 }, /* b40: in the test bit and branch instructions. */
266 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
267 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
268 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
269 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
270 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
271 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
272 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
273 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
274 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
275 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
276 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
277 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
278 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
279 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
280 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
281 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
282 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
283 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
284 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
285 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
286 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
287 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
288 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
289 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
290 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
291 { 5, 1 }, /* SVE_i1: single-bit immediate. */
292 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
293 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
294 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
295 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
296 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
297 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
298 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
299 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
300 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
301 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
302 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
303 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
304 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
305 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
306 { 16, 4 }, /* SVE_tsz: triangular size select. */
307 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
308 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
309 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
310 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
311 { 22, 1 } /* SVE_xs_22: UXTW/SXTW select (bit 22). */
314 enum aarch64_operand_class
315 aarch64_get_operand_class (enum aarch64_opnd type
)
317 return aarch64_operands
[type
].op_class
;
321 aarch64_get_operand_name (enum aarch64_opnd type
)
323 return aarch64_operands
[type
].name
;
326 /* Get operand description string.
327 This is usually for the diagnosis purpose. */
329 aarch64_get_operand_desc (enum aarch64_opnd type
)
331 return aarch64_operands
[type
].desc
;
334 /* Table of all conditional affixes. */
335 const aarch64_cond aarch64_conds
[16] =
337 {{"eq", "none"}, 0x0},
338 {{"ne", "any"}, 0x1},
339 {{"cs", "hs", "nlast"}, 0x2},
340 {{"cc", "lo", "ul", "last"}, 0x3},
341 {{"mi", "first"}, 0x4},
342 {{"pl", "nfrst"}, 0x5},
345 {{"hi", "pmore"}, 0x8},
346 {{"ls", "plast"}, 0x9},
347 {{"ge", "tcont"}, 0xa},
348 {{"lt", "tstop"}, 0xb},
356 get_cond_from_value (aarch64_insn value
)
359 return &aarch64_conds
[(unsigned int) value
];
363 get_inverted_cond (const aarch64_cond
*cond
)
365 return &aarch64_conds
[cond
->value
^ 0x1];
368 /* Table describing the operand extension/shifting operators; indexed by
369 enum aarch64_modifier_kind.
371 The value column provides the most common values for encoding modifiers,
372 which enables table-driven encoding/decoding for the modifiers. */
373 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
394 enum aarch64_modifier_kind
395 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
397 return desc
- aarch64_operand_modifiers
;
401 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
403 return aarch64_operand_modifiers
[kind
].value
;
406 enum aarch64_modifier_kind
407 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
408 bfd_boolean extend_p
)
410 if (extend_p
== TRUE
)
411 return AARCH64_MOD_UXTB
+ value
;
413 return AARCH64_MOD_LSL
- value
;
417 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
419 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
423 static inline bfd_boolean
424 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
426 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
430 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
450 /* Table describing the operands supported by the aliases of the HINT
453 The name column is the operand that is accepted for the alias. The value
454 column is the hint number of the alias. The list of operands is terminated
455 by NULL in the name column. */
457 const struct aarch64_name_value_pair aarch64_hint_options
[] =
459 { "csync", 0x11 }, /* PSB CSYNC. */
463 /* op -> op: load = 0 instruction = 1 store = 2
465 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
466 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
467 const struct aarch64_name_value_pair aarch64_prfops
[32] =
469 { "pldl1keep", B(0, 1, 0) },
470 { "pldl1strm", B(0, 1, 1) },
471 { "pldl2keep", B(0, 2, 0) },
472 { "pldl2strm", B(0, 2, 1) },
473 { "pldl3keep", B(0, 3, 0) },
474 { "pldl3strm", B(0, 3, 1) },
477 { "plil1keep", B(1, 1, 0) },
478 { "plil1strm", B(1, 1, 1) },
479 { "plil2keep", B(1, 2, 0) },
480 { "plil2strm", B(1, 2, 1) },
481 { "plil3keep", B(1, 3, 0) },
482 { "plil3strm", B(1, 3, 1) },
485 { "pstl1keep", B(2, 1, 0) },
486 { "pstl1strm", B(2, 1, 1) },
487 { "pstl2keep", B(2, 2, 0) },
488 { "pstl2strm", B(2, 2, 1) },
489 { "pstl3keep", B(2, 3, 0) },
490 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH], else 0.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
/* A signed value fits in a field.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* An unsigned value fits in a field.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
547 /* Return 1 if OPERAND is SP or WSP. */
549 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
551 return ((aarch64_get_operand_class (operand
->type
)
552 == AARCH64_OPND_CLASS_INT_REG
)
553 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
554 && operand
->reg
.regno
== 31);
557 /* Return 1 if OPERAND is XZR or WZP. */
559 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
561 return ((aarch64_get_operand_class (operand
->type
)
562 == AARCH64_OPND_CLASS_INT_REG
)
563 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
564 && operand
->reg
.regno
== 31);
567 /* Return true if the operand *OPERAND that has the operand code
568 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
569 qualified by the qualifier TARGET. */
572 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
573 aarch64_opnd_qualifier_t target
)
575 switch (operand
->qualifier
)
577 case AARCH64_OPND_QLF_W
:
578 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
581 case AARCH64_OPND_QLF_X
:
582 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
585 case AARCH64_OPND_QLF_WSP
:
586 if (target
== AARCH64_OPND_QLF_W
587 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
590 case AARCH64_OPND_QLF_SP
:
591 if (target
== AARCH64_OPND_QLF_X
592 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
602 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
603 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
605 Return NIL if more than one expected qualifiers are found. */
607 aarch64_opnd_qualifier_t
608 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
610 const aarch64_opnd_qualifier_t known_qlf
,
617 When the known qualifier is NIL, we have to assume that there is only
618 one qualifier sequence in the *QSEQ_LIST and return the corresponding
619 qualifier directly. One scenario is that for instruction
620 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
621 which has only one possible valid qualifier sequence
623 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
624 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
626 Because the qualifier NIL has dual roles in the qualifier sequence:
627 it can mean no qualifier for the operand, or the qualifer sequence is
628 not in use (when all qualifiers in the sequence are NILs), we have to
629 handle this special case here. */
630 if (known_qlf
== AARCH64_OPND_NIL
)
632 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
633 return qseq_list
[0][idx
];
636 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
638 if (qseq_list
[i
][known_idx
] == known_qlf
)
641 /* More than one sequences are found to have KNOWN_QLF at
643 return AARCH64_OPND_NIL
;
648 return qseq_list
[saved_i
][idx
];
/* Kinds of operand qualifier, controlling how the three data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
672 /* Indexed by the operand qualifier enumerators. */
673 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
675 {0, 0, 0, "NIL", OQK_NIL
},
677 /* Operand variant qualifiers.
679 element size, number of elements and common value for encoding. */
681 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
682 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
683 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
684 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
686 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
687 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
688 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
689 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
690 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
692 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
693 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
694 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
695 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
696 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
697 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
698 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
699 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
700 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
701 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
703 {0, 0, 0, "z", OQK_OPD_VARIANT
},
704 {0, 0, 0, "m", OQK_OPD_VARIANT
},
706 /* Qualifiers constraining the value range.
708 Lower bound, higher bound, unused. */
710 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
711 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
712 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
713 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
714 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
715 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
717 /* Qualifiers for miscellaneous purpose.
719 unused, unused and unused. */
724 {0, 0, 0, "retrieving", 0},
727 static inline bfd_boolean
728 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
730 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
734 static inline bfd_boolean
735 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
737 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
742 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
744 return aarch64_opnd_qualifiers
[qualifier
].desc
;
747 /* Given an operand qualifier, return the expected data element size
748 of a qualified operand. */
750 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
752 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
753 return aarch64_opnd_qualifiers
[qualifier
].data0
;
757 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
759 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
760 return aarch64_opnd_qualifiers
[qualifier
].data1
;
764 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
766 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
767 return aarch64_opnd_qualifiers
[qualifier
].data2
;
771 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
773 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
774 return aarch64_opnd_qualifiers
[qualifier
].data0
;
778 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
780 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
781 return aarch64_opnd_qualifiers
[qualifier
].data1
;
786 aarch64_verbose (const char *str
, ...)
797 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
801 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
802 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
807 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
808 const aarch64_opnd_qualifier_t
*qualifier
)
811 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
813 aarch64_verbose ("dump_match_qualifiers:");
814 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
815 curr
[i
] = opnd
[i
].qualifier
;
816 dump_qualifier_sequence (curr
);
817 aarch64_verbose ("against");
818 dump_qualifier_sequence (qualifier
);
820 #endif /* DEBUG_AARCH64 */
822 /* TODO improve this, we can have an extra field at the runtime to
823 store the number of operands rather than calculating it every time. */
826 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
829 const enum aarch64_opnd
*opnds
= opcode
->operands
;
830 while (opnds
[i
++] != AARCH64_OPND_NIL
)
833 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
837 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
838 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
840 N.B. on the entry, it is very likely that only some operands in *INST
841 have had their qualifiers been established.
843 If STOP_AT is not -1, the function will only try to match
844 the qualifier sequence for operands before and including the operand
845 of index STOP_AT; and on success *RET will only be filled with the first
846 (STOP_AT+1) qualifiers.
848 A couple examples of the matching algorithm:
856 Apart from serving the main encoding routine, this can also be called
857 during or after the operand decoding. */
860 aarch64_find_best_match (const aarch64_inst
*inst
,
861 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
862 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
866 const aarch64_opnd_qualifier_t
*qualifiers
;
868 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
871 DEBUG_TRACE ("SUCCEED: no operand");
875 if (stop_at
< 0 || stop_at
>= num_opnds
)
876 stop_at
= num_opnds
- 1;
878 /* For each pattern. */
879 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
882 qualifiers
= *qualifiers_list
;
884 /* Start as positive. */
887 DEBUG_TRACE ("%d", i
);
890 dump_match_qualifiers (inst
->operands
, qualifiers
);
893 /* Most opcodes has much fewer patterns in the list.
894 First NIL qualifier indicates the end in the list. */
895 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
897 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
903 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
905 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
907 /* Either the operand does not have qualifier, or the qualifier
908 for the operand needs to be deduced from the qualifier
910 In the latter case, any constraint checking related with
911 the obtained qualifier should be done later in
912 operand_general_constraint_met_p. */
915 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
917 /* Unless the target qualifier can also qualify the operand
918 (which has already had a non-nil qualifier), non-equal
919 qualifiers are generally un-matched. */
920 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
929 continue; /* Equal qualifiers are certainly matched. */
932 /* Qualifiers established. */
939 /* Fill the result in *RET. */
941 qualifiers
= *qualifiers_list
;
943 DEBUG_TRACE ("complete qualifiers using list %d", i
);
946 dump_qualifier_sequence (qualifiers
);
949 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
950 ret
[j
] = *qualifiers
;
951 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
952 ret
[j
] = AARCH64_OPND_QLF_NIL
;
954 DEBUG_TRACE ("SUCCESS");
958 DEBUG_TRACE ("FAIL");
962 /* Operand qualifier matching and resolving.
964 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
965 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
967 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
971 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
974 aarch64_opnd_qualifier_seq_t qualifiers
;
976 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
979 DEBUG_TRACE ("matching FAIL");
983 if (inst
->opcode
->flags
& F_STRICT
)
985 /* Require an exact qualifier match, even for NIL qualifiers. */
986 nops
= aarch64_num_of_operands (inst
->opcode
);
987 for (i
= 0; i
< nops
; ++i
)
988 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
992 /* Update the qualifiers. */
993 if (update_p
== TRUE
)
994 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
996 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
998 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
999 "update %s with %s for operand %d",
1000 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
1001 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
1002 inst
->operands
[i
].qualifier
= qualifiers
[i
];
1005 DEBUG_TRACE ("matching SUCCESS");
1009 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1012 IS32 indicates whether value is a 32-bit immediate or not.
1013 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1014 amount will be returned in *SHIFT_AMOUNT. */
1017 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
1021 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1025 /* Allow all zeros or all ones in top 32-bits, so that
1026 32-bit constant expressions like ~0x80000000 are
1028 uint64_t ext
= value
;
1029 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
1030 /* Immediate out of range. */
1032 value
&= (int64_t) 0xffffffff;
1035 /* first, try movz then movn */
1037 if ((value
& ((int64_t) 0xffff << 0)) == value
)
1039 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
1041 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
1043 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
1048 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1052 if (shift_amount
!= NULL
)
1053 *shift_amount
= amount
;
1055 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
1060 /* Build the accepted values for immediate logical SIMD instructions.
1062 The standard encodings of the immediate value are:
1063 N imms immr SIMD size R S
1064 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1065 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1066 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1067 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1068 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1069 0 11110s 00000r 2 UInt(r) UInt(s)
1070 where all-ones value of S is reserved.
1072 Let's call E the SIMD size.
1074 The immediate value is: S+1 bits '1' rotated to the right by R.
1076 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1077 (remember S != E - 1). */
1079 #define TOTAL_IMM_NB 5334
1084 aarch64_insn encoding
;
1085 } simd_imm_encoding
;
1087 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
1090 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1092 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1093 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1095 if (imm1
->imm
< imm2
->imm
)
1097 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1117 build_immediate_table (void)
1119 uint32_t log_e
, e
, s
, r
, s_mask
;
1125 for (log_e
= 1; log_e
<= 6; log_e
++)
1127 /* Get element size. */
1132 mask
= 0xffffffffffffffffull
;
1138 mask
= (1ull << e
) - 1;
1140 1 ((1 << 4) - 1) << 2 = 111100
1141 2 ((1 << 3) - 1) << 3 = 111000
1142 3 ((1 << 2) - 1) << 4 = 110000
1143 4 ((1 << 1) - 1) << 5 = 100000
1144 5 ((1 << 0) - 1) << 6 = 000000 */
1145 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1147 for (s
= 0; s
< e
- 1; s
++)
1148 for (r
= 0; r
< e
; r
++)
1150 /* s+1 consecutive bits to 1 (s < 63) */
1151 imm
= (1ull << (s
+ 1)) - 1;
1152 /* rotate right by r */
1154 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1155 /* replicate the constant depending on SIMD size */
1158 case 1: imm
= (imm
<< 2) | imm
;
1159 case 2: imm
= (imm
<< 4) | imm
;
1160 case 3: imm
= (imm
<< 8) | imm
;
1161 case 4: imm
= (imm
<< 16) | imm
;
1162 case 5: imm
= (imm
<< 32) | imm
;
1166 simd_immediates
[nb_imms
].imm
= imm
;
1167 simd_immediates
[nb_imms
].encoding
=
1168 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1172 assert (nb_imms
== TOTAL_IMM_NB
);
1173 qsort(simd_immediates
, nb_imms
,
1174 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1177 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1178 be accepted by logical (immediate) instructions
1179 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1181 ESIZE is the number of bytes in the decoded immediate value.
1182 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1183 VALUE will be returned in *ENCODING. */
1186 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1188 simd_imm_encoding imm_enc
;
1189 const simd_imm_encoding
*imm_encoding
;
1190 static bfd_boolean initialized
= FALSE
;
1194 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), is32: %d", value
,
1197 if (initialized
== FALSE
)
1199 build_immediate_table ();
1203 /* Allow all zeros or all ones in top bits, so that
1204 constant expressions like ~1 are permitted. */
1205 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1206 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1209 /* Replicate to a full 64-bit value. */
1211 for (i
= esize
* 8; i
< 64; i
*= 2)
1212 value
|= (value
<< i
);
1214 imm_enc
.imm
= value
;
1215 imm_encoding
= (const simd_imm_encoding
*)
1216 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1217 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1218 if (imm_encoding
== NULL
)
1220 DEBUG_TRACE ("exit with FALSE");
1223 if (encoding
!= NULL
)
1224 *encoding
= imm_encoding
->encoding
;
1225 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	return -1;
    }
  return ret;
}
1251 /* Utility inline functions for operand_general_constraint_met_p. */
1254 set_error (aarch64_operand_error
*mismatch_detail
,
1255 enum aarch64_operand_error_kind kind
, int idx
,
1258 if (mismatch_detail
== NULL
)
1260 mismatch_detail
->kind
= kind
;
1261 mismatch_detail
->index
= idx
;
1262 mismatch_detail
->error
= error
;
1266 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1269 if (mismatch_detail
== NULL
)
1271 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1275 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1276 int idx
, int lower_bound
, int upper_bound
,
1279 if (mismatch_detail
== NULL
)
1281 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1282 mismatch_detail
->data
[0] = lower_bound
;
1283 mismatch_detail
->data
[1] = upper_bound
;
1287 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1288 int idx
, int lower_bound
, int upper_bound
)
1290 if (mismatch_detail
== NULL
)
1292 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1293 _("immediate value"));
1297 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1298 int idx
, int lower_bound
, int upper_bound
)
1300 if (mismatch_detail
== NULL
)
1302 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1303 _("immediate offset"));
1307 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1308 int idx
, int lower_bound
, int upper_bound
)
1310 if (mismatch_detail
== NULL
)
1312 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1313 _("register number"));
1317 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1318 int idx
, int lower_bound
, int upper_bound
)
1320 if (mismatch_detail
== NULL
)
1322 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1323 _("register element index"));
1327 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1328 int idx
, int lower_bound
, int upper_bound
)
1330 if (mismatch_detail
== NULL
)
1332 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1336 /* Report that the MUL modifier in operand IDX should be in the range
1337 [LOWER_BOUND, UPPER_BOUND]. */
1339 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1340 int idx
, int lower_bound
, int upper_bound
)
1342 if (mismatch_detail
== NULL
)
1344 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1349 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1352 if (mismatch_detail
== NULL
)
1354 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1355 mismatch_detail
->data
[0] = alignment
;
1359 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1362 if (mismatch_detail
== NULL
)
1364 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1365 mismatch_detail
->data
[0] = expected_num
;
1369 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1372 if (mismatch_detail
== NULL
)
1374 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1377 /* General constraint checking based on operand code.
1379 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1380 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1382 This function has to be called after the qualifiers for all operands
1385 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1386 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1387 of error message during the disassembling where error message is not
1388 wanted. We avoid the dynamic construction of strings of error messages
1389 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1390 use a combination of error code, static string and some integer data to
1391 represent an error. */
1394 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1395 enum aarch64_opnd type
,
1396 const aarch64_opcode
*opcode
,
1397 aarch64_operand_error
*mismatch_detail
)
1399 unsigned num
, modifiers
, shift
;
1401 int64_t imm
, min_value
, max_value
;
1402 uint64_t uvalue
, mask
;
1403 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1404 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1406 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1408 switch (aarch64_operands
[type
].op_class
)
1410 case AARCH64_OPND_CLASS_INT_REG
:
1411 /* Check pair reg constraints for cas* instructions. */
1412 if (type
== AARCH64_OPND_PAIRREG
)
1414 assert (idx
== 1 || idx
== 3);
1415 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1417 set_syntax_error (mismatch_detail
, idx
- 1,
1418 _("reg pair must start from even reg"));
1421 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1423 set_syntax_error (mismatch_detail
, idx
,
1424 _("reg pair must be contiguous"));
1430 /* <Xt> may be optional in some IC and TLBI instructions. */
1431 if (type
== AARCH64_OPND_Rt_SYS
)
1433 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1434 == AARCH64_OPND_CLASS_SYSTEM
));
1435 if (opnds
[1].present
1436 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1438 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1441 if (!opnds
[1].present
1442 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1444 set_other_error (mismatch_detail
, idx
, _("missing register"));
1450 case AARCH64_OPND_QLF_WSP
:
1451 case AARCH64_OPND_QLF_SP
:
1452 if (!aarch64_stack_pointer_p (opnd
))
1454 set_other_error (mismatch_detail
, idx
,
1455 _("stack pointer register expected"));
1464 case AARCH64_OPND_CLASS_SVE_REG
:
1467 case AARCH64_OPND_SVE_Zn_INDEX
:
1468 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1469 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1471 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1477 case AARCH64_OPND_SVE_ZnxN
:
1478 case AARCH64_OPND_SVE_ZtxN
:
1479 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1481 set_other_error (mismatch_detail
, idx
,
1482 _("invalid register list"));
1492 case AARCH64_OPND_CLASS_PRED_REG
:
1493 if (opnd
->reg
.regno
>= 8
1494 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1496 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1501 case AARCH64_OPND_CLASS_COND
:
1502 if (type
== AARCH64_OPND_COND1
1503 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1505 /* Not allow AL or NV. */
1506 set_syntax_error (mismatch_detail
, idx
, NULL
);
1510 case AARCH64_OPND_CLASS_ADDRESS
:
1511 /* Check writeback. */
1512 switch (opcode
->iclass
)
1516 case ldstnapair_offs
:
1519 if (opnd
->addr
.writeback
== 1)
1521 set_syntax_error (mismatch_detail
, idx
,
1522 _("unexpected address writeback"));
1527 case ldstpair_indexed
:
1530 if (opnd
->addr
.writeback
== 0)
1532 set_syntax_error (mismatch_detail
, idx
,
1533 _("address writeback expected"));
1538 assert (opnd
->addr
.writeback
== 0);
1543 case AARCH64_OPND_ADDR_SIMM7
:
1544 /* Scaled signed 7 bits immediate offset. */
1545 /* Get the size of the data element that is accessed, which may be
1546 different from that of the source register size,
1547 e.g. in strb/ldrb. */
1548 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1549 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1551 set_offset_out_of_range_error (mismatch_detail
, idx
,
1552 -64 * size
, 63 * size
);
1555 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1557 set_unaligned_error (mismatch_detail
, idx
, size
);
1561 case AARCH64_OPND_ADDR_SIMM9
:
1562 /* Unscaled signed 9 bits immediate offset. */
1563 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1565 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1570 case AARCH64_OPND_ADDR_SIMM9_2
:
1571 /* Unscaled signed 9 bits immediate offset, which has to be negative
1573 size
= aarch64_get_qualifier_esize (qualifier
);
1574 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1575 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1576 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1578 set_other_error (mismatch_detail
, idx
,
1579 _("negative or unaligned offset expected"));
1582 case AARCH64_OPND_SIMD_ADDR_POST
:
1583 /* AdvSIMD load/store multiple structures, post-index. */
1585 if (opnd
->addr
.offset
.is_reg
)
1587 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1591 set_other_error (mismatch_detail
, idx
,
1592 _("invalid register offset"));
1598 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1599 unsigned num_bytes
; /* total number of bytes transferred. */
1600 /* The opcode dependent area stores the number of elements in
1601 each structure to be loaded/stored. */
1602 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1603 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1604 /* Special handling of loading single structure to all lane. */
1605 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1606 * aarch64_get_qualifier_esize (prev
->qualifier
);
1608 num_bytes
= prev
->reglist
.num_regs
1609 * aarch64_get_qualifier_esize (prev
->qualifier
)
1610 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1611 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1613 set_other_error (mismatch_detail
, idx
,
1614 _("invalid post-increment amount"));
1620 case AARCH64_OPND_ADDR_REGOFF
:
1621 /* Get the size of the data element that is accessed, which may be
1622 different from that of the source register size,
1623 e.g. in strb/ldrb. */
1624 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1625 /* It is either no shift or shift by the binary logarithm of SIZE. */
1626 if (opnd
->shifter
.amount
!= 0
1627 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1629 set_other_error (mismatch_detail
, idx
,
1630 _("invalid shift amount"));
1633 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1635 switch (opnd
->shifter
.kind
)
1637 case AARCH64_MOD_UXTW
:
1638 case AARCH64_MOD_LSL
:
1639 case AARCH64_MOD_SXTW
:
1640 case AARCH64_MOD_SXTX
: break;
1642 set_other_error (mismatch_detail
, idx
,
1643 _("invalid extend/shift operator"));
1648 case AARCH64_OPND_ADDR_UIMM12
:
1649 imm
= opnd
->addr
.offset
.imm
;
1650 /* Get the size of the data element that is accessed, which may be
1651 different from that of the source register size,
1652 e.g. in strb/ldrb. */
1653 size
= aarch64_get_qualifier_esize (qualifier
);
1654 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1656 set_offset_out_of_range_error (mismatch_detail
, idx
,
1660 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1662 set_unaligned_error (mismatch_detail
, idx
, size
);
1667 case AARCH64_OPND_ADDR_PCREL14
:
1668 case AARCH64_OPND_ADDR_PCREL19
:
1669 case AARCH64_OPND_ADDR_PCREL21
:
1670 case AARCH64_OPND_ADDR_PCREL26
:
1671 imm
= opnd
->imm
.value
;
1672 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1674 /* The offset value in a PC-relative branch instruction is alway
1675 4-byte aligned and is encoded without the lowest 2 bits. */
1676 if (!value_aligned_p (imm
, 4))
1678 set_unaligned_error (mismatch_detail
, idx
, 4);
1681 /* Right shift by 2 so that we can carry out the following check
1685 size
= get_operand_fields_width (get_operand_from_code (type
));
1686 if (!value_fit_signed_field_p (imm
, size
))
1688 set_other_error (mismatch_detail
, idx
,
1689 _("immediate out of range"));
1694 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
1695 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
1696 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
1697 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
1701 assert (!opnd
->addr
.offset
.is_reg
);
1702 assert (opnd
->addr
.preind
);
1703 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
1706 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
1707 || (opnd
->shifter
.operator_present
1708 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
1710 set_other_error (mismatch_detail
, idx
,
1711 _("invalid addressing mode"));
1714 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1716 set_offset_out_of_range_error (mismatch_detail
, idx
,
1717 min_value
, max_value
);
1720 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1722 set_unaligned_error (mismatch_detail
, idx
, num
);
1727 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
1730 goto sve_imm_offset_vl
;
1732 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
1735 goto sve_imm_offset_vl
;
1737 case AARCH64_OPND_SVE_ADDR_RI_U6
:
1738 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
1739 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
1740 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
1744 assert (!opnd
->addr
.offset
.is_reg
);
1745 assert (opnd
->addr
.preind
);
1746 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
1749 if (opnd
->shifter
.operator_present
1750 || opnd
->shifter
.amount_present
)
1752 set_other_error (mismatch_detail
, idx
,
1753 _("invalid addressing mode"));
1756 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1758 set_offset_out_of_range_error (mismatch_detail
, idx
,
1759 min_value
, max_value
);
1762 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1764 set_unaligned_error (mismatch_detail
, idx
, num
);
1769 case AARCH64_OPND_SVE_ADDR_RR
:
1770 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
1771 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
1772 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
1773 case AARCH64_OPND_SVE_ADDR_RX
:
1774 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
1775 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
1776 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
1777 case AARCH64_OPND_SVE_ADDR_RZ
:
1778 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
1779 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
1780 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
1781 modifiers
= 1 << AARCH64_MOD_LSL
;
1783 assert (opnd
->addr
.offset
.is_reg
);
1784 assert (opnd
->addr
.preind
);
1785 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
1786 && opnd
->addr
.offset
.regno
== 31)
1788 set_other_error (mismatch_detail
, idx
,
1789 _("index register xzr is not allowed"));
1792 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1793 || (opnd
->shifter
.amount
1794 != get_operand_specific_data (&aarch64_operands
[type
])))
1796 set_other_error (mismatch_detail
, idx
,
1797 _("invalid addressing mode"));
1802 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
1803 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
1804 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
1805 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
1806 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
1807 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
1808 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
1809 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
1810 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
1811 goto sve_rr_operand
;
1813 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
1814 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
1815 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
1816 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
1819 goto sve_imm_offset
;
1821 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
1822 modifiers
= 1 << AARCH64_MOD_LSL
;
1824 assert (opnd
->addr
.offset
.is_reg
);
1825 assert (opnd
->addr
.preind
);
1826 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1827 || opnd
->shifter
.amount
< 0
1828 || opnd
->shifter
.amount
> 3)
1830 set_other_error (mismatch_detail
, idx
,
1831 _("invalid addressing mode"));
1836 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
1837 modifiers
= (1 << AARCH64_MOD_SXTW
);
1838 goto sve_zz_operand
;
1840 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
1841 modifiers
= 1 << AARCH64_MOD_UXTW
;
1842 goto sve_zz_operand
;
1849 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1850 if (type
== AARCH64_OPND_LEt
)
1852 /* Get the upper bound for the element index. */
1853 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1854 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1856 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1860 /* The opcode dependent area stores the number of elements in
1861 each structure to be loaded/stored. */
1862 num
= get_opcode_dependent_value (opcode
);
1865 case AARCH64_OPND_LVt
:
1866 assert (num
>= 1 && num
<= 4);
1867 /* Unless LD1/ST1, the number of registers should be equal to that
1868 of the structure elements. */
1869 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1871 set_reg_list_error (mismatch_detail
, idx
, num
);
1875 case AARCH64_OPND_LVt_AL
:
1876 case AARCH64_OPND_LEt
:
1877 assert (num
>= 1 && num
<= 4);
1878 /* The number of registers should be equal to that of the structure
1880 if (opnd
->reglist
.num_regs
!= num
)
1882 set_reg_list_error (mismatch_detail
, idx
, num
);
1891 case AARCH64_OPND_CLASS_IMMEDIATE
:
1892 /* Constraint check on immediate operand. */
1893 imm
= opnd
->imm
.value
;
1894 /* E.g. imm_0_31 constrains value to be 0..31. */
1895 if (qualifier_value_in_range_constraint_p (qualifier
)
1896 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1897 get_upper_bound (qualifier
)))
1899 set_imm_out_of_range_error (mismatch_detail
, idx
,
1900 get_lower_bound (qualifier
),
1901 get_upper_bound (qualifier
));
1907 case AARCH64_OPND_AIMM
:
1908 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1910 set_other_error (mismatch_detail
, idx
,
1911 _("invalid shift operator"));
1914 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1916 set_other_error (mismatch_detail
, idx
,
1917 _("shift amount must be 0 or 12"));
1920 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1922 set_other_error (mismatch_detail
, idx
,
1923 _("immediate out of range"));
1928 case AARCH64_OPND_HALF
:
1929 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1930 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1932 set_other_error (mismatch_detail
, idx
,
1933 _("invalid shift operator"));
1936 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1937 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
1939 set_other_error (mismatch_detail
, idx
,
1940 _("shift amount must be a multiple of 16"));
1943 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
1945 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
1949 if (opnd
->imm
.value
< 0)
1951 set_other_error (mismatch_detail
, idx
,
1952 _("negative immediate value not allowed"));
1955 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
1957 set_other_error (mismatch_detail
, idx
,
1958 _("immediate out of range"));
1963 case AARCH64_OPND_IMM_MOV
:
1965 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1966 imm
= opnd
->imm
.value
;
1970 case OP_MOV_IMM_WIDEN
:
1972 /* Fall through... */
1973 case OP_MOV_IMM_WIDE
:
1974 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
1976 set_other_error (mismatch_detail
, idx
,
1977 _("immediate out of range"));
1981 case OP_MOV_IMM_LOG
:
1982 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
1984 set_other_error (mismatch_detail
, idx
,
1985 _("immediate out of range"));
1996 case AARCH64_OPND_NZCV
:
1997 case AARCH64_OPND_CCMP_IMM
:
1998 case AARCH64_OPND_EXCEPTION
:
1999 case AARCH64_OPND_UIMM4
:
2000 case AARCH64_OPND_UIMM7
:
2001 case AARCH64_OPND_UIMM3_OP1
:
2002 case AARCH64_OPND_UIMM3_OP2
:
2003 case AARCH64_OPND_SVE_UIMM3
:
2004 case AARCH64_OPND_SVE_UIMM7
:
2005 case AARCH64_OPND_SVE_UIMM8
:
2006 case AARCH64_OPND_SVE_UIMM8_53
:
2007 size
= get_operand_fields_width (get_operand_from_code (type
));
2009 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
2011 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2017 case AARCH64_OPND_SIMM5
:
2018 case AARCH64_OPND_SVE_SIMM5
:
2019 case AARCH64_OPND_SVE_SIMM5B
:
2020 case AARCH64_OPND_SVE_SIMM6
:
2021 case AARCH64_OPND_SVE_SIMM8
:
2022 size
= get_operand_fields_width (get_operand_from_code (type
));
2024 if (!value_fit_signed_field_p (opnd
->imm
.value
, size
))
2026 set_imm_out_of_range_error (mismatch_detail
, idx
,
2028 (1 << (size
- 1)) - 1);
2033 case AARCH64_OPND_WIDTH
:
2034 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
2035 && opnds
[0].type
== AARCH64_OPND_Rd
);
2036 size
= get_upper_bound (qualifier
);
2037 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
2038 /* lsb+width <= reg.size */
2040 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
2041 size
- opnds
[idx
-1].imm
.value
);
2046 case AARCH64_OPND_LIMM
:
2047 case AARCH64_OPND_SVE_LIMM
:
2049 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2050 uint64_t uimm
= opnd
->imm
.value
;
2051 if (opcode
->op
== OP_BIC
)
2053 if (aarch64_logical_immediate_p (uimm
, esize
, NULL
) == FALSE
)
2055 set_other_error (mismatch_detail
, idx
,
2056 _("immediate out of range"));
2062 case AARCH64_OPND_IMM0
:
2063 case AARCH64_OPND_FPIMM0
:
2064 if (opnd
->imm
.value
!= 0)
2066 set_other_error (mismatch_detail
, idx
,
2067 _("immediate zero expected"));
2072 case AARCH64_OPND_SHLL_IMM
:
2074 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2075 if (opnd
->imm
.value
!= size
)
2077 set_other_error (mismatch_detail
, idx
,
2078 _("invalid shift amount"));
2083 case AARCH64_OPND_IMM_VLSL
:
2084 size
= aarch64_get_qualifier_esize (qualifier
);
2085 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
2087 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2093 case AARCH64_OPND_IMM_VLSR
:
2094 size
= aarch64_get_qualifier_esize (qualifier
);
2095 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
2097 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
2102 case AARCH64_OPND_SIMD_IMM
:
2103 case AARCH64_OPND_SIMD_IMM_SFT
:
2104 /* Qualifier check. */
2107 case AARCH64_OPND_QLF_LSL
:
2108 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2110 set_other_error (mismatch_detail
, idx
,
2111 _("invalid shift operator"));
2115 case AARCH64_OPND_QLF_MSL
:
2116 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
2118 set_other_error (mismatch_detail
, idx
,
2119 _("invalid shift operator"));
2123 case AARCH64_OPND_QLF_NIL
:
2124 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2126 set_other_error (mismatch_detail
, idx
,
2127 _("shift is not permitted"));
2135 /* Is the immediate valid? */
2137 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
2139 /* uimm8 or simm8 */
2140 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
2142 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
2146 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
2149 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2150 ffffffffgggggggghhhhhhhh'. */
2151 set_other_error (mismatch_detail
, idx
,
2152 _("invalid value for immediate"));
2155 /* Is the shift amount valid? */
2156 switch (opnd
->shifter
.kind
)
2158 case AARCH64_MOD_LSL
:
2159 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2160 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
2162 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
2166 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
2168 set_unaligned_error (mismatch_detail
, idx
, 8);
2172 case AARCH64_MOD_MSL
:
2173 /* Only 8 and 16 are valid shift amount. */
2174 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
2176 set_other_error (mismatch_detail
, idx
,
2177 _("shift amount must be 0 or 16"));
2182 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2184 set_other_error (mismatch_detail
, idx
,
2185 _("invalid shift operator"));
2192 case AARCH64_OPND_FPIMM
:
2193 case AARCH64_OPND_SIMD_FPIMM
:
2194 case AARCH64_OPND_SVE_FPIMM8
:
2195 if (opnd
->imm
.is_fp
== 0)
2197 set_other_error (mismatch_detail
, idx
,
2198 _("floating-point immediate expected"));
2201 /* The value is expected to be an 8-bit floating-point constant with
2202 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2203 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2205 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
2207 set_other_error (mismatch_detail
, idx
,
2208 _("immediate out of range"));
2211 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2213 set_other_error (mismatch_detail
, idx
,
2214 _("invalid shift operator"));
2219 case AARCH64_OPND_SVE_AIMM
:
2222 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2223 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2224 mask
= ~((uint64_t) -1 << (size
* 4) << (size
* 4));
2225 uvalue
= opnd
->imm
.value
;
2226 shift
= opnd
->shifter
.amount
;
2231 set_other_error (mismatch_detail
, idx
,
2232 _("no shift amount allowed for"
2233 " 8-bit constants"));
2239 if (shift
!= 0 && shift
!= 8)
2241 set_other_error (mismatch_detail
, idx
,
2242 _("shift amount must be 0 or 8"));
2245 if (shift
== 0 && (uvalue
& 0xff) == 0)
2248 uvalue
= (int64_t) uvalue
/ 256;
2252 if ((uvalue
& mask
) != uvalue
&& (uvalue
| ~mask
) != uvalue
)
2254 set_other_error (mismatch_detail
, idx
,
2255 _("immediate too big for element size"));
2258 uvalue
= (uvalue
- min_value
) & mask
;
2261 set_other_error (mismatch_detail
, idx
,
2262 _("invalid arithmetic immediate"));
2267 case AARCH64_OPND_SVE_ASIMM
:
2271 case AARCH64_OPND_SVE_I1_HALF_ONE
:
2272 assert (opnd
->imm
.is_fp
);
2273 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x3f800000)
2275 set_other_error (mismatch_detail
, idx
,
2276 _("floating-point value must be 0.5 or 1.0"));
2281 case AARCH64_OPND_SVE_I1_HALF_TWO
:
2282 assert (opnd
->imm
.is_fp
);
2283 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x40000000)
2285 set_other_error (mismatch_detail
, idx
,
2286 _("floating-point value must be 0.5 or 2.0"));
2291 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
2292 assert (opnd
->imm
.is_fp
);
2293 if (opnd
->imm
.value
!= 0 && opnd
->imm
.value
!= 0x3f800000)
2295 set_other_error (mismatch_detail
, idx
,
2296 _("floating-point value must be 0.0 or 1.0"));
2301 case AARCH64_OPND_SVE_INV_LIMM
:
2303 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2304 uint64_t uimm
= ~opnd
->imm
.value
;
2305 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2307 set_other_error (mismatch_detail
, idx
,
2308 _("immediate out of range"));
2314 case AARCH64_OPND_SVE_LIMM_MOV
:
2316 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2317 uint64_t uimm
= opnd
->imm
.value
;
2318 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2320 set_other_error (mismatch_detail
, idx
,
2321 _("immediate out of range"));
2324 if (!aarch64_sve_dupm_mov_immediate_p (uimm
, esize
))
2326 set_other_error (mismatch_detail
, idx
,
2327 _("invalid replicated MOV immediate"));
2333 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2334 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
2335 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
2337 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
2342 case AARCH64_OPND_SVE_SHLIMM_PRED
:
2343 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
2344 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2345 if (!value_in_range_p (opnd
->imm
.value
, 0, 8 * size
- 1))
2347 set_imm_out_of_range_error (mismatch_detail
, idx
,
2353 case AARCH64_OPND_SVE_SHRIMM_PRED
:
2354 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
2355 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2356 if (!value_in_range_p (opnd
->imm
.value
, 1, 8 * size
))
2358 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, 8 * size
);
2368 case AARCH64_OPND_CLASS_CP_REG
:
2369 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2370 valid range: C0 - C15. */
2371 if (opnd
->reg
.regno
> 15)
2373 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2378 case AARCH64_OPND_CLASS_SYSTEM
:
2381 case AARCH64_OPND_PSTATEFIELD
:
2382 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
2385 The immediate must be #0 or #1. */
2386 if ((opnd
->pstatefield
== 0x03 /* UAO. */
2387 || opnd
->pstatefield
== 0x04) /* PAN. */
2388 && opnds
[1].imm
.value
> 1)
2390 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2393 /* MSR SPSel, #uimm4
2394 Uses uimm4 as a control value to select the stack pointer: if
2395 bit 0 is set it selects the current exception level's stack
2396 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2397 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2398 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
2400 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2409 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
2410 /* Get the upper bound for the element index. */
2411 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
2412 /* Index out-of-range. */
2413 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
2415 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2418 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2419 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2420 number is encoded in "size:M:Rm":
2426 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
2427 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2429 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2434 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2435 assert (idx
== 1 || idx
== 2);
2438 case AARCH64_OPND_Rm_EXT
:
2439 if (aarch64_extend_operator_p (opnd
->shifter
.kind
) == FALSE
2440 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2442 set_other_error (mismatch_detail
, idx
,
2443 _("extend operator expected"));
2446 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2447 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2448 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2450 if (!aarch64_stack_pointer_p (opnds
+ 0)
2451 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2453 if (!opnd
->shifter
.operator_present
)
2455 set_other_error (mismatch_detail
, idx
,
2456 _("missing extend operator"));
2459 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2461 set_other_error (mismatch_detail
, idx
,
2462 _("'LSL' operator not allowed"));
2466 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2467 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2468 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2470 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2473 /* In the 64-bit form, the final register operand is written as Wm
2474 for all but the (possibly omitted) UXTX/LSL and SXTX
2476 N.B. GAS allows X register to be used with any operator as a
2477 programming convenience. */
2478 if (qualifier
== AARCH64_OPND_QLF_X
2479 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2480 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2481 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2483 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2488 case AARCH64_OPND_Rm_SFT
:
2489 /* ROR is not available to the shifted register operand in
2490 arithmetic instructions. */
2491 if (aarch64_shift_operator_p (opnd
->shifter
.kind
) == FALSE
)
2493 set_other_error (mismatch_detail
, idx
,
2494 _("shift operator expected"));
2497 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2498 && opcode
->iclass
!= log_shift
)
2500 set_other_error (mismatch_detail
, idx
,
2501 _("'ROR' operator not allowed"));
2504 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2505 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2507 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2524 /* Main entrypoint for the operand constraint checking.
2526 Return 1 if operands of *INST meet the constraint applied by the operand
2527 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2528 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2529 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2530 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2531 error kind when it is notified that an instruction does not pass the check).
2533 Un-determined operand qualifiers may get established during the process. */
2536 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2537 aarch64_operand_error
*mismatch_detail
)
2541 DEBUG_TRACE ("enter");
2543 /* Check for cases where a source register needs to be the same as the
2544 destination register. Do this before matching qualifiers since if
2545 an instruction has both invalid tying and invalid qualifiers,
2546 the error about qualifiers would suggest several alternative
2547 instructions that also have invalid tying. */
2548 i
= inst
->opcode
->tied_operand
;
2549 if (i
> 0 && (inst
->operands
[0].reg
.regno
!= inst
->operands
[i
].reg
.regno
))
2551 if (mismatch_detail
)
2553 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
2554 mismatch_detail
->index
= i
;
2555 mismatch_detail
->error
= NULL
;
2560 /* Match operands' qualifier.
2561 *INST has already had qualifier establish for some, if not all, of
2562 its operands; we need to find out whether these established
2563 qualifiers match one of the qualifier sequence in
2564 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2565 with the corresponding qualifier in such a sequence.
2566 Only basic operand constraint checking is done here; the more thorough
2567 constraint checking will carried out by operand_general_constraint_met_p,
2568 which has be to called after this in order to get all of the operands'
2569 qualifiers established. */
2570 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2572 DEBUG_TRACE ("FAIL on operand qualifier matching");
2573 if (mismatch_detail
)
2575 /* Return an error type to indicate that it is the qualifier
2576 matching failure; we don't care about which operand as there
2577 are enough information in the opcode table to reproduce it. */
2578 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2579 mismatch_detail
->index
= -1;
2580 mismatch_detail
->error
= NULL
;
2585 /* Match operands' constraint. */
2586 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2588 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2589 if (type
== AARCH64_OPND_NIL
)
2591 if (inst
->operands
[i
].skip
)
2593 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2596 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2597 inst
->opcode
, mismatch_detail
) == 0)
2599 DEBUG_TRACE ("FAIL on operand %d", i
);
2604 DEBUG_TRACE ("PASS");
2609 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2610 Also updates the TYPE of each INST->OPERANDS with the corresponding
2611 value of OPCODE->OPERANDS.
2613 Note that some operand qualifiers may need to be manually cleared by
2614 the caller before it further calls the aarch64_opcode_encode; by
2615 doing this, it helps the qualifier matching facilities work
2618 const aarch64_opcode
*
2619 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2622 const aarch64_opcode
*old
= inst
->opcode
;
2624 inst
->opcode
= opcode
;
2626 /* Update the operand types. */
2627 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2629 inst
->operands
[i
].type
= opcode
->operands
[i
];
2630 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2634 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2640 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2643 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2644 if (operands
[i
] == operand
)
2646 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R  (0), R  (1), R  (2), R  (3), R  (4), R  (5), R  (6), R  (7), \
    R  (8), R  (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Lookup table of integer register names, indexed as follows:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */
static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
2682 /* Return the integer register name.
2683 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2685 static inline const char *
2686 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2688 const int has_zr
= sp_reg_p
? 0 : 1;
2689 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2690 return int_reg
[has_zr
][is_64
][regno
];
2693 /* Like get_int_reg_name, but IS_64 is always 1. */
2695 static inline const char *
2696 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2698 const int has_zr
= sp_reg_p
? 0 : 1;
2699 return int_reg
[has_zr
][1][regno
];
2702 /* Get the name of the integer offset register in OPND, using the shift type
2703 to decide whether it's a word or doubleword. */
2705 static inline const char *
2706 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
2708 switch (opnd
->shifter
.kind
)
2710 case AARCH64_MOD_UXTW
:
2711 case AARCH64_MOD_SXTW
:
2712 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
2714 case AARCH64_MOD_LSL
:
2715 case AARCH64_MOD_SXTX
:
2716 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
2723 /* Get the name of the SVE vector offset register in OPND, using the operand
2724 qualifier to decide whether the suffix should be .S or .D. */
2726 static inline const char *
2727 get_addr_sve_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
)
2729 assert (qualifier
== AARCH64_OPND_QLF_S_S
2730 || qualifier
== AARCH64_OPND_QLF_S_D
);
2731 return sve_reg
[qualifier
== AARCH64_OPND_QLF_S_D
][regno
];
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

typedef union
{
  uint64_t i;
  double   d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float    f;
} single_conv_t;

typedef union
{
  uint32_t i;
  float    f;
} half_conv_t;

/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      /* Build the high 32 bits of the double, then shift into place.  */
      imm = (imm8_7 << (63 - 32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62 - 32))		/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << (58 - 32)) | (imm8_6 << (57 - 32))
	| (imm8_6 << (56 - 32)) | (imm8_6 << (55 - 32))	/* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48 - 32));		/* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2798 /* Produce the string representation of the register list operand *OPND
2799 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2800 the register name that comes before the register number, such as "v". */
2802 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
2805 const int num_regs
= opnd
->reglist
.num_regs
;
2806 const int first_reg
= opnd
->reglist
.first_regno
;
2807 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2808 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2809 char tb
[8]; /* Temporary buffer. */
2811 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2812 assert (num_regs
>= 1 && num_regs
<= 4);
2814 /* Prepare the index if any. */
2815 if (opnd
->reglist
.has_index
)
2816 snprintf (tb
, 8, "[%" PRIi64
"]", opnd
->reglist
.index
);
2820 /* The hyphenated form is preferred for disassembly if there are
2821 more than two registers in the list, and the register numbers
2822 are monotonically increasing in increments of one. */
2823 if (num_regs
> 2 && last_reg
> first_reg
)
2824 snprintf (buf
, size
, "{%s%d.%s-%s%d.%s}%s", prefix
, first_reg
, qlf_name
,
2825 prefix
, last_reg
, qlf_name
, tb
);
2828 const int reg0
= first_reg
;
2829 const int reg1
= (first_reg
+ 1) & 0x1f;
2830 const int reg2
= (first_reg
+ 2) & 0x1f;
2831 const int reg3
= (first_reg
+ 3) & 0x1f;
2836 snprintf (buf
, size
, "{%s%d.%s}%s", prefix
, reg0
, qlf_name
, tb
);
2839 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s}%s", prefix
, reg0
, qlf_name
,
2840 prefix
, reg1
, qlf_name
, tb
);
2843 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2844 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2845 prefix
, reg2
, qlf_name
, tb
);
2848 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2849 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2850 prefix
, reg2
, qlf_name
, prefix
, reg3
, qlf_name
, tb
);
2856 /* Print the register+immediate address in OPND to BUF, which has SIZE
2857 characters. BASE is the name of the base register. */
2860 print_immediate_offset_address (char *buf
, size_t size
,
2861 const aarch64_opnd_info
*opnd
,
2864 if (opnd
->addr
.writeback
)
2866 if (opnd
->addr
.preind
)
2867 snprintf (buf
, size
, "[%s, #%d]!", base
, opnd
->addr
.offset
.imm
);
2869 snprintf (buf
, size
, "[%s], #%d", base
, opnd
->addr
.offset
.imm
);
2873 if (opnd
->shifter
.operator_present
)
2875 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL_VL
);
2876 snprintf (buf
, size
, "[%s, #%d, mul vl]",
2877 base
, opnd
->addr
.offset
.imm
);
2879 else if (opnd
->addr
.offset
.imm
)
2880 snprintf (buf
, size
, "[%s, #%d]", base
, opnd
->addr
.offset
.imm
);
2882 snprintf (buf
, size
, "[%s]", base
);
2886 /* Produce the string representation of the register offset address operand
2887 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2888 the names of the base and offset registers. */
2890 print_register_offset_address (char *buf
, size_t size
,
2891 const aarch64_opnd_info
*opnd
,
2892 const char *base
, const char *offset
)
2894 char tb
[16]; /* Temporary buffer. */
2895 bfd_boolean print_extend_p
= TRUE
;
2896 bfd_boolean print_amount_p
= TRUE
;
2897 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
2899 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
2900 || !opnd
->shifter
.amount_present
))
2902 /* Not print the shift/extend amount when the amount is zero and
2903 when it is not the special case of 8-bit load/store instruction. */
2904 print_amount_p
= FALSE
;
2905 /* Likewise, no need to print the shift operator LSL in such a
2907 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2908 print_extend_p
= FALSE
;
2911 /* Prepare for the extend/shift. */
2915 snprintf (tb
, sizeof (tb
), ", %s #%" PRIi64
, shift_name
,
2916 opnd
->shifter
.amount
);
2918 snprintf (tb
, sizeof (tb
), ", %s", shift_name
);
2923 snprintf (buf
, size
, "[%s, %s%s]", base
, offset
, tb
);
2926 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2927 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2928 PC, PCREL_P and ADDRESS are used to pass in and return information about
2929 the PC-relative address calculation, where the PC value is passed in
2930 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2931 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2932 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2934 The function serves both the disassembler and the assembler diagnostics
2935 issuer, which is the reason why it lives in this file. */
2938 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
2939 const aarch64_opcode
*opcode
,
2940 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
2943 unsigned int i
, num_conds
;
2944 const char *name
= NULL
;
2945 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
2946 enum aarch64_modifier_kind kind
;
2947 uint64_t addr
, enum_value
;
2955 case AARCH64_OPND_Rd
:
2956 case AARCH64_OPND_Rn
:
2957 case AARCH64_OPND_Rm
:
2958 case AARCH64_OPND_Rt
:
2959 case AARCH64_OPND_Rt2
:
2960 case AARCH64_OPND_Rs
:
2961 case AARCH64_OPND_Ra
:
2962 case AARCH64_OPND_Rt_SYS
:
2963 case AARCH64_OPND_PAIRREG
:
2964 case AARCH64_OPND_SVE_Rm
:
2965 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2966 the <ic_op>, therefore we we use opnd->present to override the
2967 generic optional-ness information. */
2968 if (opnd
->type
== AARCH64_OPND_Rt_SYS
&& !opnd
->present
)
2970 /* Omit the operand, e.g. RET. */
2971 if (optional_operand_p (opcode
, idx
)
2972 && opnd
->reg
.regno
== get_optional_operand_default_value (opcode
))
2974 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2975 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2976 snprintf (buf
, size
, "%s",
2977 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2980 case AARCH64_OPND_Rd_SP
:
2981 case AARCH64_OPND_Rn_SP
:
2982 case AARCH64_OPND_SVE_Rn_SP
:
2983 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2984 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
2985 || opnd
->qualifier
== AARCH64_OPND_QLF_X
2986 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
2987 snprintf (buf
, size
, "%s",
2988 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
2991 case AARCH64_OPND_Rm_EXT
:
2992 kind
= opnd
->shifter
.kind
;
2993 assert (idx
== 1 || idx
== 2);
2994 if ((aarch64_stack_pointer_p (opnds
)
2995 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
2996 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
2997 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
2998 && kind
== AARCH64_MOD_UXTW
)
2999 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
3000 && kind
== AARCH64_MOD_UXTX
)))
3002 /* 'LSL' is the preferred form in this case. */
3003 kind
= AARCH64_MOD_LSL
;
3004 if (opnd
->shifter
.amount
== 0)
3006 /* Shifter omitted. */
3007 snprintf (buf
, size
, "%s",
3008 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3012 if (opnd
->shifter
.amount
)
3013 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3014 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3015 aarch64_operand_modifiers
[kind
].name
,
3016 opnd
->shifter
.amount
);
3018 snprintf (buf
, size
, "%s, %s",
3019 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3020 aarch64_operand_modifiers
[kind
].name
);
3023 case AARCH64_OPND_Rm_SFT
:
3024 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3025 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3026 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3027 snprintf (buf
, size
, "%s",
3028 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3030 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3031 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3032 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3033 opnd
->shifter
.amount
);
3036 case AARCH64_OPND_Fd
:
3037 case AARCH64_OPND_Fn
:
3038 case AARCH64_OPND_Fm
:
3039 case AARCH64_OPND_Fa
:
3040 case AARCH64_OPND_Ft
:
3041 case AARCH64_OPND_Ft2
:
3042 case AARCH64_OPND_Sd
:
3043 case AARCH64_OPND_Sn
:
3044 case AARCH64_OPND_Sm
:
3045 case AARCH64_OPND_SVE_VZn
:
3046 case AARCH64_OPND_SVE_Vd
:
3047 case AARCH64_OPND_SVE_Vm
:
3048 case AARCH64_OPND_SVE_Vn
:
3049 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
3053 case AARCH64_OPND_Vd
:
3054 case AARCH64_OPND_Vn
:
3055 case AARCH64_OPND_Vm
:
3056 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
3057 aarch64_get_qualifier_name (opnd
->qualifier
));
3060 case AARCH64_OPND_Ed
:
3061 case AARCH64_OPND_En
:
3062 case AARCH64_OPND_Em
:
3063 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3064 aarch64_get_qualifier_name (opnd
->qualifier
),
3065 opnd
->reglane
.index
);
3068 case AARCH64_OPND_VdD1
:
3069 case AARCH64_OPND_VnD1
:
3070 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
3073 case AARCH64_OPND_LVn
:
3074 case AARCH64_OPND_LVt
:
3075 case AARCH64_OPND_LVt_AL
:
3076 case AARCH64_OPND_LEt
:
3077 print_register_list (buf
, size
, opnd
, "v");
3080 case AARCH64_OPND_SVE_Pd
:
3081 case AARCH64_OPND_SVE_Pg3
:
3082 case AARCH64_OPND_SVE_Pg4_5
:
3083 case AARCH64_OPND_SVE_Pg4_10
:
3084 case AARCH64_OPND_SVE_Pg4_16
:
3085 case AARCH64_OPND_SVE_Pm
:
3086 case AARCH64_OPND_SVE_Pn
:
3087 case AARCH64_OPND_SVE_Pt
:
3088 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3089 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
3090 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
3091 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
3092 snprintf (buf
, size
, "p%d/%s", opnd
->reg
.regno
,
3093 aarch64_get_qualifier_name (opnd
->qualifier
));
3095 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
3096 aarch64_get_qualifier_name (opnd
->qualifier
));
3099 case AARCH64_OPND_SVE_Za_5
:
3100 case AARCH64_OPND_SVE_Za_16
:
3101 case AARCH64_OPND_SVE_Zd
:
3102 case AARCH64_OPND_SVE_Zm_5
:
3103 case AARCH64_OPND_SVE_Zm_16
:
3104 case AARCH64_OPND_SVE_Zn
:
3105 case AARCH64_OPND_SVE_Zt
:
3106 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3107 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
3109 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
3110 aarch64_get_qualifier_name (opnd
->qualifier
));
3113 case AARCH64_OPND_SVE_ZnxN
:
3114 case AARCH64_OPND_SVE_ZtxN
:
3115 print_register_list (buf
, size
, opnd
, "z");
3118 case AARCH64_OPND_SVE_Zn_INDEX
:
3119 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3120 aarch64_get_qualifier_name (opnd
->qualifier
),
3121 opnd
->reglane
.index
);
3124 case AARCH64_OPND_Cn
:
3125 case AARCH64_OPND_Cm
:
3126 snprintf (buf
, size
, "C%d", opnd
->reg
.regno
);
3129 case AARCH64_OPND_IDX
:
3130 case AARCH64_OPND_IMM
:
3131 case AARCH64_OPND_WIDTH
:
3132 case AARCH64_OPND_UIMM3_OP1
:
3133 case AARCH64_OPND_UIMM3_OP2
:
3134 case AARCH64_OPND_BIT_NUM
:
3135 case AARCH64_OPND_IMM_VLSL
:
3136 case AARCH64_OPND_IMM_VLSR
:
3137 case AARCH64_OPND_SHLL_IMM
:
3138 case AARCH64_OPND_IMM0
:
3139 case AARCH64_OPND_IMMR
:
3140 case AARCH64_OPND_IMMS
:
3141 case AARCH64_OPND_FBITS
:
3142 case AARCH64_OPND_SIMM5
:
3143 case AARCH64_OPND_SVE_SHLIMM_PRED
:
3144 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
3145 case AARCH64_OPND_SVE_SHRIMM_PRED
:
3146 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
3147 case AARCH64_OPND_SVE_SIMM5
:
3148 case AARCH64_OPND_SVE_SIMM5B
:
3149 case AARCH64_OPND_SVE_SIMM6
:
3150 case AARCH64_OPND_SVE_SIMM8
:
3151 case AARCH64_OPND_SVE_UIMM3
:
3152 case AARCH64_OPND_SVE_UIMM7
:
3153 case AARCH64_OPND_SVE_UIMM8
:
3154 case AARCH64_OPND_SVE_UIMM8_53
:
3155 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3158 case AARCH64_OPND_SVE_I1_HALF_ONE
:
3159 case AARCH64_OPND_SVE_I1_HALF_TWO
:
3160 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
3163 c
.i
= opnd
->imm
.value
;
3164 snprintf (buf
, size
, "#%.1f", c
.f
);
3168 case AARCH64_OPND_SVE_PATTERN
:
3169 if (optional_operand_p (opcode
, idx
)
3170 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3172 enum_value
= opnd
->imm
.value
;
3173 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3174 if (aarch64_sve_pattern_array
[enum_value
])
3175 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[enum_value
]);
3177 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3180 case AARCH64_OPND_SVE_PATTERN_SCALED
:
3181 if (optional_operand_p (opcode
, idx
)
3182 && !opnd
->shifter
.operator_present
3183 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3185 enum_value
= opnd
->imm
.value
;
3186 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3187 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
3188 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[opnd
->imm
.value
]);
3190 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3191 if (opnd
->shifter
.operator_present
)
3193 size_t len
= strlen (buf
);
3194 snprintf (buf
+ len
, size
- len
, ", %s #%" PRIi64
,
3195 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3196 opnd
->shifter
.amount
);
3200 case AARCH64_OPND_SVE_PRFOP
:
3201 enum_value
= opnd
->imm
.value
;
3202 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
3203 if (aarch64_sve_prfop_array
[enum_value
])
3204 snprintf (buf
, size
, "%s", aarch64_sve_prfop_array
[enum_value
]);
3206 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3209 case AARCH64_OPND_IMM_MOV
:
3210 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3212 case 4: /* e.g. MOV Wd, #<imm32>. */
3214 int imm32
= opnd
->imm
.value
;
3215 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
3218 case 8: /* e.g. MOV Xd, #<imm64>. */
3219 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
3220 opnd
->imm
.value
, opnd
->imm
.value
);
3222 default: assert (0);
3226 case AARCH64_OPND_FPIMM0
:
3227 snprintf (buf
, size
, "#0.0");
3230 case AARCH64_OPND_LIMM
:
3231 case AARCH64_OPND_AIMM
:
3232 case AARCH64_OPND_HALF
:
3233 case AARCH64_OPND_SVE_INV_LIMM
:
3234 case AARCH64_OPND_SVE_LIMM
:
3235 case AARCH64_OPND_SVE_LIMM_MOV
:
3236 if (opnd
->shifter
.amount
)
3237 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3238 opnd
->shifter
.amount
);
3240 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3243 case AARCH64_OPND_SIMD_IMM
:
3244 case AARCH64_OPND_SIMD_IMM_SFT
:
3245 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3246 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
3247 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3249 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%" PRIi64
, opnd
->imm
.value
,
3250 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3251 opnd
->shifter
.amount
);
3254 case AARCH64_OPND_SVE_AIMM
:
3255 case AARCH64_OPND_SVE_ASIMM
:
3256 if (opnd
->shifter
.amount
)
3257 snprintf (buf
, size
, "#%" PRIi64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3258 opnd
->shifter
.amount
);
3260 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3263 case AARCH64_OPND_FPIMM
:
3264 case AARCH64_OPND_SIMD_FPIMM
:
3265 case AARCH64_OPND_SVE_FPIMM8
:
3266 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3268 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3271 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
3272 snprintf (buf
, size
, "#%.18e", c
.f
);
3275 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3278 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
3279 snprintf (buf
, size
, "#%.18e", c
.f
);
3282 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3285 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
3286 snprintf (buf
, size
, "#%.18e", c
.d
);
3289 default: assert (0);
3293 case AARCH64_OPND_CCMP_IMM
:
3294 case AARCH64_OPND_NZCV
:
3295 case AARCH64_OPND_EXCEPTION
:
3296 case AARCH64_OPND_UIMM4
:
3297 case AARCH64_OPND_UIMM7
:
3298 if (optional_operand_p (opcode
, idx
) == TRUE
3299 && (opnd
->imm
.value
==
3300 (int64_t) get_optional_operand_default_value (opcode
)))
3301 /* Omit the operand, e.g. DCPS1. */
3303 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
3306 case AARCH64_OPND_COND
:
3307 case AARCH64_OPND_COND1
:
3308 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
3309 num_conds
= ARRAY_SIZE (opnd
->cond
->names
);
3310 for (i
= 1; i
< num_conds
&& opnd
->cond
->names
[i
]; ++i
)
3312 size_t len
= strlen (buf
);
3314 snprintf (buf
+ len
, size
- len
, " // %s = %s",
3315 opnd
->cond
->names
[0], opnd
->cond
->names
[i
]);
3317 snprintf (buf
+ len
, size
- len
, ", %s",
3318 opnd
->cond
->names
[i
]);
3322 case AARCH64_OPND_ADDR_ADRP
:
3323 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
3329 /* This is not necessary during the disassembling, as print_address_func
3330 in the disassemble_info will take care of the printing. But some
3331 other callers may be still interested in getting the string in *STR,
3332 so here we do snprintf regardless. */
3333 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3336 case AARCH64_OPND_ADDR_PCREL14
:
3337 case AARCH64_OPND_ADDR_PCREL19
:
3338 case AARCH64_OPND_ADDR_PCREL21
:
3339 case AARCH64_OPND_ADDR_PCREL26
:
3340 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
3345 /* This is not necessary during the disassembling, as print_address_func
3346 in the disassemble_info will take care of the printing. But some
3347 other callers may be still interested in getting the string in *STR,
3348 so here we do snprintf regardless. */
3349 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3352 case AARCH64_OPND_ADDR_SIMPLE
:
3353 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
3354 case AARCH64_OPND_SIMD_ADDR_POST
:
3355 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3356 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
3358 if (opnd
->addr
.offset
.is_reg
)
3359 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
3361 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
3364 snprintf (buf
, size
, "[%s]", name
);
3367 case AARCH64_OPND_ADDR_REGOFF
:
3368 case AARCH64_OPND_SVE_ADDR_RR
:
3369 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
3370 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
3371 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
3372 case AARCH64_OPND_SVE_ADDR_RX
:
3373 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
3374 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
3375 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
3376 print_register_offset_address
3377 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3378 get_offset_int_reg_name (opnd
));
3381 case AARCH64_OPND_SVE_ADDR_RZ
:
3382 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
3383 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
3384 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
3385 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
3386 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
3387 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
3388 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
3389 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
3390 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
3391 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
3392 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
3393 print_register_offset_address
3394 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3395 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3398 case AARCH64_OPND_ADDR_SIMM7
:
3399 case AARCH64_OPND_ADDR_SIMM9
:
3400 case AARCH64_OPND_ADDR_SIMM9_2
:
3401 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
3402 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
3403 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
3404 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
3405 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
3406 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
3407 case AARCH64_OPND_SVE_ADDR_RI_U6
:
3408 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
3409 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
3410 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
3411 print_immediate_offset_address
3412 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
3415 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
3416 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
3417 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
3418 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
3419 print_immediate_offset_address
3421 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
));
3424 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
3425 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
3426 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
3427 print_register_offset_address
3429 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
3430 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3433 case AARCH64_OPND_ADDR_UIMM12
:
3434 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3435 if (opnd
->addr
.offset
.imm
)
3436 snprintf (buf
, size
, "[%s, #%d]", name
, opnd
->addr
.offset
.imm
);
3438 snprintf (buf
, size
, "[%s]", name
);
3441 case AARCH64_OPND_SYSREG
:
3442 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
3443 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
3444 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
]))
3446 if (aarch64_sys_regs
[i
].name
)
3447 snprintf (buf
, size
, "%s", aarch64_sys_regs
[i
].name
);
3450 /* Implementation defined system register. */
3451 unsigned int value
= opnd
->sysreg
;
3452 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
3453 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
3458 case AARCH64_OPND_PSTATEFIELD
:
3459 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
3460 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
3462 assert (aarch64_pstatefields
[i
].name
);
3463 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
3466 case AARCH64_OPND_SYSREG_AT
:
3467 case AARCH64_OPND_SYSREG_DC
:
3468 case AARCH64_OPND_SYSREG_IC
:
3469 case AARCH64_OPND_SYSREG_TLBI
:
3470 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
3473 case AARCH64_OPND_BARRIER
:
3474 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
3477 case AARCH64_OPND_BARRIER_ISB
:
3478 /* Operand can be omitted, e.g. in DCPS1. */
3479 if (! optional_operand_p (opcode
, idx
)
3480 || (opnd
->barrier
->value
3481 != get_optional_operand_default_value (opcode
)))
3482 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
3485 case AARCH64_OPND_PRFOP
:
3486 if (opnd
->prfop
->name
!= NULL
)
3487 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
3489 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
3492 case AARCH64_OPND_BARRIER_PSB
:
3493 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
3501 #define CPENC(op0,op1,crn,crm,op2) \
3502 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3503 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3504 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3505 /* for 3.9.10 System Instructions */
3506 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
3528 #define F_DEPRECATED 0x1 /* Deprecated system register. */
3533 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
3538 #define F_HASXT 0x4 /* System instruction register <Xt>
3542 /* TODO there are two more issues need to be resolved
3543 1. handle read-only and write-only system registers
3544 2. handle cpu-implementation-defined system registers. */
3545 const aarch64_sys_reg aarch64_sys_regs
[] =
3547 { "spsr_el1", CPEN_(0,C0
,0), 0 }, /* = spsr_svc */
3548 { "spsr_el12", CPEN_ (5, C0
, 0), F_ARCHEXT
},
3549 { "elr_el1", CPEN_(0,C0
,1), 0 },
3550 { "elr_el12", CPEN_ (5, C0
, 1), F_ARCHEXT
},
3551 { "sp_el0", CPEN_(0,C1
,0), 0 },
3552 { "spsel", CPEN_(0,C2
,0), 0 },
3553 { "daif", CPEN_(3,C2
,1), 0 },
3554 { "currentel", CPEN_(0,C2
,2), 0 }, /* RO */
3555 { "pan", CPEN_(0,C2
,3), F_ARCHEXT
},
3556 { "uao", CPEN_ (0, C2
, 4), F_ARCHEXT
},
3557 { "nzcv", CPEN_(3,C2
,0), 0 },
3558 { "fpcr", CPEN_(3,C4
,0), 0 },
3559 { "fpsr", CPEN_(3,C4
,1), 0 },
3560 { "dspsr_el0", CPEN_(3,C5
,0), 0 },
3561 { "dlr_el0", CPEN_(3,C5
,1), 0 },
3562 { "spsr_el2", CPEN_(4,C0
,0), 0 }, /* = spsr_hyp */
3563 { "elr_el2", CPEN_(4,C0
,1), 0 },
3564 { "sp_el1", CPEN_(4,C1
,0), 0 },
3565 { "spsr_irq", CPEN_(4,C3
,0), 0 },
3566 { "spsr_abt", CPEN_(4,C3
,1), 0 },
3567 { "spsr_und", CPEN_(4,C3
,2), 0 },
3568 { "spsr_fiq", CPEN_(4,C3
,3), 0 },
3569 { "spsr_el3", CPEN_(6,C0
,0), 0 },
3570 { "elr_el3", CPEN_(6,C0
,1), 0 },
3571 { "sp_el2", CPEN_(6,C1
,0), 0 },
3572 { "spsr_svc", CPEN_(0,C0
,0), F_DEPRECATED
}, /* = spsr_el1 */
3573 { "spsr_hyp", CPEN_(4,C0
,0), F_DEPRECATED
}, /* = spsr_el2 */
3574 { "midr_el1", CPENC(3,0,C0
,C0
,0), 0 }, /* RO */
3575 { "ctr_el0", CPENC(3,3,C0
,C0
,1), 0 }, /* RO */
3576 { "mpidr_el1", CPENC(3,0,C0
,C0
,5), 0 }, /* RO */
3577 { "revidr_el1", CPENC(3,0,C0
,C0
,6), 0 }, /* RO */
3578 { "aidr_el1", CPENC(3,1,C0
,C0
,7), 0 }, /* RO */
3579 { "dczid_el0", CPENC(3,3,C0
,C0
,7), 0 }, /* RO */
3580 { "id_dfr0_el1", CPENC(3,0,C0
,C1
,2), 0 }, /* RO */
3581 { "id_pfr0_el1", CPENC(3,0,C0
,C1
,0), 0 }, /* RO */
3582 { "id_pfr1_el1", CPENC(3,0,C0
,C1
,1), 0 }, /* RO */
3583 { "id_afr0_el1", CPENC(3,0,C0
,C1
,3), 0 }, /* RO */
3584 { "id_mmfr0_el1", CPENC(3,0,C0
,C1
,4), 0 }, /* RO */
3585 { "id_mmfr1_el1", CPENC(3,0,C0
,C1
,5), 0 }, /* RO */
3586 { "id_mmfr2_el1", CPENC(3,0,C0
,C1
,6), 0 }, /* RO */
3587 { "id_mmfr3_el1", CPENC(3,0,C0
,C1
,7), 0 }, /* RO */
3588 { "id_mmfr4_el1", CPENC(3,0,C0
,C2
,6), 0 }, /* RO */
3589 { "id_isar0_el1", CPENC(3,0,C0
,C2
,0), 0 }, /* RO */
3590 { "id_isar1_el1", CPENC(3,0,C0
,C2
,1), 0 }, /* RO */
3591 { "id_isar2_el1", CPENC(3,0,C0
,C2
,2), 0 }, /* RO */
3592 { "id_isar3_el1", CPENC(3,0,C0
,C2
,3), 0 }, /* RO */
3593 { "id_isar4_el1", CPENC(3,0,C0
,C2
,4), 0 }, /* RO */
3594 { "id_isar5_el1", CPENC(3,0,C0
,C2
,5), 0 }, /* RO */
3595 { "mvfr0_el1", CPENC(3,0,C0
,C3
,0), 0 }, /* RO */
3596 { "mvfr1_el1", CPENC(3,0,C0
,C3
,1), 0 }, /* RO */
3597 { "mvfr2_el1", CPENC(3,0,C0
,C3
,2), 0 }, /* RO */
3598 { "ccsidr_el1", CPENC(3,1,C0
,C0
,0), 0 }, /* RO */
3599 { "id_aa64pfr0_el1", CPENC(3,0,C0
,C4
,0), 0 }, /* RO */
3600 { "id_aa64pfr1_el1", CPENC(3,0,C0
,C4
,1), 0 }, /* RO */
3601 { "id_aa64dfr0_el1", CPENC(3,0,C0
,C5
,0), 0 }, /* RO */
3602 { "id_aa64dfr1_el1", CPENC(3,0,C0
,C5
,1), 0 }, /* RO */
3603 { "id_aa64isar0_el1", CPENC(3,0,C0
,C6
,0), 0 }, /* RO */
3604 { "id_aa64isar1_el1", CPENC(3,0,C0
,C6
,1), 0 }, /* RO */
3605 { "id_aa64mmfr0_el1", CPENC(3,0,C0
,C7
,0), 0 }, /* RO */
3606 { "id_aa64mmfr1_el1", CPENC(3,0,C0
,C7
,1), 0 }, /* RO */
3607 { "id_aa64mmfr2_el1", CPENC (3, 0, C0
, C7
, 2), F_ARCHEXT
}, /* RO */
3608 { "id_aa64afr0_el1", CPENC(3,0,C0
,C5
,4), 0 }, /* RO */
3609 { "id_aa64afr1_el1", CPENC(3,0,C0
,C5
,5), 0 }, /* RO */
3610 { "clidr_el1", CPENC(3,1,C0
,C0
,1), 0 }, /* RO */
3611 { "csselr_el1", CPENC(3,2,C0
,C0
,0), 0 }, /* RO */
3612 { "vpidr_el2", CPENC(3,4,C0
,C0
,0), 0 },
3613 { "vmpidr_el2", CPENC(3,4,C0
,C0
,5), 0 },
3614 { "sctlr_el1", CPENC(3,0,C1
,C0
,0), 0 },
3615 { "sctlr_el2", CPENC(3,4,C1
,C0
,0), 0 },
3616 { "sctlr_el3", CPENC(3,6,C1
,C0
,0), 0 },
3617 { "sctlr_el12", CPENC (3, 5, C1
, C0
, 0), F_ARCHEXT
},
3618 { "actlr_el1", CPENC(3,0,C1
,C0
,1), 0 },
3619 { "actlr_el2", CPENC(3,4,C1
,C0
,1), 0 },
3620 { "actlr_el3", CPENC(3,6,C1
,C0
,1), 0 },
3621 { "cpacr_el1", CPENC(3,0,C1
,C0
,2), 0 },
3622 { "cpacr_el12", CPENC (3, 5, C1
, C0
, 2), F_ARCHEXT
},
3623 { "cptr_el2", CPENC(3,4,C1
,C1
,2), 0 },
3624 { "cptr_el3", CPENC(3,6,C1
,C1
,2), 0 },
3625 { "scr_el3", CPENC(3,6,C1
,C1
,0), 0 },
3626 { "hcr_el2", CPENC(3,4,C1
,C1
,0), 0 },
3627 { "mdcr_el2", CPENC(3,4,C1
,C1
,1), 0 },
3628 { "mdcr_el3", CPENC(3,6,C1
,C3
,1), 0 },
3629 { "hstr_el2", CPENC(3,4,C1
,C1
,3), 0 },
3630 { "hacr_el2", CPENC(3,4,C1
,C1
,7), 0 },
3631 { "ttbr0_el1", CPENC(3,0,C2
,C0
,0), 0 },
3632 { "ttbr1_el1", CPENC(3,0,C2
,C0
,1), 0 },
3633 { "ttbr0_el2", CPENC(3,4,C2
,C0
,0), 0 },
3634 { "ttbr1_el2", CPENC (3, 4, C2
, C0
, 1), F_ARCHEXT
},
3635 { "ttbr0_el3", CPENC(3,6,C2
,C0
,0), 0 },
3636 { "ttbr0_el12", CPENC (3, 5, C2
, C0
, 0), F_ARCHEXT
},
3637 { "ttbr1_el12", CPENC (3, 5, C2
, C0
, 1), F_ARCHEXT
},
3638 { "vttbr_el2", CPENC(3,4,C2
,C1
,0), 0 },
3639 { "tcr_el1", CPENC(3,0,C2
,C0
,2), 0 },
3640 { "tcr_el2", CPENC(3,4,C2
,C0
,2), 0 },
3641 { "tcr_el3", CPENC(3,6,C2
,C0
,2), 0 },
3642 { "tcr_el12", CPENC (3, 5, C2
, C0
, 2), F_ARCHEXT
},
3643 { "vtcr_el2", CPENC(3,4,C2
,C1
,2), 0 },
3644 { "afsr0_el1", CPENC(3,0,C5
,C1
,0), 0 },
3645 { "afsr1_el1", CPENC(3,0,C5
,C1
,1), 0 },
3646 { "afsr0_el2", CPENC(3,4,C5
,C1
,0), 0 },
3647 { "afsr1_el2", CPENC(3,4,C5
,C1
,1), 0 },
3648 { "afsr0_el3", CPENC(3,6,C5
,C1
,0), 0 },
3649 { "afsr0_el12", CPENC (3, 5, C5
, C1
, 0), F_ARCHEXT
},
3650 { "afsr1_el3", CPENC(3,6,C5
,C1
,1), 0 },
3651 { "afsr1_el12", CPENC (3, 5, C5
, C1
, 1), F_ARCHEXT
},
3652 { "esr_el1", CPENC(3,0,C5
,C2
,0), 0 },
3653 { "esr_el2", CPENC(3,4,C5
,C2
,0), 0 },
3654 { "esr_el3", CPENC(3,6,C5
,C2
,0), 0 },
3655 { "esr_el12", CPENC (3, 5, C5
, C2
, 0), F_ARCHEXT
},
3656 { "vsesr_el2", CPENC (3, 4, C5
, C2
, 3), F_ARCHEXT
}, /* RO */
3657 { "fpexc32_el2", CPENC(3,4,C5
,C3
,0), 0 },
3658 { "erridr_el1", CPENC (3, 0, C5
, C3
, 0), F_ARCHEXT
}, /* RO */
3659 { "errselr_el1", CPENC (3, 0, C5
, C3
, 1), F_ARCHEXT
},
3660 { "erxfr_el1", CPENC (3, 0, C5
, C4
, 0), F_ARCHEXT
}, /* RO */
3661 { "erxctlr_el1", CPENC (3, 0, C5
, C4
, 1), F_ARCHEXT
},
3662 { "erxstatus_el1", CPENC (3, 0, C5
, C4
, 2), F_ARCHEXT
},
3663 { "erxaddr_el1", CPENC (3, 0, C5
, C4
, 3), F_ARCHEXT
},
3664 { "erxmisc0_el1", CPENC (3, 0, C5
, C5
, 0), F_ARCHEXT
},
3665 { "erxmisc1_el1", CPENC (3, 0, C5
, C5
, 1), F_ARCHEXT
},
3666 { "far_el1", CPENC(3,0,C6
,C0
,0), 0 },
3667 { "far_el2", CPENC(3,4,C6
,C0
,0), 0 },
3668 { "far_el3", CPENC(3,6,C6
,C0
,0), 0 },
3669 { "far_el12", CPENC (3, 5, C6
, C0
, 0), F_ARCHEXT
},
3670 { "hpfar_el2", CPENC(3,4,C6
,C0
,4), 0 },
3671 { "par_el1", CPENC(3,0,C7
,C4
,0), 0 },
3672 { "mair_el1", CPENC(3,0,C10
,C2
,0), 0 },
3673 { "mair_el2", CPENC(3,4,C10
,C2
,0), 0 },
3674 { "mair_el3", CPENC(3,6,C10
,C2
,0), 0 },
3675 { "mair_el12", CPENC (3, 5, C10
, C2
, 0), F_ARCHEXT
},
3676 { "amair_el1", CPENC(3,0,C10
,C3
,0), 0 },
3677 { "amair_el2", CPENC(3,4,C10
,C3
,0), 0 },
3678 { "amair_el3", CPENC(3,6,C10
,C3
,0), 0 },
3679 { "amair_el12", CPENC (3, 5, C10
, C3
, 0), F_ARCHEXT
},
3680 { "vbar_el1", CPENC(3,0,C12
,C0
,0), 0 },
3681 { "vbar_el2", CPENC(3,4,C12
,C0
,0), 0 },
3682 { "vbar_el3", CPENC(3,6,C12
,C0
,0), 0 },
3683 { "vbar_el12", CPENC (3, 5, C12
, C0
, 0), F_ARCHEXT
},
3684 { "rvbar_el1", CPENC(3,0,C12
,C0
,1), 0 }, /* RO */
3685 { "rvbar_el2", CPENC(3,4,C12
,C0
,1), 0 }, /* RO */
3686 { "rvbar_el3", CPENC(3,6,C12
,C0
,1), 0 }, /* RO */
3687 { "rmr_el1", CPENC(3,0,C12
,C0
,2), 0 },
3688 { "rmr_el2", CPENC(3,4,C12
,C0
,2), 0 },
3689 { "rmr_el3", CPENC(3,6,C12
,C0
,2), 0 },
3690 { "isr_el1", CPENC(3,0,C12
,C1
,0), 0 }, /* RO */
3691 { "disr_el1", CPENC (3, 0, C12
, C1
, 1), F_ARCHEXT
},
3692 { "vdisr_el2", CPENC (3, 4, C12
, C1
, 1), F_ARCHEXT
},
3693 { "contextidr_el1", CPENC(3,0,C13
,C0
,1), 0 },
3694 { "contextidr_el2", CPENC (3, 4, C13
, C0
, 1), F_ARCHEXT
},
3695 { "contextidr_el12", CPENC (3, 5, C13
, C0
, 1), F_ARCHEXT
},
3696 { "tpidr_el0", CPENC(3,3,C13
,C0
,2), 0 },
3697 { "tpidrro_el0", CPENC(3,3,C13
,C0
,3), 0 }, /* RO */
3698 { "tpidr_el1", CPENC(3,0,C13
,C0
,4), 0 },
3699 { "tpidr_el2", CPENC(3,4,C13
,C0
,2), 0 },
3700 { "tpidr_el3", CPENC(3,6,C13
,C0
,2), 0 },
3701 { "teecr32_el1", CPENC(2,2,C0
, C0
,0), 0 }, /* See section 3.9.7.1 */
3702 { "cntfrq_el0", CPENC(3,3,C14
,C0
,0), 0 }, /* RO */
3703 { "cntpct_el0", CPENC(3,3,C14
,C0
,1), 0 }, /* RO */
3704 { "cntvct_el0", CPENC(3,3,C14
,C0
,2), 0 }, /* RO */
3705 { "cntvoff_el2", CPENC(3,4,C14
,C0
,3), 0 },
3706 { "cntkctl_el1", CPENC(3,0,C14
,C1
,0), 0 },
3707 { "cntkctl_el12", CPENC (3, 5, C14
, C1
, 0), F_ARCHEXT
},
3708 { "cnthctl_el2", CPENC(3,4,C14
,C1
,0), 0 },
3709 { "cntp_tval_el0", CPENC(3,3,C14
,C2
,0), 0 },
3710 { "cntp_tval_el02", CPENC (3, 5, C14
, C2
, 0), F_ARCHEXT
},
3711 { "cntp_ctl_el0", CPENC(3,3,C14
,C2
,1), 0 },
3712 { "cntp_ctl_el02", CPENC (3, 5, C14
, C2
, 1), F_ARCHEXT
},
3713 { "cntp_cval_el0", CPENC(3,3,C14
,C2
,2), 0 },
3714 { "cntp_cval_el02", CPENC (3, 5, C14
, C2
, 2), F_ARCHEXT
},
3715 { "cntv_tval_el0", CPENC(3,3,C14
,C3
,0), 0 },
3716 { "cntv_tval_el02", CPENC (3, 5, C14
, C3
, 0), F_ARCHEXT
},
3717 { "cntv_ctl_el0", CPENC(3,3,C14
,C3
,1), 0 },
3718 { "cntv_ctl_el02", CPENC (3, 5, C14
, C3
, 1), F_ARCHEXT
},
3719 { "cntv_cval_el0", CPENC(3,3,C14
,C3
,2), 0 },
3720 { "cntv_cval_el02", CPENC (3, 5, C14
, C3
, 2), F_ARCHEXT
},
3721 { "cnthp_tval_el2", CPENC(3,4,C14
,C2
,0), 0 },
3722 { "cnthp_ctl_el2", CPENC(3,4,C14
,C2
,1), 0 },
3723 { "cnthp_cval_el2", CPENC(3,4,C14
,C2
,2), 0 },
3724 { "cntps_tval_el1", CPENC(3,7,C14
,C2
,0), 0 },
3725 { "cntps_ctl_el1", CPENC(3,7,C14
,C2
,1), 0 },
3726 { "cntps_cval_el1", CPENC(3,7,C14
,C2
,2), 0 },
3727 { "cnthv_tval_el2", CPENC (3, 4, C14
, C3
, 0), F_ARCHEXT
},
3728 { "cnthv_ctl_el2", CPENC (3, 4, C14
, C3
, 1), F_ARCHEXT
},
3729 { "cnthv_cval_el2", CPENC (3, 4, C14
, C3
, 2), F_ARCHEXT
},
3730 { "dacr32_el2", CPENC(3,4,C3
,C0
,0), 0 },
3731 { "ifsr32_el2", CPENC(3,4,C5
,C0
,1), 0 },
3732 { "teehbr32_el1", CPENC(2,2,C1
,C0
,0), 0 },
3733 { "sder32_el3", CPENC(3,6,C1
,C1
,1), 0 },
3734 { "mdscr_el1", CPENC(2,0,C0
, C2
, 2), 0 },
3735 { "mdccsr_el0", CPENC(2,3,C0
, C1
, 0), 0 }, /* r */
3736 { "mdccint_el1", CPENC(2,0,C0
, C2
, 0), 0 },
3737 { "dbgdtr_el0", CPENC(2,3,C0
, C4
, 0), 0 },
3738 { "dbgdtrrx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* r */
3739 { "dbgdtrtx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* w */
3740 { "osdtrrx_el1", CPENC(2,0,C0
, C0
, 2), 0 }, /* r */
3741 { "osdtrtx_el1", CPENC(2,0,C0
, C3
, 2), 0 }, /* w */
3742 { "oseccr_el1", CPENC(2,0,C0
, C6
, 2), 0 },
3743 { "dbgvcr32_el2", CPENC(2,4,C0
, C7
, 0), 0 },
3744 { "dbgbvr0_el1", CPENC(2,0,C0
, C0
, 4), 0 },
3745 { "dbgbvr1_el1", CPENC(2,0,C0
, C1
, 4), 0 },
3746 { "dbgbvr2_el1", CPENC(2,0,C0
, C2
, 4), 0 },
3747 { "dbgbvr3_el1", CPENC(2,0,C0
, C3
, 4), 0 },
3748 { "dbgbvr4_el1", CPENC(2,0,C0
, C4
, 4), 0 },
3749 { "dbgbvr5_el1", CPENC(2,0,C0
, C5
, 4), 0 },
3750 { "dbgbvr6_el1", CPENC(2,0,C0
, C6
, 4), 0 },
3751 { "dbgbvr7_el1", CPENC(2,0,C0
, C7
, 4), 0 },
3752 { "dbgbvr8_el1", CPENC(2,0,C0
, C8
, 4), 0 },
3753 { "dbgbvr9_el1", CPENC(2,0,C0
, C9
, 4), 0 },
3754 { "dbgbvr10_el1", CPENC(2,0,C0
, C10
,4), 0 },
3755 { "dbgbvr11_el1", CPENC(2,0,C0
, C11
,4), 0 },
3756 { "dbgbvr12_el1", CPENC(2,0,C0
, C12
,4), 0 },
3757 { "dbgbvr13_el1", CPENC(2,0,C0
, C13
,4), 0 },
3758 { "dbgbvr14_el1", CPENC(2,0,C0
, C14
,4), 0 },
3759 { "dbgbvr15_el1", CPENC(2,0,C0
, C15
,4), 0 },
3760 { "dbgbcr0_el1", CPENC(2,0,C0
, C0
, 5), 0 },
3761 { "dbgbcr1_el1", CPENC(2,0,C0
, C1
, 5), 0 },
3762 { "dbgbcr2_el1", CPENC(2,0,C0
, C2
, 5), 0 },
3763 { "dbgbcr3_el1", CPENC(2,0,C0
, C3
, 5), 0 },
3764 { "dbgbcr4_el1", CPENC(2,0,C0
, C4
, 5), 0 },
3765 { "dbgbcr5_el1", CPENC(2,0,C0
, C5
, 5), 0 },
3766 { "dbgbcr6_el1", CPENC(2,0,C0
, C6
, 5), 0 },
3767 { "dbgbcr7_el1", CPENC(2,0,C0
, C7
, 5), 0 },
3768 { "dbgbcr8_el1", CPENC(2,0,C0
, C8
, 5), 0 },
3769 { "dbgbcr9_el1", CPENC(2,0,C0
, C9
, 5), 0 },
3770 { "dbgbcr10_el1", CPENC(2,0,C0
, C10
,5), 0 },
3771 { "dbgbcr11_el1", CPENC(2,0,C0
, C11
,5), 0 },
3772 { "dbgbcr12_el1", CPENC(2,0,C0
, C12
,5), 0 },
3773 { "dbgbcr13_el1", CPENC(2,0,C0
, C13
,5), 0 },
3774 { "dbgbcr14_el1", CPENC(2,0,C0
, C14
,5), 0 },
3775 { "dbgbcr15_el1", CPENC(2,0,C0
, C15
,5), 0 },
3776 { "dbgwvr0_el1", CPENC(2,0,C0
, C0
, 6), 0 },
3777 { "dbgwvr1_el1", CPENC(2,0,C0
, C1
, 6), 0 },
3778 { "dbgwvr2_el1", CPENC(2,0,C0
, C2
, 6), 0 },
3779 { "dbgwvr3_el1", CPENC(2,0,C0
, C3
, 6), 0 },
3780 { "dbgwvr4_el1", CPENC(2,0,C0
, C4
, 6), 0 },
3781 { "dbgwvr5_el1", CPENC(2,0,C0
, C5
, 6), 0 },
3782 { "dbgwvr6_el1", CPENC(2,0,C0
, C6
, 6), 0 },
3783 { "dbgwvr7_el1", CPENC(2,0,C0
, C7
, 6), 0 },
3784 { "dbgwvr8_el1", CPENC(2,0,C0
, C8
, 6), 0 },
3785 { "dbgwvr9_el1", CPENC(2,0,C0
, C9
, 6), 0 },
3786 { "dbgwvr10_el1", CPENC(2,0,C0
, C10
,6), 0 },
3787 { "dbgwvr11_el1", CPENC(2,0,C0
, C11
,6), 0 },
3788 { "dbgwvr12_el1", CPENC(2,0,C0
, C12
,6), 0 },
3789 { "dbgwvr13_el1", CPENC(2,0,C0
, C13
,6), 0 },
3790 { "dbgwvr14_el1", CPENC(2,0,C0
, C14
,6), 0 },
3791 { "dbgwvr15_el1", CPENC(2,0,C0
, C15
,6), 0 },
3792 { "dbgwcr0_el1", CPENC(2,0,C0
, C0
, 7), 0 },
3793 { "dbgwcr1_el1", CPENC(2,0,C0
, C1
, 7), 0 },
3794 { "dbgwcr2_el1", CPENC(2,0,C0
, C2
, 7), 0 },
3795 { "dbgwcr3_el1", CPENC(2,0,C0
, C3
, 7), 0 },
3796 { "dbgwcr4_el1", CPENC(2,0,C0
, C4
, 7), 0 },
3797 { "dbgwcr5_el1", CPENC(2,0,C0
, C5
, 7), 0 },
3798 { "dbgwcr6_el1", CPENC(2,0,C0
, C6
, 7), 0 },
3799 { "dbgwcr7_el1", CPENC(2,0,C0
, C7
, 7), 0 },
3800 { "dbgwcr8_el1", CPENC(2,0,C0
, C8
, 7), 0 },
3801 { "dbgwcr9_el1", CPENC(2,0,C0
, C9
, 7), 0 },
3802 { "dbgwcr10_el1", CPENC(2,0,C0
, C10
,7), 0 },
3803 { "dbgwcr11_el1", CPENC(2,0,C0
, C11
,7), 0 },
3804 { "dbgwcr12_el1", CPENC(2,0,C0
, C12
,7), 0 },
3805 { "dbgwcr13_el1", CPENC(2,0,C0
, C13
,7), 0 },
3806 { "dbgwcr14_el1", CPENC(2,0,C0
, C14
,7), 0 },
3807 { "dbgwcr15_el1", CPENC(2,0,C0
, C15
,7), 0 },
3808 { "mdrar_el1", CPENC(2,0,C1
, C0
, 0), 0 }, /* r */
3809 { "oslar_el1", CPENC(2,0,C1
, C0
, 4), 0 }, /* w */
3810 { "oslsr_el1", CPENC(2,0,C1
, C1
, 4), 0 }, /* r */
3811 { "osdlr_el1", CPENC(2,0,C1
, C3
, 4), 0 },
3812 { "dbgprcr_el1", CPENC(2,0,C1
, C4
, 4), 0 },
3813 { "dbgclaimset_el1", CPENC(2,0,C7
, C8
, 6), 0 },
3814 { "dbgclaimclr_el1", CPENC(2,0,C7
, C9
, 6), 0 },
3815 { "dbgauthstatus_el1", CPENC(2,0,C7
, C14
,6), 0 }, /* r */
3816 { "pmblimitr_el1", CPENC (3, 0, C9
, C10
, 0), F_ARCHEXT
}, /* rw */
3817 { "pmbptr_el1", CPENC (3, 0, C9
, C10
, 1), F_ARCHEXT
}, /* rw */
3818 { "pmbsr_el1", CPENC (3, 0, C9
, C10
, 3), F_ARCHEXT
}, /* rw */
3819 { "pmbidr_el1", CPENC (3, 0, C9
, C10
, 7), F_ARCHEXT
}, /* ro */
3820 { "pmscr_el1", CPENC (3, 0, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3821 { "pmsicr_el1", CPENC (3, 0, C9
, C9
, 2), F_ARCHEXT
}, /* rw */
3822 { "pmsirr_el1", CPENC (3, 0, C9
, C9
, 3), F_ARCHEXT
}, /* rw */
3823 { "pmsfcr_el1", CPENC (3, 0, C9
, C9
, 4), F_ARCHEXT
}, /* rw */
3824 { "pmsevfr_el1", CPENC (3, 0, C9
, C9
, 5), F_ARCHEXT
}, /* rw */
3825 { "pmslatfr_el1", CPENC (3, 0, C9
, C9
, 6), F_ARCHEXT
}, /* rw */
3826 { "pmsidr_el1", CPENC (3, 0, C9
, C9
, 7), F_ARCHEXT
}, /* ro */
3827 { "pmscr_el2", CPENC (3, 4, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3828 { "pmscr_el12", CPENC (3, 5, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3829 { "pmcr_el0", CPENC(3,3,C9
,C12
, 0), 0 },
3830 { "pmcntenset_el0", CPENC(3,3,C9
,C12
, 1), 0 },
3831 { "pmcntenclr_el0", CPENC(3,3,C9
,C12
, 2), 0 },
3832 { "pmovsclr_el0", CPENC(3,3,C9
,C12
, 3), 0 },
3833 { "pmswinc_el0", CPENC(3,3,C9
,C12
, 4), 0 }, /* w */
3834 { "pmselr_el0", CPENC(3,3,C9
,C12
, 5), 0 },
3835 { "pmceid0_el0", CPENC(3,3,C9
,C12
, 6), 0 }, /* r */
3836 { "pmceid1_el0", CPENC(3,3,C9
,C12
, 7), 0 }, /* r */
3837 { "pmccntr_el0", CPENC(3,3,C9
,C13
, 0), 0 },
3838 { "pmxevtyper_el0", CPENC(3,3,C9
,C13
, 1), 0 },
3839 { "pmxevcntr_el0", CPENC(3,3,C9
,C13
, 2), 0 },
3840 { "pmuserenr_el0", CPENC(3,3,C9
,C14
, 0), 0 },
3841 { "pmintenset_el1", CPENC(3,0,C9
,C14
, 1), 0 },
3842 { "pmintenclr_el1", CPENC(3,0,C9
,C14
, 2), 0 },
3843 { "pmovsset_el0", CPENC(3,3,C9
,C14
, 3), 0 },
3844 { "pmevcntr0_el0", CPENC(3,3,C14
,C8
, 0), 0 },
3845 { "pmevcntr1_el0", CPENC(3,3,C14
,C8
, 1), 0 },
3846 { "pmevcntr2_el0", CPENC(3,3,C14
,C8
, 2), 0 },
3847 { "pmevcntr3_el0", CPENC(3,3,C14
,C8
, 3), 0 },
3848 { "pmevcntr4_el0", CPENC(3,3,C14
,C8
, 4), 0 },
3849 { "pmevcntr5_el0", CPENC(3,3,C14
,C8
, 5), 0 },
3850 { "pmevcntr6_el0", CPENC(3,3,C14
,C8
, 6), 0 },
3851 { "pmevcntr7_el0", CPENC(3,3,C14
,C8
, 7), 0 },
3852 { "pmevcntr8_el0", CPENC(3,3,C14
,C9
, 0), 0 },
3853 { "pmevcntr9_el0", CPENC(3,3,C14
,C9
, 1), 0 },
3854 { "pmevcntr10_el0", CPENC(3,3,C14
,C9
, 2), 0 },
3855 { "pmevcntr11_el0", CPENC(3,3,C14
,C9
, 3), 0 },
3856 { "pmevcntr12_el0", CPENC(3,3,C14
,C9
, 4), 0 },
3857 { "pmevcntr13_el0", CPENC(3,3,C14
,C9
, 5), 0 },
3858 { "pmevcntr14_el0", CPENC(3,3,C14
,C9
, 6), 0 },
3859 { "pmevcntr15_el0", CPENC(3,3,C14
,C9
, 7), 0 },
3860 { "pmevcntr16_el0", CPENC(3,3,C14
,C10
,0), 0 },
3861 { "pmevcntr17_el0", CPENC(3,3,C14
,C10
,1), 0 },
3862 { "pmevcntr18_el0", CPENC(3,3,C14
,C10
,2), 0 },
3863 { "pmevcntr19_el0", CPENC(3,3,C14
,C10
,3), 0 },
3864 { "pmevcntr20_el0", CPENC(3,3,C14
,C10
,4), 0 },
3865 { "pmevcntr21_el0", CPENC(3,3,C14
,C10
,5), 0 },
3866 { "pmevcntr22_el0", CPENC(3,3,C14
,C10
,6), 0 },
3867 { "pmevcntr23_el0", CPENC(3,3,C14
,C10
,7), 0 },
3868 { "pmevcntr24_el0", CPENC(3,3,C14
,C11
,0), 0 },
3869 { "pmevcntr25_el0", CPENC(3,3,C14
,C11
,1), 0 },
3870 { "pmevcntr26_el0", CPENC(3,3,C14
,C11
,2), 0 },
3871 { "pmevcntr27_el0", CPENC(3,3,C14
,C11
,3), 0 },
3872 { "pmevcntr28_el0", CPENC(3,3,C14
,C11
,4), 0 },
3873 { "pmevcntr29_el0", CPENC(3,3,C14
,C11
,5), 0 },
3874 { "pmevcntr30_el0", CPENC(3,3,C14
,C11
,6), 0 },
3875 { "pmevtyper0_el0", CPENC(3,3,C14
,C12
,0), 0 },
3876 { "pmevtyper1_el0", CPENC(3,3,C14
,C12
,1), 0 },
3877 { "pmevtyper2_el0", CPENC(3,3,C14
,C12
,2), 0 },
3878 { "pmevtyper3_el0", CPENC(3,3,C14
,C12
,3), 0 },
3879 { "pmevtyper4_el0", CPENC(3,3,C14
,C12
,4), 0 },
3880 { "pmevtyper5_el0", CPENC(3,3,C14
,C12
,5), 0 },
3881 { "pmevtyper6_el0", CPENC(3,3,C14
,C12
,6), 0 },
3882 { "pmevtyper7_el0", CPENC(3,3,C14
,C12
,7), 0 },
3883 { "pmevtyper8_el0", CPENC(3,3,C14
,C13
,0), 0 },
3884 { "pmevtyper9_el0", CPENC(3,3,C14
,C13
,1), 0 },
3885 { "pmevtyper10_el0", CPENC(3,3,C14
,C13
,2), 0 },
3886 { "pmevtyper11_el0", CPENC(3,3,C14
,C13
,3), 0 },
3887 { "pmevtyper12_el0", CPENC(3,3,C14
,C13
,4), 0 },
3888 { "pmevtyper13_el0", CPENC(3,3,C14
,C13
,5), 0 },
3889 { "pmevtyper14_el0", CPENC(3,3,C14
,C13
,6), 0 },
3890 { "pmevtyper15_el0", CPENC(3,3,C14
,C13
,7), 0 },
3891 { "pmevtyper16_el0", CPENC(3,3,C14
,C14
,0), 0 },
3892 { "pmevtyper17_el0", CPENC(3,3,C14
,C14
,1), 0 },
3893 { "pmevtyper18_el0", CPENC(3,3,C14
,C14
,2), 0 },
3894 { "pmevtyper19_el0", CPENC(3,3,C14
,C14
,3), 0 },
3895 { "pmevtyper20_el0", CPENC(3,3,C14
,C14
,4), 0 },
3896 { "pmevtyper21_el0", CPENC(3,3,C14
,C14
,5), 0 },
3897 { "pmevtyper22_el0", CPENC(3,3,C14
,C14
,6), 0 },
3898 { "pmevtyper23_el0", CPENC(3,3,C14
,C14
,7), 0 },
3899 { "pmevtyper24_el0", CPENC(3,3,C14
,C15
,0), 0 },
3900 { "pmevtyper25_el0", CPENC(3,3,C14
,C15
,1), 0 },
3901 { "pmevtyper26_el0", CPENC(3,3,C14
,C15
,2), 0 },
3902 { "pmevtyper27_el0", CPENC(3,3,C14
,C15
,3), 0 },
3903 { "pmevtyper28_el0", CPENC(3,3,C14
,C15
,4), 0 },
3904 { "pmevtyper29_el0", CPENC(3,3,C14
,C15
,5), 0 },
3905 { "pmevtyper30_el0", CPENC(3,3,C14
,C15
,6), 0 },
3906 { "pmccfiltr_el0", CPENC(3,3,C14
,C15
,7), 0 },
3907 { 0, CPENC(0,0,0,0,0), 0 },
3911 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
3913 return (reg
->flags
& F_DEPRECATED
) != 0;
3917 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
3918 const aarch64_sys_reg
*reg
)
3920 if (!(reg
->flags
& F_ARCHEXT
))
3923 /* PAN. Values are from aarch64_sys_regs. */
3924 if (reg
->value
== CPEN_(0,C2
,3)
3925 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3928 /* Virtualization host extensions: system registers. */
3929 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
3930 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
3931 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
3932 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
3933 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
3934 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3937 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3938 if ((reg
->value
== CPEN_ (5, C0
, 0)
3939 || reg
->value
== CPEN_ (5, C0
, 1)
3940 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
3941 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
3942 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
3943 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
3944 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
3945 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
3946 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
3947 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
3948 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
3949 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
3950 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
3951 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
3952 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
3953 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
3954 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3957 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3958 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
3959 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
3960 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
3961 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
3962 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
3963 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
3964 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3967 /* ARMv8.2 features. */
3969 /* ID_AA64MMFR2_EL1. */
3970 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
3971 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3975 if (reg
->value
== CPEN_ (0, C2
, 4)
3976 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3979 /* RAS extension. */
3981 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
3982 ERXMISC0_EL1 AND ERXMISC1_EL1. */
3983 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
3984 || reg
->value
== CPENC (3, 0, C5
, C3
, 1)
3985 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
3986 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
3987 || reg
->value
== CPENC (3, 0, C5
, C4
, 0)
3988 || reg
->value
== CPENC (3, 0, C5
, C4
, 1)
3989 || reg
->value
== CPENC (3, 0, C5
, C4
, 2)
3990 || reg
->value
== CPENC (3, 0, C5
, C4
, 3)
3991 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
3992 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
3993 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3996 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3997 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
3998 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
3999 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
4000 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4003 /* Statistical Profiling extension. */
4004 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
4005 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
4006 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
4007 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
4008 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
4009 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
4010 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
4011 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
4012 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
4013 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
4014 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
4015 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
4016 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
4017 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
4023 const aarch64_sys_reg aarch64_pstatefields
[] =
4025 { "spsel", 0x05, 0 },
4026 { "daifset", 0x1e, 0 },
4027 { "daifclr", 0x1f, 0 },
4028 { "pan", 0x04, F_ARCHEXT
},
4029 { "uao", 0x03, F_ARCHEXT
},
4030 { 0, CPENC(0,0,0,0,0), 0 },
4034 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
4035 const aarch64_sys_reg
*reg
)
4037 if (!(reg
->flags
& F_ARCHEXT
))
4040 /* PAN. Values are from aarch64_pstatefields. */
4041 if (reg
->value
== 0x04
4042 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
4045 /* UAO. Values are from aarch64_pstatefields. */
4046 if (reg
->value
== 0x03
4047 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4053 const aarch64_sys_ins_reg aarch64_sys_regs_ic
[] =
4055 { "ialluis", CPENS(0,C7
,C1
,0), 0 },
4056 { "iallu", CPENS(0,C7
,C5
,0), 0 },
4057 { "ivau", CPENS (3, C7
, C5
, 1), F_HASXT
},
4058 { 0, CPENS(0,0,0,0), 0 }
4061 const aarch64_sys_ins_reg aarch64_sys_regs_dc
[] =
4063 { "zva", CPENS (3, C7
, C4
, 1), F_HASXT
},
4064 { "ivac", CPENS (0, C7
, C6
, 1), F_HASXT
},
4065 { "isw", CPENS (0, C7
, C6
, 2), F_HASXT
},
4066 { "cvac", CPENS (3, C7
, C10
, 1), F_HASXT
},
4067 { "csw", CPENS (0, C7
, C10
, 2), F_HASXT
},
4068 { "cvau", CPENS (3, C7
, C11
, 1), F_HASXT
},
4069 { "cvap", CPENS (3, C7
, C12
, 1), F_HASXT
| F_ARCHEXT
},
4070 { "civac", CPENS (3, C7
, C14
, 1), F_HASXT
},
4071 { "cisw", CPENS (0, C7
, C14
, 2), F_HASXT
},
4072 { 0, CPENS(0,0,0,0), 0 }
4075 const aarch64_sys_ins_reg aarch64_sys_regs_at
[] =
4077 { "s1e1r", CPENS (0, C7
, C8
, 0), F_HASXT
},
4078 { "s1e1w", CPENS (0, C7
, C8
, 1), F_HASXT
},
4079 { "s1e0r", CPENS (0, C7
, C8
, 2), F_HASXT
},
4080 { "s1e0w", CPENS (0, C7
, C8
, 3), F_HASXT
},
4081 { "s12e1r", CPENS (4, C7
, C8
, 4), F_HASXT
},
4082 { "s12e1w", CPENS (4, C7
, C8
, 5), F_HASXT
},
4083 { "s12e0r", CPENS (4, C7
, C8
, 6), F_HASXT
},
4084 { "s12e0w", CPENS (4, C7
, C8
, 7), F_HASXT
},
4085 { "s1e2r", CPENS (4, C7
, C8
, 0), F_HASXT
},
4086 { "s1e2w", CPENS (4, C7
, C8
, 1), F_HASXT
},
4087 { "s1e3r", CPENS (6, C7
, C8
, 0), F_HASXT
},
4088 { "s1e3w", CPENS (6, C7
, C8
, 1), F_HASXT
},
4089 { "s1e1rp", CPENS (0, C7
, C9
, 0), F_HASXT
| F_ARCHEXT
},
4090 { "s1e1wp", CPENS (0, C7
, C9
, 1), F_HASXT
| F_ARCHEXT
},
4091 { 0, CPENS(0,0,0,0), 0 }
4094 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi
[] =
4096 { "vmalle1", CPENS(0,C8
,C7
,0), 0 },
4097 { "vae1", CPENS (0, C8
, C7
, 1), F_HASXT
},
4098 { "aside1", CPENS (0, C8
, C7
, 2), F_HASXT
},
4099 { "vaae1", CPENS (0, C8
, C7
, 3), F_HASXT
},
4100 { "vmalle1is", CPENS(0,C8
,C3
,0), 0 },
4101 { "vae1is", CPENS (0, C8
, C3
, 1), F_HASXT
},
4102 { "aside1is", CPENS (0, C8
, C3
, 2), F_HASXT
},
4103 { "vaae1is", CPENS (0, C8
, C3
, 3), F_HASXT
},
4104 { "ipas2e1is", CPENS (4, C8
, C0
, 1), F_HASXT
},
4105 { "ipas2le1is",CPENS (4, C8
, C0
, 5), F_HASXT
},
4106 { "ipas2e1", CPENS (4, C8
, C4
, 1), F_HASXT
},
4107 { "ipas2le1", CPENS (4, C8
, C4
, 5), F_HASXT
},
4108 { "vae2", CPENS (4, C8
, C7
, 1), F_HASXT
},
4109 { "vae2is", CPENS (4, C8
, C3
, 1), F_HASXT
},
4110 { "vmalls12e1",CPENS(4,C8
,C7
,6), 0 },
4111 { "vmalls12e1is",CPENS(4,C8
,C3
,6), 0 },
4112 { "vae3", CPENS (6, C8
, C7
, 1), F_HASXT
},
4113 { "vae3is", CPENS (6, C8
, C3
, 1), F_HASXT
},
4114 { "alle2", CPENS(4,C8
,C7
,0), 0 },
4115 { "alle2is", CPENS(4,C8
,C3
,0), 0 },
4116 { "alle1", CPENS(4,C8
,C7
,4), 0 },
4117 { "alle1is", CPENS(4,C8
,C3
,4), 0 },
4118 { "alle3", CPENS(6,C8
,C7
,0), 0 },
4119 { "alle3is", CPENS(6,C8
,C3
,0), 0 },
4120 { "vale1is", CPENS (0, C8
, C3
, 5), F_HASXT
},
4121 { "vale2is", CPENS (4, C8
, C3
, 5), F_HASXT
},
4122 { "vale3is", CPENS (6, C8
, C3
, 5), F_HASXT
},
4123 { "vaale1is", CPENS (0, C8
, C3
, 7), F_HASXT
},
4124 { "vale1", CPENS (0, C8
, C7
, 5), F_HASXT
},
4125 { "vale2", CPENS (4, C8
, C7
, 5), F_HASXT
},
4126 { "vale3", CPENS (6, C8
, C7
, 5), F_HASXT
},
4127 { "vaale1", CPENS (0, C8
, C7
, 7), F_HASXT
},
4128 { 0, CPENS(0,0,0,0), 0 }
4132 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
4134 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
4138 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
4139 const aarch64_sys_ins_reg
*reg
)
4141 if (!(reg
->flags
& F_ARCHEXT
))
4144 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4145 if (reg
->value
== CPENS (3, C7
, C12
, 1)
4146 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4149 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4150 if ((reg
->value
== CPENS (0, C7
, C9
, 0)
4151 || reg
->value
== CPENS (0, C7
, C9
, 1))
4152 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
/* Extract bit BT of INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN.  The mask constant is
   unsigned (1u) so that a field as wide as 31 bits does not left-shift a
   signed 1 into the sign bit, which is undefined behavior in C.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1u << (((HI) - (LO)) + 1)) - 1))
4179 verify_ldpsw (const struct aarch64_opcode
* opcode ATTRIBUTE_UNUSED
,
4180 const aarch64_insn insn
)
4182 int t
= BITS (insn
, 4, 0);
4183 int n
= BITS (insn
, 9, 5);
4184 int t2
= BITS (insn
, 14, 10);
4188 /* Write back enabled. */
4189 if ((t
== n
|| t2
== n
) && n
!= 31)
4203 /* Return true if VALUE cannot be moved into an SVE register using DUP
4204 (with any element size, not just ESIZE) and if using DUPM would
4205 therefore be OK. ESIZE is the number of bytes in the immediate. */
4208 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue
, int esize
)
4210 int64_t svalue
= uvalue
;
4211 uint64_t upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
4213 if ((uvalue
& ~upper
) != uvalue
&& (uvalue
| upper
) != uvalue
)
4215 if (esize
<= 4 || (uint32_t) uvalue
== (uint32_t) (uvalue
>> 32))
4217 svalue
= (int32_t) uvalue
;
4218 if (esize
<= 2 || (uint16_t) uvalue
== (uint16_t) (uvalue
>> 16))
4220 svalue
= (int16_t) uvalue
;
4221 if (esize
== 1 || (uint8_t) uvalue
== (uint8_t) (uvalue
>> 8))
4225 if ((svalue
& 0xff) == 0)
4227 return svalue
< -128 || svalue
>= 128;
4230 /* Include the opcode description table as well as the operand description
4232 #define VERIFIER(x) verify_##x
4233 #include "aarch64-tbl.h"