1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
25 #include "bfd_stdint.h"
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump
= FALSE
;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array
[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array
[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
108 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
109 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
116 return ((qualifier
>= AARCH64_OPND_QLF_S_B
117 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
127 DP_VECTOR_ACROSS_LANES
,
/* Map each data pattern (enum data_pattern) to the index of the operand
   that carries the significant size information for size:Q encoding.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
147 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers
[0] == qualifiers
[1]
152 && vector_qualifier_p (qualifiers
[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers
[0])
154 == aarch64_get_qualifier_esize (qualifiers
[1]))
155 && (aarch64_get_qualifier_esize (qualifiers
[0])
156 == aarch64_get_qualifier_esize (qualifiers
[2])))
157 return DP_VECTOR_3SAME
;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers
[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers
[0])
164 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
165 return DP_VECTOR_LONG
;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers
[0] == qualifiers
[1]
168 && vector_qualifier_p (qualifiers
[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers
[0])
171 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers
[0])
173 == aarch64_get_qualifier_esize (qualifiers
[1])))
174 return DP_VECTOR_WIDE
;
176 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers
[1]) == TRUE
180 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
181 return DP_VECTOR_ACROSS_LANES
;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
199 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
202 const aarch64_field fields
[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 10, 4 }, /* imm4_3: in adddg/subg instructions. */
247 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
248 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
249 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
250 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
251 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
252 { 5, 14 }, /* imm14: in test bit and branch instructions. */
253 { 5, 16 }, /* imm16: in exception instructions. */
254 { 0, 26 }, /* imm26: in unconditional branch instructions. */
255 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
256 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
257 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
258 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
259 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
260 { 22, 1 }, /* N: in logical (immediate) instructions. */
261 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
262 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
263 { 31, 1 }, /* sf: in integer data processing instructions. */
264 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
265 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
266 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
267 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
268 { 31, 1 }, /* b5: in the test bit and branch instructions. */
269 { 19, 5 }, /* b40: in the test bit and branch instructions. */
270 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
271 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
272 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
273 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
274 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
275 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
276 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
277 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
278 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
279 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
280 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
281 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
282 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
283 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
284 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
285 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
286 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
288 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
289 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
290 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
291 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
292 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
293 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
294 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
295 { 5, 1 }, /* SVE_i1: single-bit immediate. */
296 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
297 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
298 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
299 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
300 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
301 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
302 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
303 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
304 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
305 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
306 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
307 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
308 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
309 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
310 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
311 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
312 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
313 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
314 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
315 { 16, 4 }, /* SVE_tsz: triangular size select. */
316 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
317 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
318 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
319 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
320 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
321 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
322 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
323 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
324 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
325 { 22, 1 }, /* sz: 1-bit element size select. */
328 enum aarch64_operand_class
329 aarch64_get_operand_class (enum aarch64_opnd type
)
331 return aarch64_operands
[type
].op_class
;
335 aarch64_get_operand_name (enum aarch64_opnd type
)
337 return aarch64_operands
[type
].name
;
340 /* Get operand description string.
341 This is usually for the diagnosis purpose. */
343 aarch64_get_operand_desc (enum aarch64_opnd type
)
345 return aarch64_operands
[type
].desc
;
348 /* Table of all conditional affixes. */
349 const aarch64_cond aarch64_conds
[16] =
351 {{"eq", "none"}, 0x0},
352 {{"ne", "any"}, 0x1},
353 {{"cs", "hs", "nlast"}, 0x2},
354 {{"cc", "lo", "ul", "last"}, 0x3},
355 {{"mi", "first"}, 0x4},
356 {{"pl", "nfrst"}, 0x5},
359 {{"hi", "pmore"}, 0x8},
360 {{"ls", "plast"}, 0x9},
361 {{"ge", "tcont"}, 0xa},
362 {{"lt", "tstop"}, 0xb},
370 get_cond_from_value (aarch64_insn value
)
373 return &aarch64_conds
[(unsigned int) value
];
377 get_inverted_cond (const aarch64_cond
*cond
)
379 return &aarch64_conds
[cond
->value
^ 0x1];
382 /* Table describing the operand extension/shifting operators; indexed by
383 enum aarch64_modifier_kind.
385 The value column provides the most common values for encoding modifiers,
386 which enables table-driven encoding/decoding for the modifiers. */
387 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
408 enum aarch64_modifier_kind
409 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
411 return desc
- aarch64_operand_modifiers
;
415 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
417 return aarch64_operand_modifiers
[kind
].value
;
420 enum aarch64_modifier_kind
421 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
422 bfd_boolean extend_p
)
424 if (extend_p
== TRUE
)
425 return AARCH64_MOD_UXTB
+ value
;
427 return AARCH64_MOD_LSL
- value
;
431 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
433 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
437 static inline bfd_boolean
438 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
440 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
444 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
464 /* Table describing the operands supported by the aliases of the HINT
467 The name column is the operand that is accepted for the alias. The value
468 column is the hint number of the alias. The list of operands is terminated
469 by NULL in the name column. */
471 const struct aarch64_name_value_pair aarch64_hint_options
[] =
473 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
474 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT
, 0x20) },
475 { "csync", HINT_OPD_CSYNC
}, /* PSB CSYNC. */
476 { "c", HINT_OPD_C
}, /* BTI C. */
477 { "j", HINT_OPD_J
}, /* BTI J. */
478 { "jc", HINT_OPD_JC
}, /* BTI JC. */
479 { NULL
, HINT_OPD_NULL
},
482 /* op -> op: load = 0 instruction = 1 store = 2
484 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
485 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
486 const struct aarch64_name_value_pair aarch64_prfops
[32] =
488 { "pldl1keep", B(0, 1, 0) },
489 { "pldl1strm", B(0, 1, 1) },
490 { "pldl2keep", B(0, 2, 0) },
491 { "pldl2strm", B(0, 2, 1) },
492 { "pldl3keep", B(0, 3, 0) },
493 { "pldl3strm", B(0, 3, 1) },
496 { "plil1keep", B(1, 1, 0) },
497 { "plil1strm", B(1, 1, 1) },
498 { "plil2keep", B(1, 2, 0) },
499 { "plil2strm", B(1, 2, 1) },
500 { "plil3keep", B(1, 3, 0) },
501 { "plil3strm", B(1, 3, 1) },
504 { "pstl1keep", B(2, 1, 0) },
505 { "pstl1strm", B(2, 1, 1) },
506 { "pstl2keep", B(2, 2, 0) },
507 { "pstl2strm", B(2, 2, 1) },
508 { "pstl3keep", B(2, 3, 0) },
509 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 if VALUE lies in the inclusive range [LOW, HIGH], else 0.  */
int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return true if VALUE is a multiple of ALIGN.
   ALIGN must be non-zero (callers pass a power-of-two alignment).  */
int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
/* A signed value fits in a field: return 1 if VALUE is representable as a
   two's-complement integer of WIDTH bits, else 0.  */
int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* An unsigned value fits in a field: return 1 if VALUE is representable as
   an unsigned integer of WIDTH bits (i.e. 0 <= VALUE < 2^WIDTH), else 0.  */
int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
566 /* Return 1 if OPERAND is SP or WSP. */
568 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
570 return ((aarch64_get_operand_class (operand
->type
)
571 == AARCH64_OPND_CLASS_INT_REG
)
572 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
573 && operand
->reg
.regno
== 31);
576 /* Return 1 if OPERAND is XZR or WZP. */
578 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
580 return ((aarch64_get_operand_class (operand
->type
)
581 == AARCH64_OPND_CLASS_INT_REG
)
582 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
583 && operand
->reg
.regno
== 31);
586 /* Return true if the operand *OPERAND that has the operand code
587 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
588 qualified by the qualifier TARGET. */
591 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
592 aarch64_opnd_qualifier_t target
)
594 switch (operand
->qualifier
)
596 case AARCH64_OPND_QLF_W
:
597 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
600 case AARCH64_OPND_QLF_X
:
601 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
604 case AARCH64_OPND_QLF_WSP
:
605 if (target
== AARCH64_OPND_QLF_W
606 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
609 case AARCH64_OPND_QLF_SP
:
610 if (target
== AARCH64_OPND_QLF_X
611 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
621 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
622 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
624 Return NIL if more than one expected qualifiers are found. */
626 aarch64_opnd_qualifier_t
627 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
629 const aarch64_opnd_qualifier_t known_qlf
,
636 When the known qualifier is NIL, we have to assume that there is only
637 one qualifier sequence in the *QSEQ_LIST and return the corresponding
638 qualifier directly. One scenario is that for instruction
639 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
640 which has only one possible valid qualifier sequence
642 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
643 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
645 Because the qualifier NIL has dual roles in the qualifier sequence:
646 it can mean no qualifier for the operand, or the qualifer sequence is
647 not in use (when all qualifiers in the sequence are NILs), we have to
648 handle this special case here. */
649 if (known_qlf
== AARCH64_OPND_NIL
)
651 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
652 return qseq_list
[0][idx
];
655 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
657 if (qseq_list
[i
][known_idx
] == known_qlf
)
660 /* More than one sequences are found to have KNOWN_QLF at
662 return AARCH64_OPND_NIL
;
667 return qseq_list
[saved_i
][idx
];
670 enum operand_qualifier_kind
678 /* Operand qualifier description. */
679 struct operand_qualifier_data
681 /* The usage of the three data fields depends on the qualifier kind. */
688 enum operand_qualifier_kind kind
;
691 /* Indexed by the operand qualifier enumerators. */
692 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
694 {0, 0, 0, "NIL", OQK_NIL
},
696 /* Operand variant qualifiers.
698 element size, number of elements and common value for encoding. */
700 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
701 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
702 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
703 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
705 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
706 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
707 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
708 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
709 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
710 {4, 1, 0x0, "4b", OQK_OPD_VARIANT
},
712 {1, 4, 0x0, "4b", OQK_OPD_VARIANT
},
713 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
714 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
715 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
716 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
717 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
718 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
719 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
720 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
721 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
722 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
724 {0, 0, 0, "z", OQK_OPD_VARIANT
},
725 {0, 0, 0, "m", OQK_OPD_VARIANT
},
727 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
728 {16, 0, 0, "tag", OQK_OPD_VARIANT
},
730 /* Qualifiers constraining the value range.
732 Lower bound, higher bound, unused. */
734 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE
},
735 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
736 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
737 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
738 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
739 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
740 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
742 /* Qualifiers for miscellaneous purpose.
744 unused, unused and unused. */
749 {0, 0, 0, "retrieving", 0},
752 static inline bfd_boolean
753 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
755 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
759 static inline bfd_boolean
760 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
762 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
767 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
769 return aarch64_opnd_qualifiers
[qualifier
].desc
;
772 /* Given an operand qualifier, return the expected data element size
773 of a qualified operand. */
775 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
777 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
778 return aarch64_opnd_qualifiers
[qualifier
].data0
;
782 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
784 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
785 return aarch64_opnd_qualifiers
[qualifier
].data1
;
789 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
791 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
792 return aarch64_opnd_qualifiers
[qualifier
].data2
;
796 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
798 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
799 return aarch64_opnd_qualifiers
[qualifier
].data0
;
803 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
805 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
806 return aarch64_opnd_qualifiers
[qualifier
].data1
;
811 aarch64_verbose (const char *str
, ...)
822 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
826 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
827 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
832 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
833 const aarch64_opnd_qualifier_t
*qualifier
)
836 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
838 aarch64_verbose ("dump_match_qualifiers:");
839 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
840 curr
[i
] = opnd
[i
].qualifier
;
841 dump_qualifier_sequence (curr
);
842 aarch64_verbose ("against");
843 dump_qualifier_sequence (qualifier
);
845 #endif /* DEBUG_AARCH64 */
847 /* This function checks if the given instruction INSN is a destructive
848 instruction based on the usage of the registers. It does not recognize
849 unary destructive instructions. */
851 aarch64_is_destructive_by_operands (const aarch64_opcode
*opcode
)
854 const enum aarch64_opnd
*opnds
= opcode
->operands
;
856 if (opnds
[0] == AARCH64_OPND_NIL
)
859 while (opnds
[++i
] != AARCH64_OPND_NIL
)
860 if (opnds
[i
] == opnds
[0])
866 /* TODO improve this, we can have an extra field at the runtime to
867 store the number of operands rather than calculating it every time. */
870 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
873 const enum aarch64_opnd
*opnds
= opcode
->operands
;
874 while (opnds
[i
++] != AARCH64_OPND_NIL
)
877 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
881 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
882 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
884 N.B. on the entry, it is very likely that only some operands in *INST
885 have had their qualifiers been established.
887 If STOP_AT is not -1, the function will only try to match
888 the qualifier sequence for operands before and including the operand
889 of index STOP_AT; and on success *RET will only be filled with the first
890 (STOP_AT+1) qualifiers.
892 A couple examples of the matching algorithm:
900 Apart from serving the main encoding routine, this can also be called
901 during or after the operand decoding. */
904 aarch64_find_best_match (const aarch64_inst
*inst
,
905 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
906 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
910 const aarch64_opnd_qualifier_t
*qualifiers
;
912 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
915 DEBUG_TRACE ("SUCCEED: no operand");
919 if (stop_at
< 0 || stop_at
>= num_opnds
)
920 stop_at
= num_opnds
- 1;
922 /* For each pattern. */
923 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
926 qualifiers
= *qualifiers_list
;
928 /* Start as positive. */
931 DEBUG_TRACE ("%d", i
);
934 dump_match_qualifiers (inst
->operands
, qualifiers
);
937 /* Most opcodes has much fewer patterns in the list.
938 First NIL qualifier indicates the end in the list. */
939 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
941 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
947 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
949 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
951 /* Either the operand does not have qualifier, or the qualifier
952 for the operand needs to be deduced from the qualifier
954 In the latter case, any constraint checking related with
955 the obtained qualifier should be done later in
956 operand_general_constraint_met_p. */
959 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
961 /* Unless the target qualifier can also qualify the operand
962 (which has already had a non-nil qualifier), non-equal
963 qualifiers are generally un-matched. */
964 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
973 continue; /* Equal qualifiers are certainly matched. */
976 /* Qualifiers established. */
983 /* Fill the result in *RET. */
985 qualifiers
= *qualifiers_list
;
987 DEBUG_TRACE ("complete qualifiers using list %d", i
);
990 dump_qualifier_sequence (qualifiers
);
993 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
994 ret
[j
] = *qualifiers
;
995 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
996 ret
[j
] = AARCH64_OPND_QLF_NIL
;
998 DEBUG_TRACE ("SUCCESS");
1002 DEBUG_TRACE ("FAIL");
1006 /* Operand qualifier matching and resolving.
1008 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1009 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1011 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1015 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
1018 aarch64_opnd_qualifier_seq_t qualifiers
;
1020 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
1023 DEBUG_TRACE ("matching FAIL");
1027 if (inst
->opcode
->flags
& F_STRICT
)
1029 /* Require an exact qualifier match, even for NIL qualifiers. */
1030 nops
= aarch64_num_of_operands (inst
->opcode
);
1031 for (i
= 0; i
< nops
; ++i
)
1032 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
1036 /* Update the qualifiers. */
1037 if (update_p
== TRUE
)
1038 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1040 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
1042 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
1043 "update %s with %s for operand %d",
1044 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
1045 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
1046 inst
->operands
[i
].qualifier
= qualifiers
[i
];
1049 DEBUG_TRACE ("matching SUCCESS");
1053 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1056 IS32 indicates whether value is a 32-bit immediate or not.
1057 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1058 amount will be returned in *SHIFT_AMOUNT. */
1061 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
1065 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1069 /* Allow all zeros or all ones in top 32-bits, so that
1070 32-bit constant expressions like ~0x80000000 are
1072 uint64_t ext
= value
;
1073 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
1074 /* Immediate out of range. */
1076 value
&= (int64_t) 0xffffffff;
1079 /* first, try movz then movn */
1081 if ((value
& ((int64_t) 0xffff << 0)) == value
)
1083 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
1085 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
1087 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
1092 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1096 if (shift_amount
!= NULL
)
1097 *shift_amount
= amount
;
1099 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
1104 /* Build the accepted values for immediate logical SIMD instructions.
1106 The standard encodings of the immediate value are:
1107 N imms immr SIMD size R S
1108 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1109 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1110 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1111 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1112 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1113 0 11110s 00000r 2 UInt(r) UInt(s)
1114 where all-ones value of S is reserved.
1116 Let's call E the SIMD size.
1118 The immediate value is: S+1 bits '1' rotated to the right by R.
1120 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1121 (remember S != E - 1). */
1123 #define TOTAL_IMM_NB 5334
1128 aarch64_insn encoding
;
1129 } simd_imm_encoding
;
1131 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
1134 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1136 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1137 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1139 if (imm1
->imm
< imm2
->imm
)
1141 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
   1         ssssss     rrrrrr      64      rrrrrr ssssss
   0         0sssss     0rrrrr      32      rrrrr  sssss
   0         10ssss     00rrrr      16      rrrr   ssss
   0         110sss     000rrr      8       rrr    sss
   0         1110ss     0000rr      4       rr     ss
   0         11110s     00000r      2       r      s  */
static inline uint64_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Pack N:immr:imms into the canonical 13-bit form.  */
  return (is64 << 12) | (r << 6) | s;
}
1161 build_immediate_table (void)
1163 uint32_t log_e
, e
, s
, r
, s_mask
;
1169 for (log_e
= 1; log_e
<= 6; log_e
++)
1171 /* Get element size. */
1176 mask
= 0xffffffffffffffffull
;
1182 mask
= (1ull << e
) - 1;
1184 1 ((1 << 4) - 1) << 2 = 111100
1185 2 ((1 << 3) - 1) << 3 = 111000
1186 3 ((1 << 2) - 1) << 4 = 110000
1187 4 ((1 << 1) - 1) << 5 = 100000
1188 5 ((1 << 0) - 1) << 6 = 000000 */
1189 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1191 for (s
= 0; s
< e
- 1; s
++)
1192 for (r
= 0; r
< e
; r
++)
1194 /* s+1 consecutive bits to 1 (s < 63) */
1195 imm
= (1ull << (s
+ 1)) - 1;
1196 /* rotate right by r */
1198 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1199 /* replicate the constant depending on SIMD size */
1202 case 1: imm
= (imm
<< 2) | imm
;
1204 case 2: imm
= (imm
<< 4) | imm
;
1206 case 3: imm
= (imm
<< 8) | imm
;
1208 case 4: imm
= (imm
<< 16) | imm
;
1210 case 5: imm
= (imm
<< 32) | imm
;
1215 simd_immediates
[nb_imms
].imm
= imm
;
1216 simd_immediates
[nb_imms
].encoding
=
1217 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1221 assert (nb_imms
== TOTAL_IMM_NB
);
1222 qsort(simd_immediates
, nb_imms
,
1223 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1226 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1227 be accepted by logical (immediate) instructions
1228 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1230 ESIZE is the number of bytes in the decoded immediate value.
1231 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1232 VALUE will be returned in *ENCODING. */
1235 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1237 simd_imm_encoding imm_enc
;
1238 const simd_imm_encoding
*imm_encoding
;
1239 static bfd_boolean initialized
= FALSE
;
1243 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), esize: %d", value
,
1248 build_immediate_table ();
1252 /* Allow all zeros or all ones in top bits, so that
1253 constant expressions like ~1 are permitted. */
1254 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1255 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1258 /* Replicate to a full 64-bit value. */
1260 for (i
= esize
* 8; i
< 64; i
*= 2)
1261 value
|= (value
<< i
);
1263 imm_enc
.imm
= value
;
1264 imm_encoding
= (const simd_imm_encoding
*)
1265 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1266 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1267 if (imm_encoding
== NULL
)
1269 DEBUG_TRACE ("exit with FALSE");
1272 if (encoding
!= NULL
)
1273 *encoding
= imm_encoding
->encoding
;
1274 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */

int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros cannot come from
	   an expanded imm8.  */
	return -1;
    }
  return ret;
}
1300 /* Utility inline functions for operand_general_constraint_met_p. */
1303 set_error (aarch64_operand_error
*mismatch_detail
,
1304 enum aarch64_operand_error_kind kind
, int idx
,
1307 if (mismatch_detail
== NULL
)
1309 mismatch_detail
->kind
= kind
;
1310 mismatch_detail
->index
= idx
;
1311 mismatch_detail
->error
= error
;
1315 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1318 if (mismatch_detail
== NULL
)
1320 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1324 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1325 int idx
, int lower_bound
, int upper_bound
,
1328 if (mismatch_detail
== NULL
)
1330 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1331 mismatch_detail
->data
[0] = lower_bound
;
1332 mismatch_detail
->data
[1] = upper_bound
;
1336 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1337 int idx
, int lower_bound
, int upper_bound
)
1339 if (mismatch_detail
== NULL
)
1341 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1342 _("immediate value"));
1346 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1347 int idx
, int lower_bound
, int upper_bound
)
1349 if (mismatch_detail
== NULL
)
1351 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1352 _("immediate offset"));
1356 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1357 int idx
, int lower_bound
, int upper_bound
)
1359 if (mismatch_detail
== NULL
)
1361 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1362 _("register number"));
1366 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1367 int idx
, int lower_bound
, int upper_bound
)
1369 if (mismatch_detail
== NULL
)
1371 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1372 _("register element index"));
1376 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1377 int idx
, int lower_bound
, int upper_bound
)
1379 if (mismatch_detail
== NULL
)
1381 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1385 /* Report that the MUL modifier in operand IDX should be in the range
1386 [LOWER_BOUND, UPPER_BOUND]. */
1388 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1389 int idx
, int lower_bound
, int upper_bound
)
1391 if (mismatch_detail
== NULL
)
1393 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1398 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1401 if (mismatch_detail
== NULL
)
1403 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1404 mismatch_detail
->data
[0] = alignment
;
1408 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1411 if (mismatch_detail
== NULL
)
1413 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1414 mismatch_detail
->data
[0] = expected_num
;
1418 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1421 if (mismatch_detail
== NULL
)
1423 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1426 /* General constraint checking based on operand code.
1428 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1429 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1431 This function has to be called after the qualifiers for all operands
1434 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1435 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1436 of error message during the disassembling where error message is not
1437 wanted. We avoid the dynamic construction of strings of error messages
1438 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1439 use a combination of error code, static string and some integer data to
1440 represent an error. */
1443 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1444 enum aarch64_opnd type
,
1445 const aarch64_opcode
*opcode
,
1446 aarch64_operand_error
*mismatch_detail
)
1448 unsigned num
, modifiers
, shift
;
1450 int64_t imm
, min_value
, max_value
;
1451 uint64_t uvalue
, mask
;
1452 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1453 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1455 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1457 switch (aarch64_operands
[type
].op_class
)
1459 case AARCH64_OPND_CLASS_INT_REG
:
1460 /* Check pair reg constraints for cas* instructions. */
1461 if (type
== AARCH64_OPND_PAIRREG
)
1463 assert (idx
== 1 || idx
== 3);
1464 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1466 set_syntax_error (mismatch_detail
, idx
- 1,
1467 _("reg pair must start from even reg"));
1470 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1472 set_syntax_error (mismatch_detail
, idx
,
1473 _("reg pair must be contiguous"));
1479 /* <Xt> may be optional in some IC and TLBI instructions. */
1480 if (type
== AARCH64_OPND_Rt_SYS
)
1482 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1483 == AARCH64_OPND_CLASS_SYSTEM
));
1484 if (opnds
[1].present
1485 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1487 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1490 if (!opnds
[1].present
1491 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1493 set_other_error (mismatch_detail
, idx
, _("missing register"));
1499 case AARCH64_OPND_QLF_WSP
:
1500 case AARCH64_OPND_QLF_SP
:
1501 if (!aarch64_stack_pointer_p (opnd
))
1503 set_other_error (mismatch_detail
, idx
,
1504 _("stack pointer register expected"));
1513 case AARCH64_OPND_CLASS_SVE_REG
:
1516 case AARCH64_OPND_SVE_Zm3_INDEX
:
1517 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
1518 case AARCH64_OPND_SVE_Zm4_INDEX
:
1519 size
= get_operand_fields_width (get_operand_from_code (type
));
1520 shift
= get_operand_specific_data (&aarch64_operands
[type
]);
1521 mask
= (1 << shift
) - 1;
1522 if (opnd
->reg
.regno
> mask
)
1524 assert (mask
== 7 || mask
== 15);
1525 set_other_error (mismatch_detail
, idx
,
1527 ? _("z0-z15 expected")
1528 : _("z0-z7 expected"));
1531 mask
= (1 << (size
- shift
)) - 1;
1532 if (!value_in_range_p (opnd
->reglane
.index
, 0, mask
))
1534 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, mask
);
1539 case AARCH64_OPND_SVE_Zn_INDEX
:
1540 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1541 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1543 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1549 case AARCH64_OPND_SVE_ZnxN
:
1550 case AARCH64_OPND_SVE_ZtxN
:
1551 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1553 set_other_error (mismatch_detail
, idx
,
1554 _("invalid register list"));
1564 case AARCH64_OPND_CLASS_PRED_REG
:
1565 if (opnd
->reg
.regno
>= 8
1566 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1568 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1573 case AARCH64_OPND_CLASS_COND
:
1574 if (type
== AARCH64_OPND_COND1
1575 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1577 /* Not allow AL or NV. */
1578 set_syntax_error (mismatch_detail
, idx
, NULL
);
1582 case AARCH64_OPND_CLASS_ADDRESS
:
1583 /* Check writeback. */
1584 switch (opcode
->iclass
)
1588 case ldstnapair_offs
:
1591 if (opnd
->addr
.writeback
== 1)
1593 set_syntax_error (mismatch_detail
, idx
,
1594 _("unexpected address writeback"));
1599 if (opnd
->addr
.writeback
== 1 && opnd
->addr
.preind
!= 1)
1601 set_syntax_error (mismatch_detail
, idx
,
1602 _("unexpected address writeback"));
1607 case ldstpair_indexed
:
1610 if (opnd
->addr
.writeback
== 0)
1612 set_syntax_error (mismatch_detail
, idx
,
1613 _("address writeback expected"));
1618 assert (opnd
->addr
.writeback
== 0);
1623 case AARCH64_OPND_ADDR_SIMM7
:
1624 /* Scaled signed 7 bits immediate offset. */
1625 /* Get the size of the data element that is accessed, which may be
1626 different from that of the source register size,
1627 e.g. in strb/ldrb. */
1628 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1629 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1631 set_offset_out_of_range_error (mismatch_detail
, idx
,
1632 -64 * size
, 63 * size
);
1635 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1637 set_unaligned_error (mismatch_detail
, idx
, size
);
1641 case AARCH64_OPND_ADDR_OFFSET
:
1642 case AARCH64_OPND_ADDR_SIMM9
:
1643 /* Unscaled signed 9 bits immediate offset. */
1644 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1646 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1651 case AARCH64_OPND_ADDR_SIMM9_2
:
1652 /* Unscaled signed 9 bits immediate offset, which has to be negative
1654 size
= aarch64_get_qualifier_esize (qualifier
);
1655 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1656 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1657 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1659 set_other_error (mismatch_detail
, idx
,
1660 _("negative or unaligned offset expected"));
1663 case AARCH64_OPND_ADDR_SIMM10
:
1664 /* Scaled signed 10 bits immediate offset. */
1665 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4088))
1667 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4088);
1670 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 8))
1672 set_unaligned_error (mismatch_detail
, idx
, 8);
1677 case AARCH64_OPND_ADDR_SIMM11
:
1678 /* Signed 11 bits immediate offset (multiple of 16). */
1679 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -1024, 1008))
1681 set_offset_out_of_range_error (mismatch_detail
, idx
, -1024, 1008);
1685 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 16))
1687 set_unaligned_error (mismatch_detail
, idx
, 16);
1692 case AARCH64_OPND_ADDR_SIMM13
:
1693 /* Signed 13 bits immediate offset (multiple of 16). */
1694 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4080))
1696 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4080);
1700 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 16))
1702 set_unaligned_error (mismatch_detail
, idx
, 16);
1707 case AARCH64_OPND_SIMD_ADDR_POST
:
1708 /* AdvSIMD load/store multiple structures, post-index. */
1710 if (opnd
->addr
.offset
.is_reg
)
1712 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1716 set_other_error (mismatch_detail
, idx
,
1717 _("invalid register offset"));
1723 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1724 unsigned num_bytes
; /* total number of bytes transferred. */
1725 /* The opcode dependent area stores the number of elements in
1726 each structure to be loaded/stored. */
1727 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1728 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1729 /* Special handling of loading single structure to all lane. */
1730 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1731 * aarch64_get_qualifier_esize (prev
->qualifier
);
1733 num_bytes
= prev
->reglist
.num_regs
1734 * aarch64_get_qualifier_esize (prev
->qualifier
)
1735 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1736 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1738 set_other_error (mismatch_detail
, idx
,
1739 _("invalid post-increment amount"));
1745 case AARCH64_OPND_ADDR_REGOFF
:
1746 /* Get the size of the data element that is accessed, which may be
1747 different from that of the source register size,
1748 e.g. in strb/ldrb. */
1749 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1750 /* It is either no shift or shift by the binary logarithm of SIZE. */
1751 if (opnd
->shifter
.amount
!= 0
1752 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1754 set_other_error (mismatch_detail
, idx
,
1755 _("invalid shift amount"));
1758 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1760 switch (opnd
->shifter
.kind
)
1762 case AARCH64_MOD_UXTW
:
1763 case AARCH64_MOD_LSL
:
1764 case AARCH64_MOD_SXTW
:
1765 case AARCH64_MOD_SXTX
: break;
1767 set_other_error (mismatch_detail
, idx
,
1768 _("invalid extend/shift operator"));
1773 case AARCH64_OPND_ADDR_UIMM12
:
1774 imm
= opnd
->addr
.offset
.imm
;
1775 /* Get the size of the data element that is accessed, which may be
1776 different from that of the source register size,
1777 e.g. in strb/ldrb. */
1778 size
= aarch64_get_qualifier_esize (qualifier
);
1779 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1781 set_offset_out_of_range_error (mismatch_detail
, idx
,
1785 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1787 set_unaligned_error (mismatch_detail
, idx
, size
);
1792 case AARCH64_OPND_ADDR_PCREL14
:
1793 case AARCH64_OPND_ADDR_PCREL19
:
1794 case AARCH64_OPND_ADDR_PCREL21
:
1795 case AARCH64_OPND_ADDR_PCREL26
:
1796 imm
= opnd
->imm
.value
;
1797 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1799 /* The offset value in a PC-relative branch instruction is alway
1800 4-byte aligned and is encoded without the lowest 2 bits. */
1801 if (!value_aligned_p (imm
, 4))
1803 set_unaligned_error (mismatch_detail
, idx
, 4);
1806 /* Right shift by 2 so that we can carry out the following check
1810 size
= get_operand_fields_width (get_operand_from_code (type
));
1811 if (!value_fit_signed_field_p (imm
, size
))
1813 set_other_error (mismatch_detail
, idx
,
1814 _("immediate out of range"));
1819 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
1820 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
1821 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
1822 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
1826 assert (!opnd
->addr
.offset
.is_reg
);
1827 assert (opnd
->addr
.preind
);
1828 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
1831 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
1832 || (opnd
->shifter
.operator_present
1833 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
1835 set_other_error (mismatch_detail
, idx
,
1836 _("invalid addressing mode"));
1839 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1841 set_offset_out_of_range_error (mismatch_detail
, idx
,
1842 min_value
, max_value
);
1845 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1847 set_unaligned_error (mismatch_detail
, idx
, num
);
1852 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
1855 goto sve_imm_offset_vl
;
1857 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
1860 goto sve_imm_offset_vl
;
1862 case AARCH64_OPND_SVE_ADDR_RI_U6
:
1863 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
1864 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
1865 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
1869 assert (!opnd
->addr
.offset
.is_reg
);
1870 assert (opnd
->addr
.preind
);
1871 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
1874 if (opnd
->shifter
.operator_present
1875 || opnd
->shifter
.amount_present
)
1877 set_other_error (mismatch_detail
, idx
,
1878 _("invalid addressing mode"));
1881 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1883 set_offset_out_of_range_error (mismatch_detail
, idx
,
1884 min_value
, max_value
);
1887 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1889 set_unaligned_error (mismatch_detail
, idx
, num
);
1894 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
1897 goto sve_imm_offset
;
1899 case AARCH64_OPND_SVE_ADDR_R
:
1900 case AARCH64_OPND_SVE_ADDR_RR
:
1901 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
1902 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
1903 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
1904 case AARCH64_OPND_SVE_ADDR_RX
:
1905 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
1906 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
1907 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
1908 case AARCH64_OPND_SVE_ADDR_RZ
:
1909 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
1910 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
1911 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
1912 modifiers
= 1 << AARCH64_MOD_LSL
;
1914 assert (opnd
->addr
.offset
.is_reg
);
1915 assert (opnd
->addr
.preind
);
1916 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
1917 && opnd
->addr
.offset
.regno
== 31)
1919 set_other_error (mismatch_detail
, idx
,
1920 _("index register xzr is not allowed"));
1923 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1924 || (opnd
->shifter
.amount
1925 != get_operand_specific_data (&aarch64_operands
[type
])))
1927 set_other_error (mismatch_detail
, idx
,
1928 _("invalid addressing mode"));
1933 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
1934 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
1935 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
1936 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
1937 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
1938 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
1939 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
1940 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
1941 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
1942 goto sve_rr_operand
;
1944 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
1945 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
1946 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
1947 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
1950 goto sve_imm_offset
;
1952 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
1953 modifiers
= 1 << AARCH64_MOD_LSL
;
1955 assert (opnd
->addr
.offset
.is_reg
);
1956 assert (opnd
->addr
.preind
);
1957 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1958 || opnd
->shifter
.amount
< 0
1959 || opnd
->shifter
.amount
> 3)
1961 set_other_error (mismatch_detail
, idx
,
1962 _("invalid addressing mode"));
1967 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
1968 modifiers
= (1 << AARCH64_MOD_SXTW
);
1969 goto sve_zz_operand
;
1971 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
1972 modifiers
= 1 << AARCH64_MOD_UXTW
;
1973 goto sve_zz_operand
;
1980 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1981 if (type
== AARCH64_OPND_LEt
)
1983 /* Get the upper bound for the element index. */
1984 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1985 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1987 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1991 /* The opcode dependent area stores the number of elements in
1992 each structure to be loaded/stored. */
1993 num
= get_opcode_dependent_value (opcode
);
1996 case AARCH64_OPND_LVt
:
1997 assert (num
>= 1 && num
<= 4);
1998 /* Unless LD1/ST1, the number of registers should be equal to that
1999 of the structure elements. */
2000 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
2002 set_reg_list_error (mismatch_detail
, idx
, num
);
2006 case AARCH64_OPND_LVt_AL
:
2007 case AARCH64_OPND_LEt
:
2008 assert (num
>= 1 && num
<= 4);
2009 /* The number of registers should be equal to that of the structure
2011 if (opnd
->reglist
.num_regs
!= num
)
2013 set_reg_list_error (mismatch_detail
, idx
, num
);
2022 case AARCH64_OPND_CLASS_IMMEDIATE
:
2023 /* Constraint check on immediate operand. */
2024 imm
= opnd
->imm
.value
;
2025 /* E.g. imm_0_31 constrains value to be 0..31. */
2026 if (qualifier_value_in_range_constraint_p (qualifier
)
2027 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
2028 get_upper_bound (qualifier
)))
2030 set_imm_out_of_range_error (mismatch_detail
, idx
,
2031 get_lower_bound (qualifier
),
2032 get_upper_bound (qualifier
));
2038 case AARCH64_OPND_AIMM
:
2039 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2041 set_other_error (mismatch_detail
, idx
,
2042 _("invalid shift operator"));
2045 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
2047 set_other_error (mismatch_detail
, idx
,
2048 _("shift amount must be 0 or 12"));
2051 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
2053 set_other_error (mismatch_detail
, idx
,
2054 _("immediate out of range"));
2059 case AARCH64_OPND_HALF
:
2060 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
2061 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2063 set_other_error (mismatch_detail
, idx
,
2064 _("invalid shift operator"));
2067 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2068 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
2070 set_other_error (mismatch_detail
, idx
,
2071 _("shift amount must be a multiple of 16"));
2074 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
2076 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
2080 if (opnd
->imm
.value
< 0)
2082 set_other_error (mismatch_detail
, idx
,
2083 _("negative immediate value not allowed"));
2086 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
2088 set_other_error (mismatch_detail
, idx
,
2089 _("immediate out of range"));
2094 case AARCH64_OPND_IMM_MOV
:
2096 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2097 imm
= opnd
->imm
.value
;
2101 case OP_MOV_IMM_WIDEN
:
2104 case OP_MOV_IMM_WIDE
:
2105 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
2107 set_other_error (mismatch_detail
, idx
,
2108 _("immediate out of range"));
2112 case OP_MOV_IMM_LOG
:
2113 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
2115 set_other_error (mismatch_detail
, idx
,
2116 _("immediate out of range"));
2127 case AARCH64_OPND_NZCV
:
2128 case AARCH64_OPND_CCMP_IMM
:
2129 case AARCH64_OPND_EXCEPTION
:
2130 case AARCH64_OPND_TME_UIMM16
:
2131 case AARCH64_OPND_UIMM4
:
2132 case AARCH64_OPND_UIMM4_ADDG
:
2133 case AARCH64_OPND_UIMM7
:
2134 case AARCH64_OPND_UIMM3_OP1
:
2135 case AARCH64_OPND_UIMM3_OP2
:
2136 case AARCH64_OPND_SVE_UIMM3
:
2137 case AARCH64_OPND_SVE_UIMM7
:
2138 case AARCH64_OPND_SVE_UIMM8
:
2139 case AARCH64_OPND_SVE_UIMM8_53
:
2140 size
= get_operand_fields_width (get_operand_from_code (type
));
2142 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
2144 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2150 case AARCH64_OPND_UIMM10
:
2151 /* Scaled unsigned 10 bits immediate offset. */
2152 if (!value_in_range_p (opnd
->imm
.value
, 0, 1008))
2154 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1008);
2158 if (!value_aligned_p (opnd
->imm
.value
, 16))
2160 set_unaligned_error (mismatch_detail
, idx
, 16);
2165 case AARCH64_OPND_SIMM5
:
2166 case AARCH64_OPND_SVE_SIMM5
:
2167 case AARCH64_OPND_SVE_SIMM5B
:
2168 case AARCH64_OPND_SVE_SIMM6
:
2169 case AARCH64_OPND_SVE_SIMM8
:
2170 size
= get_operand_fields_width (get_operand_from_code (type
));
2172 if (!value_fit_signed_field_p (opnd
->imm
.value
, size
))
2174 set_imm_out_of_range_error (mismatch_detail
, idx
,
2176 (1 << (size
- 1)) - 1);
2181 case AARCH64_OPND_WIDTH
:
2182 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
2183 && opnds
[0].type
== AARCH64_OPND_Rd
);
2184 size
= get_upper_bound (qualifier
);
2185 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
2186 /* lsb+width <= reg.size */
2188 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
2189 size
- opnds
[idx
-1].imm
.value
);
2194 case AARCH64_OPND_LIMM
:
2195 case AARCH64_OPND_SVE_LIMM
:
2197 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2198 uint64_t uimm
= opnd
->imm
.value
;
2199 if (opcode
->op
== OP_BIC
)
2201 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2203 set_other_error (mismatch_detail
, idx
,
2204 _("immediate out of range"));
2210 case AARCH64_OPND_IMM0
:
2211 case AARCH64_OPND_FPIMM0
:
2212 if (opnd
->imm
.value
!= 0)
2214 set_other_error (mismatch_detail
, idx
,
2215 _("immediate zero expected"));
2220 case AARCH64_OPND_IMM_ROT1
:
2221 case AARCH64_OPND_IMM_ROT2
:
2222 case AARCH64_OPND_SVE_IMM_ROT2
:
2223 if (opnd
->imm
.value
!= 0
2224 && opnd
->imm
.value
!= 90
2225 && opnd
->imm
.value
!= 180
2226 && opnd
->imm
.value
!= 270)
2228 set_other_error (mismatch_detail
, idx
,
2229 _("rotate expected to be 0, 90, 180 or 270"));
2234 case AARCH64_OPND_IMM_ROT3
:
2235 case AARCH64_OPND_SVE_IMM_ROT1
:
2236 case AARCH64_OPND_SVE_IMM_ROT3
:
2237 if (opnd
->imm
.value
!= 90 && opnd
->imm
.value
!= 270)
2239 set_other_error (mismatch_detail
, idx
,
2240 _("rotate expected to be 90 or 270"));
2245 case AARCH64_OPND_SHLL_IMM
:
2247 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2248 if (opnd
->imm
.value
!= size
)
2250 set_other_error (mismatch_detail
, idx
,
2251 _("invalid shift amount"));
2256 case AARCH64_OPND_IMM_VLSL
:
2257 size
= aarch64_get_qualifier_esize (qualifier
);
2258 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
2260 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2266 case AARCH64_OPND_IMM_VLSR
:
2267 size
= aarch64_get_qualifier_esize (qualifier
);
2268 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
2270 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
2275 case AARCH64_OPND_SIMD_IMM
:
2276 case AARCH64_OPND_SIMD_IMM_SFT
:
2277 /* Qualifier check. */
2280 case AARCH64_OPND_QLF_LSL
:
2281 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2283 set_other_error (mismatch_detail
, idx
,
2284 _("invalid shift operator"));
2288 case AARCH64_OPND_QLF_MSL
:
2289 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
2291 set_other_error (mismatch_detail
, idx
,
2292 _("invalid shift operator"));
2296 case AARCH64_OPND_QLF_NIL
:
2297 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2299 set_other_error (mismatch_detail
, idx
,
2300 _("shift is not permitted"));
2308 /* Is the immediate valid? */
2310 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
2312 /* uimm8 or simm8 */
2313 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
2315 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
2319 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
2322 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2323 ffffffffgggggggghhhhhhhh'. */
2324 set_other_error (mismatch_detail
, idx
,
2325 _("invalid value for immediate"));
2328 /* Is the shift amount valid? */
2329 switch (opnd
->shifter
.kind
)
2331 case AARCH64_MOD_LSL
:
2332 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2333 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
2335 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
2339 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
2341 set_unaligned_error (mismatch_detail
, idx
, 8);
2345 case AARCH64_MOD_MSL
:
2346 /* Only 8 and 16 are valid shift amount. */
2347 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
2349 set_other_error (mismatch_detail
, idx
,
2350 _("shift amount must be 0 or 16"));
2355 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2357 set_other_error (mismatch_detail
, idx
,
2358 _("invalid shift operator"));
2365 case AARCH64_OPND_FPIMM
:
2366 case AARCH64_OPND_SIMD_FPIMM
:
2367 case AARCH64_OPND_SVE_FPIMM8
:
2368 if (opnd
->imm
.is_fp
== 0)
2370 set_other_error (mismatch_detail
, idx
,
2371 _("floating-point immediate expected"));
2374 /* The value is expected to be an 8-bit floating-point constant with
2375 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2376 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2378 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
2380 set_other_error (mismatch_detail
, idx
,
2381 _("immediate out of range"));
2384 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2386 set_other_error (mismatch_detail
, idx
,
2387 _("invalid shift operator"));
2392 case AARCH64_OPND_SVE_AIMM
:
2395 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2396 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2397 mask
= ~((uint64_t) -1 << (size
* 4) << (size
* 4));
2398 uvalue
= opnd
->imm
.value
;
2399 shift
= opnd
->shifter
.amount
;
2404 set_other_error (mismatch_detail
, idx
,
2405 _("no shift amount allowed for"
2406 " 8-bit constants"));
2412 if (shift
!= 0 && shift
!= 8)
2414 set_other_error (mismatch_detail
, idx
,
2415 _("shift amount must be 0 or 8"));
2418 if (shift
== 0 && (uvalue
& 0xff) == 0)
2421 uvalue
= (int64_t) uvalue
/ 256;
2425 if ((uvalue
& mask
) != uvalue
&& (uvalue
| ~mask
) != uvalue
)
2427 set_other_error (mismatch_detail
, idx
,
2428 _("immediate too big for element size"));
2431 uvalue
= (uvalue
- min_value
) & mask
;
2434 set_other_error (mismatch_detail
, idx
,
2435 _("invalid arithmetic immediate"));
2440 case AARCH64_OPND_SVE_ASIMM
:
2444 case AARCH64_OPND_SVE_I1_HALF_ONE
:
2445 assert (opnd
->imm
.is_fp
);
2446 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x3f800000)
2448 set_other_error (mismatch_detail
, idx
,
2449 _("floating-point value must be 0.5 or 1.0"));
2454 case AARCH64_OPND_SVE_I1_HALF_TWO
:
2455 assert (opnd
->imm
.is_fp
);
2456 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x40000000)
2458 set_other_error (mismatch_detail
, idx
,
2459 _("floating-point value must be 0.5 or 2.0"));
2464 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
2465 assert (opnd
->imm
.is_fp
);
2466 if (opnd
->imm
.value
!= 0 && opnd
->imm
.value
!= 0x3f800000)
2468 set_other_error (mismatch_detail
, idx
,
2469 _("floating-point value must be 0.0 or 1.0"));
2474 case AARCH64_OPND_SVE_INV_LIMM
:
2476 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2477 uint64_t uimm
= ~opnd
->imm
.value
;
2478 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2480 set_other_error (mismatch_detail
, idx
,
2481 _("immediate out of range"));
2487 case AARCH64_OPND_SVE_LIMM_MOV
:
2489 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2490 uint64_t uimm
= opnd
->imm
.value
;
2491 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2493 set_other_error (mismatch_detail
, idx
,
2494 _("immediate out of range"));
2497 if (!aarch64_sve_dupm_mov_immediate_p (uimm
, esize
))
2499 set_other_error (mismatch_detail
, idx
,
2500 _("invalid replicated MOV immediate"));
2506 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2507 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
2508 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
2510 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
2515 case AARCH64_OPND_SVE_SHLIMM_PRED
:
2516 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
2517 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2518 if (!value_in_range_p (opnd
->imm
.value
, 0, 8 * size
- 1))
2520 set_imm_out_of_range_error (mismatch_detail
, idx
,
2526 case AARCH64_OPND_SVE_SHRIMM_PRED
:
2527 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
2528 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2529 if (!value_in_range_p (opnd
->imm
.value
, 1, 8 * size
))
2531 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, 8 * size
);
2541 case AARCH64_OPND_CLASS_SYSTEM
:
2544 case AARCH64_OPND_PSTATEFIELD
:
2545 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
2549 The immediate must be #0 or #1. */
2550 if ((opnd
->pstatefield
== 0x03 /* UAO. */
2551 || opnd
->pstatefield
== 0x04 /* PAN. */
2552 || opnd
->pstatefield
== 0x19 /* SSBS. */
2553 || opnd
->pstatefield
== 0x1a) /* DIT. */
2554 && opnds
[1].imm
.value
> 1)
2556 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2559 /* MSR SPSel, #uimm4
2560 Uses uimm4 as a control value to select the stack pointer: if
2561 bit 0 is set it selects the current exception level's stack
2562 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2563 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2564 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
2566 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2575 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
2576 /* Get the upper bound for the element index. */
2577 if (opcode
->op
== OP_FCMLA_ELEM
)
2578 /* FCMLA index range depends on the vector size of other operands
2579 and is halfed because complex numbers take two elements. */
2580 num
= aarch64_get_qualifier_nelem (opnds
[0].qualifier
)
2581 * aarch64_get_qualifier_esize (opnds
[0].qualifier
) / 2;
2584 num
= num
/ aarch64_get_qualifier_esize (qualifier
) - 1;
2585 assert (aarch64_get_qualifier_nelem (qualifier
) == 1);
2587 /* Index out-of-range. */
2588 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
2590 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2593 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2594 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2595 number is encoded in "size:M:Rm":
2601 if (type
== AARCH64_OPND_Em16
&& qualifier
== AARCH64_OPND_QLF_S_H
2602 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2604 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2609 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2610 assert (idx
== 1 || idx
== 2);
2613 case AARCH64_OPND_Rm_EXT
:
2614 if (!aarch64_extend_operator_p (opnd
->shifter
.kind
)
2615 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2617 set_other_error (mismatch_detail
, idx
,
2618 _("extend operator expected"));
2621 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2622 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2623 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2625 if (!aarch64_stack_pointer_p (opnds
+ 0)
2626 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2628 if (!opnd
->shifter
.operator_present
)
2630 set_other_error (mismatch_detail
, idx
,
2631 _("missing extend operator"));
2634 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2636 set_other_error (mismatch_detail
, idx
,
2637 _("'LSL' operator not allowed"));
2641 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2642 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2643 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2645 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2648 /* In the 64-bit form, the final register operand is written as Wm
2649 for all but the (possibly omitted) UXTX/LSL and SXTX
2651 N.B. GAS allows X register to be used with any operator as a
2652 programming convenience. */
2653 if (qualifier
== AARCH64_OPND_QLF_X
2654 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2655 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2656 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2658 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2663 case AARCH64_OPND_Rm_SFT
:
2664 /* ROR is not available to the shifted register operand in
2665 arithmetic instructions. */
2666 if (!aarch64_shift_operator_p (opnd
->shifter
.kind
))
2668 set_other_error (mismatch_detail
, idx
,
2669 _("shift operator expected"));
2672 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2673 && opcode
->iclass
!= log_shift
)
2675 set_other_error (mismatch_detail
, idx
,
2676 _("'ROR' operator not allowed"));
2679 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2680 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2682 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2699 /* Main entrypoint for the operand constraint checking.
2701 Return 1 if operands of *INST meet the constraint applied by the operand
2702 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2703 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2704 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2705 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2706 error kind when it is notified that an instruction does not pass the check).
2708 Un-determined operand qualifiers may get established during the process. */
2711 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2712 aarch64_operand_error
*mismatch_detail
)
2716 DEBUG_TRACE ("enter");
2718 /* Check for cases where a source register needs to be the same as the
2719 destination register. Do this before matching qualifiers since if
2720 an instruction has both invalid tying and invalid qualifiers,
2721 the error about qualifiers would suggest several alternative
2722 instructions that also have invalid tying. */
2723 i
= inst
->opcode
->tied_operand
;
2724 if (i
> 0 && (inst
->operands
[0].reg
.regno
!= inst
->operands
[i
].reg
.regno
))
2726 if (mismatch_detail
)
2728 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
2729 mismatch_detail
->index
= i
;
2730 mismatch_detail
->error
= NULL
;
2735 /* Match operands' qualifier.
2736 *INST has already had qualifier establish for some, if not all, of
2737 its operands; we need to find out whether these established
2738 qualifiers match one of the qualifier sequence in
2739 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2740 with the corresponding qualifier in such a sequence.
2741 Only basic operand constraint checking is done here; the more thorough
2742 constraint checking will carried out by operand_general_constraint_met_p,
2743 which has be to called after this in order to get all of the operands'
2744 qualifiers established. */
2745 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2747 DEBUG_TRACE ("FAIL on operand qualifier matching");
2748 if (mismatch_detail
)
2750 /* Return an error type to indicate that it is the qualifier
2751 matching failure; we don't care about which operand as there
2752 are enough information in the opcode table to reproduce it. */
2753 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2754 mismatch_detail
->index
= -1;
2755 mismatch_detail
->error
= NULL
;
2760 /* Match operands' constraint. */
2761 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2763 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2764 if (type
== AARCH64_OPND_NIL
)
2766 if (inst
->operands
[i
].skip
)
2768 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2771 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2772 inst
->opcode
, mismatch_detail
) == 0)
2774 DEBUG_TRACE ("FAIL on operand %d", i
);
2779 DEBUG_TRACE ("PASS");
2784 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2785 Also updates the TYPE of each INST->OPERANDS with the corresponding
2786 value of OPCODE->OPERANDS.
2788 Note that some operand qualifiers may need to be manually cleared by
2789 the caller before it further calls the aarch64_opcode_encode; by
2790 doing this, it helps the qualifier matching facilities work
2793 const aarch64_opcode
*
2794 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2797 const aarch64_opcode
*old
= inst
->opcode
;
2799 inst
->opcode
= opcode
;
2801 /* Update the operand types. */
2802 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2804 inst
->operands
[i
].type
= opcode
->operands
[i
];
2805 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2809 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2815 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2818 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2819 if (operands
[i
] == operand
)
2821 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),  \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Names of the general-purpose registers, indexed as:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R32
#undef R64
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */
static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZS
#undef ZD
};
2857 /* Return the integer register name.
2858 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2860 static inline const char *
2861 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2863 const int has_zr
= sp_reg_p
? 0 : 1;
2864 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2865 return int_reg
[has_zr
][is_64
][regno
];
2868 /* Like get_int_reg_name, but IS_64 is always 1. */
2870 static inline const char *
2871 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2873 const int has_zr
= sp_reg_p
? 0 : 1;
2874 return int_reg
[has_zr
][1][regno
];
2877 /* Get the name of the integer offset register in OPND, using the shift type
2878 to decide whether it's a word or doubleword. */
2880 static inline const char *
2881 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
2883 switch (opnd
->shifter
.kind
)
2885 case AARCH64_MOD_UXTW
:
2886 case AARCH64_MOD_SXTW
:
2887 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
2889 case AARCH64_MOD_LSL
:
2890 case AARCH64_MOD_SXTX
:
2891 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
2898 /* Get the name of the SVE vector offset register in OPND, using the operand
2899 qualifier to decide whether the suffix should be .S or .D. */
2901 static inline const char *
2902 get_addr_sve_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
)
2904 assert (qualifier
== AARCH64_OPND_QLF_S_S
2905 || qualifier
== AARCH64_OPND_QLF_S_D
);
2906 return sve_reg
[qualifier
== AARCH64_OPND_QLF_S_D
][regno
];
2909 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2929 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2930 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2931 (depending on the type of the instruction). IMM8 will be expanded to a
2932 single-precision floating-point value (SIZE == 4) or a double-precision
2933 floating-point value (SIZE == 8). A half-precision floating-point value
2934 (SIZE == 2) is expanded to a single-precision floating-point value. The
2935 expanded value is returned. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      /* Build the top 32 bits of the double, then shift into place.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	  */
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7>              */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)		*/
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2973 /* Produce the string representation of the register list operand *OPND
2974 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2975 the register name that comes before the register number, such as "v". */
2977 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
2980 const int num_regs
= opnd
->reglist
.num_regs
;
2981 const int first_reg
= opnd
->reglist
.first_regno
;
2982 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2983 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2984 char tb
[8]; /* Temporary buffer. */
2986 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2987 assert (num_regs
>= 1 && num_regs
<= 4);
2989 /* Prepare the index if any. */
2990 if (opnd
->reglist
.has_index
)
2991 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2992 snprintf (tb
, 8, "[%" PRIi64
"]", (opnd
->reglist
.index
% 100));
2996 /* The hyphenated form is preferred for disassembly if there are
2997 more than two registers in the list, and the register numbers
2998 are monotonically increasing in increments of one. */
2999 if (num_regs
> 2 && last_reg
> first_reg
)
3000 snprintf (buf
, size
, "{%s%d.%s-%s%d.%s}%s", prefix
, first_reg
, qlf_name
,
3001 prefix
, last_reg
, qlf_name
, tb
);
3004 const int reg0
= first_reg
;
3005 const int reg1
= (first_reg
+ 1) & 0x1f;
3006 const int reg2
= (first_reg
+ 2) & 0x1f;
3007 const int reg3
= (first_reg
+ 3) & 0x1f;
3012 snprintf (buf
, size
, "{%s%d.%s}%s", prefix
, reg0
, qlf_name
, tb
);
3015 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s}%s", prefix
, reg0
, qlf_name
,
3016 prefix
, reg1
, qlf_name
, tb
);
3019 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3020 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
3021 prefix
, reg2
, qlf_name
, tb
);
3024 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3025 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
3026 prefix
, reg2
, qlf_name
, prefix
, reg3
, qlf_name
, tb
);
3032 /* Print the register+immediate address in OPND to BUF, which has SIZE
3033 characters. BASE is the name of the base register. */
3036 print_immediate_offset_address (char *buf
, size_t size
,
3037 const aarch64_opnd_info
*opnd
,
3040 if (opnd
->addr
.writeback
)
3042 if (opnd
->addr
.preind
)
3043 snprintf (buf
, size
, "[%s, #%d]!", base
, opnd
->addr
.offset
.imm
);
3045 snprintf (buf
, size
, "[%s], #%d", base
, opnd
->addr
.offset
.imm
);
3049 if (opnd
->shifter
.operator_present
)
3051 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL_VL
);
3052 snprintf (buf
, size
, "[%s, #%d, mul vl]",
3053 base
, opnd
->addr
.offset
.imm
);
3055 else if (opnd
->addr
.offset
.imm
)
3056 snprintf (buf
, size
, "[%s, #%d]", base
, opnd
->addr
.offset
.imm
);
3058 snprintf (buf
, size
, "[%s]", base
);
3062 /* Produce the string representation of the register offset address operand
3063 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3064 the names of the base and offset registers. */
3066 print_register_offset_address (char *buf
, size_t size
,
3067 const aarch64_opnd_info
*opnd
,
3068 const char *base
, const char *offset
)
3070 char tb
[16]; /* Temporary buffer. */
3071 bfd_boolean print_extend_p
= TRUE
;
3072 bfd_boolean print_amount_p
= TRUE
;
3073 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
3075 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
3076 || !opnd
->shifter
.amount_present
))
3078 /* Not print the shift/extend amount when the amount is zero and
3079 when it is not the special case of 8-bit load/store instruction. */
3080 print_amount_p
= FALSE
;
3081 /* Likewise, no need to print the shift operator LSL in such a
3083 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3084 print_extend_p
= FALSE
;
3087 /* Prepare for the extend/shift. */
3091 snprintf (tb
, sizeof (tb
), ", %s #%" PRIi64
, shift_name
,
3092 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3093 (opnd
->shifter
.amount
% 100));
3095 snprintf (tb
, sizeof (tb
), ", %s", shift_name
);
3100 snprintf (buf
, size
, "[%s, %s%s]", base
, offset
, tb
);
3103 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3104 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3105 PC, PCREL_P and ADDRESS are used to pass in and return information about
3106 the PC-relative address calculation, where the PC value is passed in
3107 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3108 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3109 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3111 The function serves both the disassembler and the assembler diagnostics
3112 issuer, which is the reason why it lives in this file. */
3115 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
3116 const aarch64_opcode
*opcode
,
3117 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
3118 bfd_vma
*address
, char** notes
)
3120 unsigned int i
, num_conds
;
3121 const char *name
= NULL
;
3122 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
3123 enum aarch64_modifier_kind kind
;
3124 uint64_t addr
, enum_value
;
3132 case AARCH64_OPND_Rd
:
3133 case AARCH64_OPND_Rn
:
3134 case AARCH64_OPND_Rm
:
3135 case AARCH64_OPND_Rt
:
3136 case AARCH64_OPND_Rt2
:
3137 case AARCH64_OPND_Rs
:
3138 case AARCH64_OPND_Ra
:
3139 case AARCH64_OPND_Rt_SYS
:
3140 case AARCH64_OPND_PAIRREG
:
3141 case AARCH64_OPND_SVE_Rm
:
3142 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3143 the <ic_op>, therefore we use opnd->present to override the
3144 generic optional-ness information. */
3145 if (opnd
->type
== AARCH64_OPND_Rt_SYS
)
3150 /* Omit the operand, e.g. RET. */
3151 else if (optional_operand_p (opcode
, idx
)
3153 == get_optional_operand_default_value (opcode
)))
3155 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3156 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3157 snprintf (buf
, size
, "%s",
3158 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3161 case AARCH64_OPND_Rd_SP
:
3162 case AARCH64_OPND_Rn_SP
:
3163 case AARCH64_OPND_Rt_SP
:
3164 case AARCH64_OPND_SVE_Rn_SP
:
3165 case AARCH64_OPND_Rm_SP
:
3166 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3167 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
3168 || opnd
->qualifier
== AARCH64_OPND_QLF_X
3169 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
3170 snprintf (buf
, size
, "%s",
3171 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
3174 case AARCH64_OPND_Rm_EXT
:
3175 kind
= opnd
->shifter
.kind
;
3176 assert (idx
== 1 || idx
== 2);
3177 if ((aarch64_stack_pointer_p (opnds
)
3178 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
3179 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
3180 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
3181 && kind
== AARCH64_MOD_UXTW
)
3182 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
3183 && kind
== AARCH64_MOD_UXTX
)))
3185 /* 'LSL' is the preferred form in this case. */
3186 kind
= AARCH64_MOD_LSL
;
3187 if (opnd
->shifter
.amount
== 0)
3189 /* Shifter omitted. */
3190 snprintf (buf
, size
, "%s",
3191 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3195 if (opnd
->shifter
.amount
)
3196 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3197 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3198 aarch64_operand_modifiers
[kind
].name
,
3199 opnd
->shifter
.amount
);
3201 snprintf (buf
, size
, "%s, %s",
3202 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3203 aarch64_operand_modifiers
[kind
].name
);
3206 case AARCH64_OPND_Rm_SFT
:
3207 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3208 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3209 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3210 snprintf (buf
, size
, "%s",
3211 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3213 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3214 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3215 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3216 opnd
->shifter
.amount
);
3219 case AARCH64_OPND_Fd
:
3220 case AARCH64_OPND_Fn
:
3221 case AARCH64_OPND_Fm
:
3222 case AARCH64_OPND_Fa
:
3223 case AARCH64_OPND_Ft
:
3224 case AARCH64_OPND_Ft2
:
3225 case AARCH64_OPND_Sd
:
3226 case AARCH64_OPND_Sn
:
3227 case AARCH64_OPND_Sm
:
3228 case AARCH64_OPND_SVE_VZn
:
3229 case AARCH64_OPND_SVE_Vd
:
3230 case AARCH64_OPND_SVE_Vm
:
3231 case AARCH64_OPND_SVE_Vn
:
3232 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
3236 case AARCH64_OPND_Va
:
3237 case AARCH64_OPND_Vd
:
3238 case AARCH64_OPND_Vn
:
3239 case AARCH64_OPND_Vm
:
3240 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
3241 aarch64_get_qualifier_name (opnd
->qualifier
));
3244 case AARCH64_OPND_Ed
:
3245 case AARCH64_OPND_En
:
3246 case AARCH64_OPND_Em
:
3247 case AARCH64_OPND_Em16
:
3248 case AARCH64_OPND_SM3_IMM2
:
3249 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3250 aarch64_get_qualifier_name (opnd
->qualifier
),
3251 opnd
->reglane
.index
);
3254 case AARCH64_OPND_VdD1
:
3255 case AARCH64_OPND_VnD1
:
3256 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
3259 case AARCH64_OPND_LVn
:
3260 case AARCH64_OPND_LVt
:
3261 case AARCH64_OPND_LVt_AL
:
3262 case AARCH64_OPND_LEt
:
3263 print_register_list (buf
, size
, opnd
, "v");
3266 case AARCH64_OPND_SVE_Pd
:
3267 case AARCH64_OPND_SVE_Pg3
:
3268 case AARCH64_OPND_SVE_Pg4_5
:
3269 case AARCH64_OPND_SVE_Pg4_10
:
3270 case AARCH64_OPND_SVE_Pg4_16
:
3271 case AARCH64_OPND_SVE_Pm
:
3272 case AARCH64_OPND_SVE_Pn
:
3273 case AARCH64_OPND_SVE_Pt
:
3274 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3275 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
3276 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
3277 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
3278 snprintf (buf
, size
, "p%d/%s", opnd
->reg
.regno
,
3279 aarch64_get_qualifier_name (opnd
->qualifier
));
3281 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
3282 aarch64_get_qualifier_name (opnd
->qualifier
));
3285 case AARCH64_OPND_SVE_Za_5
:
3286 case AARCH64_OPND_SVE_Za_16
:
3287 case AARCH64_OPND_SVE_Zd
:
3288 case AARCH64_OPND_SVE_Zm_5
:
3289 case AARCH64_OPND_SVE_Zm_16
:
3290 case AARCH64_OPND_SVE_Zn
:
3291 case AARCH64_OPND_SVE_Zt
:
3292 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3293 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
3295 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
3296 aarch64_get_qualifier_name (opnd
->qualifier
));
3299 case AARCH64_OPND_SVE_ZnxN
:
3300 case AARCH64_OPND_SVE_ZtxN
:
3301 print_register_list (buf
, size
, opnd
, "z");
3304 case AARCH64_OPND_SVE_Zm3_INDEX
:
3305 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
3306 case AARCH64_OPND_SVE_Zm4_INDEX
:
3307 case AARCH64_OPND_SVE_Zn_INDEX
:
3308 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3309 aarch64_get_qualifier_name (opnd
->qualifier
),
3310 opnd
->reglane
.index
);
3313 case AARCH64_OPND_CRn
:
3314 case AARCH64_OPND_CRm
:
3315 snprintf (buf
, size
, "C%" PRIi64
, opnd
->imm
.value
);
3318 case AARCH64_OPND_IDX
:
3319 case AARCH64_OPND_MASK
:
3320 case AARCH64_OPND_IMM
:
3321 case AARCH64_OPND_IMM_2
:
3322 case AARCH64_OPND_WIDTH
:
3323 case AARCH64_OPND_UIMM3_OP1
:
3324 case AARCH64_OPND_UIMM3_OP2
:
3325 case AARCH64_OPND_BIT_NUM
:
3326 case AARCH64_OPND_IMM_VLSL
:
3327 case AARCH64_OPND_IMM_VLSR
:
3328 case AARCH64_OPND_SHLL_IMM
:
3329 case AARCH64_OPND_IMM0
:
3330 case AARCH64_OPND_IMMR
:
3331 case AARCH64_OPND_IMMS
:
3332 case AARCH64_OPND_FBITS
:
3333 case AARCH64_OPND_TME_UIMM16
:
3334 case AARCH64_OPND_SIMM5
:
3335 case AARCH64_OPND_SVE_SHLIMM_PRED
:
3336 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
3337 case AARCH64_OPND_SVE_SHRIMM_PRED
:
3338 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
3339 case AARCH64_OPND_SVE_SIMM5
:
3340 case AARCH64_OPND_SVE_SIMM5B
:
3341 case AARCH64_OPND_SVE_SIMM6
:
3342 case AARCH64_OPND_SVE_SIMM8
:
3343 case AARCH64_OPND_SVE_UIMM3
:
3344 case AARCH64_OPND_SVE_UIMM7
:
3345 case AARCH64_OPND_SVE_UIMM8
:
3346 case AARCH64_OPND_SVE_UIMM8_53
:
3347 case AARCH64_OPND_IMM_ROT1
:
3348 case AARCH64_OPND_IMM_ROT2
:
3349 case AARCH64_OPND_IMM_ROT3
:
3350 case AARCH64_OPND_SVE_IMM_ROT1
:
3351 case AARCH64_OPND_SVE_IMM_ROT2
:
3352 case AARCH64_OPND_SVE_IMM_ROT3
:
3353 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3356 case AARCH64_OPND_SVE_I1_HALF_ONE
:
3357 case AARCH64_OPND_SVE_I1_HALF_TWO
:
3358 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
3361 c
.i
= opnd
->imm
.value
;
3362 snprintf (buf
, size
, "#%.1f", c
.f
);
3366 case AARCH64_OPND_SVE_PATTERN
:
3367 if (optional_operand_p (opcode
, idx
)
3368 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3370 enum_value
= opnd
->imm
.value
;
3371 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3372 if (aarch64_sve_pattern_array
[enum_value
])
3373 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[enum_value
]);
3375 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3378 case AARCH64_OPND_SVE_PATTERN_SCALED
:
3379 if (optional_operand_p (opcode
, idx
)
3380 && !opnd
->shifter
.operator_present
3381 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3383 enum_value
= opnd
->imm
.value
;
3384 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3385 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
3386 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[opnd
->imm
.value
]);
3388 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3389 if (opnd
->shifter
.operator_present
)
3391 size_t len
= strlen (buf
);
3392 snprintf (buf
+ len
, size
- len
, ", %s #%" PRIi64
,
3393 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3394 opnd
->shifter
.amount
);
3398 case AARCH64_OPND_SVE_PRFOP
:
3399 enum_value
= opnd
->imm
.value
;
3400 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
3401 if (aarch64_sve_prfop_array
[enum_value
])
3402 snprintf (buf
, size
, "%s", aarch64_sve_prfop_array
[enum_value
]);
3404 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3407 case AARCH64_OPND_IMM_MOV
:
3408 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3410 case 4: /* e.g. MOV Wd, #<imm32>. */
3412 int imm32
= opnd
->imm
.value
;
3413 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
3416 case 8: /* e.g. MOV Xd, #<imm64>. */
3417 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
3418 opnd
->imm
.value
, opnd
->imm
.value
);
3420 default: assert (0);
3424 case AARCH64_OPND_FPIMM0
:
3425 snprintf (buf
, size
, "#0.0");
3428 case AARCH64_OPND_LIMM
:
3429 case AARCH64_OPND_AIMM
:
3430 case AARCH64_OPND_HALF
:
3431 case AARCH64_OPND_SVE_INV_LIMM
:
3432 case AARCH64_OPND_SVE_LIMM
:
3433 case AARCH64_OPND_SVE_LIMM_MOV
:
3434 if (opnd
->shifter
.amount
)
3435 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3436 opnd
->shifter
.amount
);
3438 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3441 case AARCH64_OPND_SIMD_IMM
:
3442 case AARCH64_OPND_SIMD_IMM_SFT
:
3443 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3444 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
3445 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3447 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%" PRIi64
, opnd
->imm
.value
,
3448 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3449 opnd
->shifter
.amount
);
3452 case AARCH64_OPND_SVE_AIMM
:
3453 case AARCH64_OPND_SVE_ASIMM
:
3454 if (opnd
->shifter
.amount
)
3455 snprintf (buf
, size
, "#%" PRIi64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3456 opnd
->shifter
.amount
);
3458 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3461 case AARCH64_OPND_FPIMM
:
3462 case AARCH64_OPND_SIMD_FPIMM
:
3463 case AARCH64_OPND_SVE_FPIMM8
:
3464 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3466 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3469 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
3470 snprintf (buf
, size
, "#%.18e", c
.f
);
3473 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3476 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
3477 snprintf (buf
, size
, "#%.18e", c
.f
);
3480 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3483 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
3484 snprintf (buf
, size
, "#%.18e", c
.d
);
3487 default: assert (0);
3491 case AARCH64_OPND_CCMP_IMM
:
3492 case AARCH64_OPND_NZCV
:
3493 case AARCH64_OPND_EXCEPTION
:
3494 case AARCH64_OPND_UIMM4
:
3495 case AARCH64_OPND_UIMM4_ADDG
:
3496 case AARCH64_OPND_UIMM7
:
3497 case AARCH64_OPND_UIMM10
:
3498 if (optional_operand_p (opcode
, idx
) == TRUE
3499 && (opnd
->imm
.value
==
3500 (int64_t) get_optional_operand_default_value (opcode
)))
3501 /* Omit the operand, e.g. DCPS1. */
3503 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
3506 case AARCH64_OPND_COND
:
3507 case AARCH64_OPND_COND1
:
3508 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
3509 num_conds
= ARRAY_SIZE (opnd
->cond
->names
);
3510 for (i
= 1; i
< num_conds
&& opnd
->cond
->names
[i
]; ++i
)
3512 size_t len
= strlen (buf
);
3514 snprintf (buf
+ len
, size
- len
, " // %s = %s",
3515 opnd
->cond
->names
[0], opnd
->cond
->names
[i
]);
3517 snprintf (buf
+ len
, size
- len
, ", %s",
3518 opnd
->cond
->names
[i
]);
3522 case AARCH64_OPND_ADDR_ADRP
:
3523 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
3529 /* This is not necessary during the disassembling, as print_address_func
3530 in the disassemble_info will take care of the printing. But some
3531 other callers may be still interested in getting the string in *STR,
3532 so here we do snprintf regardless. */
3533 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3536 case AARCH64_OPND_ADDR_PCREL14
:
3537 case AARCH64_OPND_ADDR_PCREL19
:
3538 case AARCH64_OPND_ADDR_PCREL21
:
3539 case AARCH64_OPND_ADDR_PCREL26
:
3540 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
3545 /* This is not necessary during the disassembling, as print_address_func
3546 in the disassemble_info will take care of the printing. But some
3547 other callers may be still interested in getting the string in *STR,
3548 so here we do snprintf regardless. */
3549 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3552 case AARCH64_OPND_ADDR_SIMPLE
:
3553 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
3554 case AARCH64_OPND_SIMD_ADDR_POST
:
3555 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3556 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
3558 if (opnd
->addr
.offset
.is_reg
)
3559 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
3561 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
3564 snprintf (buf
, size
, "[%s]", name
);
3567 case AARCH64_OPND_ADDR_REGOFF
:
3568 case AARCH64_OPND_SVE_ADDR_R
:
3569 case AARCH64_OPND_SVE_ADDR_RR
:
3570 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
3571 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
3572 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
3573 case AARCH64_OPND_SVE_ADDR_RX
:
3574 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
3575 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
3576 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
3577 print_register_offset_address
3578 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3579 get_offset_int_reg_name (opnd
));
3582 case AARCH64_OPND_SVE_ADDR_RZ
:
3583 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
3584 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
3585 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
3586 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
3587 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
3588 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
3589 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
3590 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
3591 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
3592 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
3593 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
3594 print_register_offset_address
3595 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3596 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3599 case AARCH64_OPND_ADDR_SIMM7
:
3600 case AARCH64_OPND_ADDR_SIMM9
:
3601 case AARCH64_OPND_ADDR_SIMM9_2
:
3602 case AARCH64_OPND_ADDR_SIMM10
:
3603 case AARCH64_OPND_ADDR_SIMM11
:
3604 case AARCH64_OPND_ADDR_SIMM13
:
3605 case AARCH64_OPND_ADDR_OFFSET
:
3606 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
3607 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
3608 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
3609 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
3610 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
3611 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
3612 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
3613 case AARCH64_OPND_SVE_ADDR_RI_U6
:
3614 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
3615 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
3616 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
3617 print_immediate_offset_address
3618 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
3621 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
3622 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
3623 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
3624 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
3625 print_immediate_offset_address
3627 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
));
3630 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
3631 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
3632 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
3633 print_register_offset_address
3635 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
3636 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3639 case AARCH64_OPND_ADDR_UIMM12
:
3640 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3641 if (opnd
->addr
.offset
.imm
)
3642 snprintf (buf
, size
, "[%s, #%d]", name
, opnd
->addr
.offset
.imm
);
3644 snprintf (buf
, size
, "[%s]", name
);
3647 case AARCH64_OPND_SYSREG
:
3648 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
3650 bfd_boolean exact_match
3651 = (aarch64_sys_regs
[i
].flags
& opnd
->sysreg
.flags
)
3652 == opnd
->sysreg
.flags
;
3654 /* Try and find an exact match, But if that fails, return the first
3655 partial match that was found. */
3656 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
.value
3657 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
])
3658 && (name
== NULL
|| exact_match
))
3660 name
= aarch64_sys_regs
[i
].name
;
3668 /* If we didn't match exactly, that means the presense of a flag
3669 indicates what we didn't want for this instruction. e.g. If
3670 F_REG_READ is there, that means we were looking for a write
3671 register. See aarch64_ext_sysreg. */
3672 if (aarch64_sys_regs
[i
].flags
& F_REG_WRITE
)
3673 *notes
= _("reading from a write-only register");
3674 else if (aarch64_sys_regs
[i
].flags
& F_REG_READ
)
3675 *notes
= _("writing to a read-only register");
3680 snprintf (buf
, size
, "%s", name
);
3683 /* Implementation defined system register. */
3684 unsigned int value
= opnd
->sysreg
.value
;
3685 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
3686 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
3691 case AARCH64_OPND_PSTATEFIELD
:
3692 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
3693 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
3695 assert (aarch64_pstatefields
[i
].name
);
3696 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
3699 case AARCH64_OPND_SYSREG_AT
:
3700 case AARCH64_OPND_SYSREG_DC
:
3701 case AARCH64_OPND_SYSREG_IC
:
3702 case AARCH64_OPND_SYSREG_TLBI
:
3703 case AARCH64_OPND_SYSREG_SR
:
3704 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
3707 case AARCH64_OPND_BARRIER
:
3708 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
3711 case AARCH64_OPND_BARRIER_ISB
:
3712 /* Operand can be omitted, e.g. in DCPS1. */
3713 if (! optional_operand_p (opcode
, idx
)
3714 || (opnd
->barrier
->value
3715 != get_optional_operand_default_value (opcode
)))
3716 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
3719 case AARCH64_OPND_PRFOP
:
3720 if (opnd
->prfop
->name
!= NULL
)
3721 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
3723 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
3726 case AARCH64_OPND_BARRIER_PSB
:
3727 case AARCH64_OPND_BTI_TARGET
:
3728 if ((HINT_FLAG (opnd
->hint_option
->value
) & HINT_OPD_F_NOPRINT
) == 0)
3729 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
/* Encode a system-register address (op0, op1, CRn, CRm, op2) into the
   packed integer form stored in aarch64_sys_regs[].value: the five
   fields are placed at bits [19:5] and the whole value is shifted back
   down by 5, so op2 ends up in the low 3 bits.  This matches the
   field extraction done when printing an unnamed register as
   s<op0>_<op1>_c<crn>_c<crm>_<op2>.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* For 3.9.3 Instructions for Accessing Special Purpose Registers:
   these all have op0 == 3 and CRn == C4, so only op1, CRm and op2 vary.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* For 3.9.10 System Instructions: these all have op0 == 1.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
/* Table of named AArch64 system registers, terminated by a null name.
   Each entry gives the assembler/disassembler name, the packed
   (op0, op1, CRn, CRm, op2) encoding produced by CPENC/CPEN_, and
   flag bits (F_REG_READ / F_REG_WRITE for access restrictions,
   F_ARCHEXT for registers gated on an architecture extension,
   F_DEPRECATED for deprecated aliases).

   TODO: there is one more issue that needs to be resolved:
   1. handle cpu-implementation-defined system registers.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  /* Special-purpose registers (accessed via CPEN_, i.e. op0 == 3, CRn == C4).  */
  { "spsr_el1",         CPEN_ (0, C0, 0), 0 }, /* = spsr_svc */
  { "spsr_el12",        CPEN_ (5, C0, 0), F_ARCHEXT },
  { "elr_el1",          CPEN_ (0, C0, 1), 0 },
  { "elr_el12",         CPEN_ (5, C0, 1), F_ARCHEXT },
  { "sp_el0",           CPEN_ (0, C1, 0), 0 },
  { "spsel",            CPEN_ (0, C2, 0), 0 },
  { "daif",             CPEN_ (3, C2, 1), 0 },
  { "currentel",        CPEN_ (0, C2, 2), F_REG_READ }, /* RO */
  { "pan",              CPEN_ (0, C2, 3), F_ARCHEXT },
  { "uao",              CPEN_ (0, C2, 4), F_ARCHEXT },
  { "nzcv",             CPEN_ (3, C2, 0), 0 },
  { "ssbs",             CPEN_ (3, C2, 6), F_ARCHEXT },
  { "fpcr",             CPEN_ (3, C4, 0), 0 },
  { "fpsr",             CPEN_ (3, C4, 1), 0 },
  { "dspsr_el0",        CPEN_ (3, C5, 0), 0 },
  { "dlr_el0",          CPEN_ (3, C5, 1), 0 },
  { "spsr_el2",         CPEN_ (4, C0, 0), 0 }, /* = spsr_hyp */
  { "elr_el2",          CPEN_ (4, C0, 1), 0 },
  { "sp_el1",           CPEN_ (4, C1, 0), 0 },
  { "spsr_irq",         CPEN_ (4, C3, 0), 0 },
  { "spsr_abt",         CPEN_ (4, C3, 1), 0 },
  { "spsr_und",         CPEN_ (4, C3, 2), 0 },
  { "spsr_fiq",         CPEN_ (4, C3, 3), 0 },
  { "spsr_el3",         CPEN_ (6, C0, 0), 0 },
  { "elr_el3",          CPEN_ (6, C0, 1), 0 },
  { "sp_el2",           CPEN_ (6, C1, 0), 0 },
  { "spsr_svc",         CPEN_ (0, C0, 0), F_DEPRECATED }, /* = spsr_el1 */
  { "spsr_hyp",         CPEN_ (4, C0, 0), F_DEPRECATED }, /* = spsr_el2 */
  /* Identification registers (mostly read-only).  */
  { "midr_el1",         CPENC (3, 0, C0, C0, 0), F_REG_READ }, /* RO */
  { "ctr_el0",          CPENC (3, 3, C0, C0, 1), F_REG_READ }, /* RO */
  { "mpidr_el1",        CPENC (3, 0, C0, C0, 5), F_REG_READ }, /* RO */
  { "revidr_el1",       CPENC (3, 0, C0, C0, 6), F_REG_READ }, /* RO */
  { "aidr_el1",         CPENC (3, 1, C0, C0, 7), F_REG_READ }, /* RO */
  { "dczid_el0",        CPENC (3, 3, C0, C0, 7), F_REG_READ }, /* RO */
  { "id_dfr0_el1",      CPENC (3, 0, C0, C1, 2), F_REG_READ }, /* RO */
  { "id_pfr0_el1",      CPENC (3, 0, C0, C1, 0), F_REG_READ }, /* RO */
  { "id_pfr1_el1",      CPENC (3, 0, C0, C1, 1), F_REG_READ }, /* RO */
  { "id_pfr2_el1",      CPENC (3, 0, C0, C3, 4), F_ARCHEXT | F_REG_READ }, /* RO */
  { "id_afr0_el1",      CPENC (3, 0, C0, C1, 3), F_REG_READ }, /* RO */
  { "id_mmfr0_el1",     CPENC (3, 0, C0, C1, 4), F_REG_READ }, /* RO */
  { "id_mmfr1_el1",     CPENC (3, 0, C0, C1, 5), F_REG_READ }, /* RO */
  { "id_mmfr2_el1",     CPENC (3, 0, C0, C1, 6), F_REG_READ }, /* RO */
  { "id_mmfr3_el1",     CPENC (3, 0, C0, C1, 7), F_REG_READ }, /* RO */
  { "id_mmfr4_el1",     CPENC (3, 0, C0, C2, 6), F_REG_READ }, /* RO */
  { "id_isar0_el1",     CPENC (3, 0, C0, C2, 0), F_REG_READ }, /* RO */
  { "id_isar1_el1",     CPENC (3, 0, C0, C2, 1), F_REG_READ }, /* RO */
  { "id_isar2_el1",     CPENC (3, 0, C0, C2, 2), F_REG_READ }, /* RO */
  { "id_isar3_el1",     CPENC (3, 0, C0, C2, 3), F_REG_READ }, /* RO */
  { "id_isar4_el1",     CPENC (3, 0, C0, C2, 4), F_REG_READ }, /* RO */
  { "id_isar5_el1",     CPENC (3, 0, C0, C2, 5), F_REG_READ }, /* RO */
  { "mvfr0_el1",        CPENC (3, 0, C0, C3, 0), F_REG_READ }, /* RO */
  { "mvfr1_el1",        CPENC (3, 0, C0, C3, 1), F_REG_READ }, /* RO */
  { "mvfr2_el1",        CPENC (3, 0, C0, C3, 2), F_REG_READ }, /* RO */
  { "ccsidr_el1",       CPENC (3, 1, C0, C0, 0), F_REG_READ }, /* RO */
  { "id_aa64pfr0_el1",  CPENC (3, 0, C0, C4, 0), F_REG_READ }, /* RO */
  { "id_aa64pfr1_el1",  CPENC (3, 0, C0, C4, 1), F_REG_READ }, /* RO */
  { "id_aa64dfr0_el1",  CPENC (3, 0, C0, C5, 0), F_REG_READ }, /* RO */
  { "id_aa64dfr1_el1",  CPENC (3, 0, C0, C5, 1), F_REG_READ }, /* RO */
  { "id_aa64isar0_el1", CPENC (3, 0, C0, C6, 0), F_REG_READ }, /* RO */
  { "id_aa64isar1_el1", CPENC (3, 0, C0, C6, 1), F_REG_READ }, /* RO */
  { "id_aa64mmfr0_el1", CPENC (3, 0, C0, C7, 0), F_REG_READ }, /* RO */
  { "id_aa64mmfr1_el1", CPENC (3, 0, C0, C7, 1), F_REG_READ }, /* RO */
  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
  { "id_aa64afr0_el1",  CPENC (3, 0, C0, C5, 4), F_REG_READ }, /* RO */
  { "id_aa64afr1_el1",  CPENC (3, 0, C0, C5, 5), F_REG_READ }, /* RO */
  { "id_aa64zfr0_el1",  CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
  { "clidr_el1",        CPENC (3, 1, C0, C0, 1), F_REG_READ }, /* RO */
  { "csselr_el1",       CPENC (3, 2, C0, C0, 0), 0 },
  { "vpidr_el2",        CPENC (3, 4, C0, C0, 0), 0 },
  { "vmpidr_el2",       CPENC (3, 4, C0, C0, 5), 0 },
  /* Control registers.  */
  { "sctlr_el1",        CPENC (3, 0, C1, C0, 0), 0 },
  { "sctlr_el2",        CPENC (3, 4, C1, C0, 0), 0 },
  { "sctlr_el3",        CPENC (3, 6, C1, C0, 0), 0 },
  { "sctlr_el12",       CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
  { "actlr_el1",        CPENC (3, 0, C1, C0, 1), 0 },
  { "actlr_el2",        CPENC (3, 4, C1, C0, 1), 0 },
  { "actlr_el3",        CPENC (3, 6, C1, C0, 1), 0 },
  { "cpacr_el1",        CPENC (3, 0, C1, C0, 2), 0 },
  { "cpacr_el12",       CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
  { "cptr_el2",         CPENC (3, 4, C1, C1, 2), 0 },
  { "cptr_el3",         CPENC (3, 6, C1, C1, 2), 0 },
  { "scr_el3",          CPENC (3, 6, C1, C1, 0), 0 },
  { "hcr_el2",          CPENC (3, 4, C1, C1, 0), 0 },
  { "mdcr_el2",         CPENC (3, 4, C1, C1, 1), 0 },
  { "mdcr_el3",         CPENC (3, 6, C1, C3, 1), 0 },
  { "hstr_el2",         CPENC (3, 4, C1, C1, 3), 0 },
  { "hacr_el2",         CPENC (3, 4, C1, C1, 7), 0 },
  /* SVE vector-length control registers.  */
  { "zcr_el1",          CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
  { "zcr_el12",         CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
  { "zcr_el2",          CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
  { "zcr_el3",          CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
  { "zidr_el1",         CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
  /* Translation-table base / control registers.  */
  { "ttbr0_el1",        CPENC (3, 0, C2, C0, 0), 0 },
  { "ttbr1_el1",        CPENC (3, 0, C2, C0, 1), 0 },
  { "ttbr0_el2",        CPENC (3, 4, C2, C0, 0), 0 },
  { "ttbr1_el2",        CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
  { "ttbr0_el3",        CPENC (3, 6, C2, C0, 0), 0 },
  { "ttbr0_el12",       CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
  { "ttbr1_el12",       CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
  { "vttbr_el2",        CPENC (3, 4, C2, C1, 0), 0 },
  { "tcr_el1",          CPENC (3, 0, C2, C0, 2), 0 },
  { "tcr_el2",          CPENC (3, 4, C2, C0, 2), 0 },
  { "tcr_el3",          CPENC (3, 6, C2, C0, 2), 0 },
  { "tcr_el12",         CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
  { "vtcr_el2",         CPENC (3, 4, C2, C1, 2), 0 },
  /* Pointer-authentication key registers (ARMv8.3).  */
  { "apiakeylo_el1",    CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
  { "apiakeyhi_el1",    CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
  { "apibkeylo_el1",    CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
  { "apibkeyhi_el1",    CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
  { "apdakeylo_el1",    CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
  { "apdakeyhi_el1",    CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
  { "apdbkeylo_el1",    CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
  { "apdbkeyhi_el1",    CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
  { "apgakeylo_el1",    CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
  { "apgakeyhi_el1",    CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
  /* Fault / syndrome registers.  */
  { "afsr0_el1",        CPENC (3, 0, C5, C1, 0), 0 },
  { "afsr1_el1",        CPENC (3, 0, C5, C1, 1), 0 },
  { "afsr0_el2",        CPENC (3, 4, C5, C1, 0), 0 },
  { "afsr1_el2",        CPENC (3, 4, C5, C1, 1), 0 },
  { "afsr0_el3",        CPENC (3, 6, C5, C1, 0), 0 },
  { "afsr0_el12",       CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
  { "afsr1_el3",        CPENC (3, 6, C5, C1, 1), 0 },
  { "afsr1_el12",       CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
  { "esr_el1",          CPENC (3, 0, C5, C2, 0), 0 },
  { "esr_el2",          CPENC (3, 4, C5, C2, 0), 0 },
  { "esr_el3",          CPENC (3, 6, C5, C2, 0), 0 },
  { "esr_el12",         CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
  { "vsesr_el2",        CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
  { "fpexc32_el2",      CPENC (3, 4, C5, C3, 0), 0 },
  /* RAS error record registers.  */
  { "erridr_el1",       CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
  { "errselr_el1",      CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
  { "erxfr_el1",        CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
  { "erxctlr_el1",      CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
  { "erxstatus_el1",    CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
  { "erxaddr_el1",      CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
  { "erxmisc0_el1",     CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
  { "erxmisc1_el1",     CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
  /* Fault address registers.  */
  { "far_el1",          CPENC (3, 0, C6, C0, 0), 0 },
  { "far_el2",          CPENC (3, 4, C6, C0, 0), 0 },
  { "far_el3",          CPENC (3, 6, C6, C0, 0), 0 },
  { "far_el12",         CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
  { "hpfar_el2",        CPENC (3, 4, C6, C0, 4), 0 },
  { "par_el1",          CPENC (3, 0, C7, C4, 0), 0 },
  /* Memory attribute registers.  */
  { "mair_el1",         CPENC (3, 0, C10, C2, 0), 0 },
  { "mair_el2",         CPENC (3, 4, C10, C2, 0), 0 },
  { "mair_el3",         CPENC (3, 6, C10, C2, 0), 0 },
  { "mair_el12",        CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
  { "amair_el1",        CPENC (3, 0, C10, C3, 0), 0 },
  { "amair_el2",        CPENC (3, 4, C10, C3, 0), 0 },
  { "amair_el3",        CPENC (3, 6, C10, C3, 0), 0 },
  { "amair_el12",       CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
  /* Vector base / reset registers.  */
  { "vbar_el1",         CPENC (3, 0, C12, C0, 0), 0 },
  { "vbar_el2",         CPENC (3, 4, C12, C0, 0), 0 },
  { "vbar_el3",         CPENC (3, 6, C12, C0, 0), 0 },
  { "vbar_el12",        CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
  { "rvbar_el1",        CPENC (3, 0, C12, C0, 1), F_REG_READ }, /* RO */
  { "rvbar_el2",        CPENC (3, 4, C12, C0, 1), F_REG_READ }, /* RO */
  { "rvbar_el3",        CPENC (3, 6, C12, C0, 1), F_REG_READ }, /* RO */
  { "rmr_el1",          CPENC (3, 0, C12, C0, 2), 0 },
  { "rmr_el2",          CPENC (3, 4, C12, C0, 2), 0 },
  { "rmr_el3",          CPENC (3, 6, C12, C0, 2), 0 },
  { "isr_el1",          CPENC (3, 0, C12, C1, 0), F_REG_READ }, /* RO */
  { "disr_el1",         CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
  { "vdisr_el2",        CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
  /* Context ID registers.  */
  { "contextidr_el1",   CPENC (3, 0, C13, C0, 1), 0 },
  { "contextidr_el2",   CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
  { "contextidr_el12",  CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
  /* Random-number registers (read-only).  */
  { "rndr",             CPENC (3, 3, C2, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
  { "rndrrs",           CPENC (3, 3, C2, C4, 1), F_ARCHEXT | F_REG_READ }, /* RO */
  /* Memory Tagging Extension registers.  */
  { "tco",              CPENC (3, 3, C4, C2, 7), F_ARCHEXT },
  { "tfsre0_el1",       CPENC (3, 0, C6, C6, 1), F_ARCHEXT },
  { "tfsr_el1",         CPENC (3, 0, C6, C5, 0), F_ARCHEXT },
  { "tfsr_el2",         CPENC (3, 4, C6, C5, 0), F_ARCHEXT },
  { "tfsr_el3",         CPENC (3, 6, C6, C6, 0), F_ARCHEXT },
  { "tfsr_el12",        CPENC (3, 5, C6, C6, 0), F_ARCHEXT },
  { "rgsr_el1",         CPENC (3, 0, C1, C0, 5), F_ARCHEXT },
  { "gcr_el1",          CPENC (3, 0, C1, C0, 6), F_ARCHEXT },
  /* Thread ID registers.  */
  { "tpidr_el0",        CPENC (3, 3, C13, C0, 2), 0 },
  { "tpidrro_el0",      CPENC (3, 3, C13, C0, 3), 0 }, /* RW */
  { "tpidr_el1",        CPENC (3, 0, C13, C0, 4), 0 },
  { "tpidr_el2",        CPENC (3, 4, C13, C0, 2), 0 },
  { "tpidr_el3",        CPENC (3, 6, C13, C0, 2), 0 },
  { "scxtnum_el0",      CPENC (3, 3, C13, C0, 7), F_ARCHEXT },
  { "scxtnum_el1",      CPENC (3, 0, C13, C0, 7), F_ARCHEXT },
  { "scxtnum_el2",      CPENC (3, 4, C13, C0, 7), F_ARCHEXT },
  { "scxtnum_el12",     CPENC (3, 5, C13, C0, 7), F_ARCHEXT },
  { "scxtnum_el3",      CPENC (3, 6, C13, C0, 7), F_ARCHEXT },
  { "teecr32_el1",      CPENC (2, 2, C0, C0, 0), 0 }, /* See section 3.9.7.1 */
  /* Generic timer registers.  */
  { "cntfrq_el0",       CPENC (3, 3, C14, C0, 0), 0 }, /* RW */
  { "cntpct_el0",       CPENC (3, 3, C14, C0, 1), F_REG_READ }, /* RO */
  { "cntvct_el0",       CPENC (3, 3, C14, C0, 2), F_REG_READ }, /* RO */
  { "cntvoff_el2",      CPENC (3, 4, C14, C0, 3), 0 },
  { "cntkctl_el1",      CPENC (3, 0, C14, C1, 0), 0 },
  { "cntkctl_el12",     CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
  { "cnthctl_el2",      CPENC (3, 4, C14, C1, 0), 0 },
  { "cntp_tval_el0",    CPENC (3, 3, C14, C2, 0), 0 },
  { "cntp_tval_el02",   CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
  { "cntp_ctl_el0",     CPENC (3, 3, C14, C2, 1), 0 },
  { "cntp_ctl_el02",    CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
  { "cntp_cval_el0",    CPENC (3, 3, C14, C2, 2), 0 },
  { "cntp_cval_el02",   CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
  { "cntv_tval_el0",    CPENC (3, 3, C14, C3, 0), 0 },
  { "cntv_tval_el02",   CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
  { "cntv_ctl_el0",     CPENC (3, 3, C14, C3, 1), 0 },
  { "cntv_ctl_el02",    CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
  { "cntv_cval_el0",    CPENC (3, 3, C14, C3, 2), 0 },
  { "cntv_cval_el02",   CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
  { "cnthp_tval_el2",   CPENC (3, 4, C14, C2, 0), 0 },
  { "cnthp_ctl_el2",    CPENC (3, 4, C14, C2, 1), 0 },
  { "cnthp_cval_el2",   CPENC (3, 4, C14, C2, 2), 0 },
  { "cntps_tval_el1",   CPENC (3, 7, C14, C2, 0), 0 },
  { "cntps_ctl_el1",    CPENC (3, 7, C14, C2, 1), 0 },
  { "cntps_cval_el1",   CPENC (3, 7, C14, C2, 2), 0 },
  { "cnthv_tval_el2",   CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
  { "cnthv_ctl_el2",    CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
  { "cnthv_cval_el2",   CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
  /* AArch32 execution-state registers.  */
  { "dacr32_el2",       CPENC (3, 4, C3, C0, 0), 0 },
  { "ifsr32_el2",       CPENC (3, 4, C5, C0, 1), 0 },
  { "teehbr32_el1",     CPENC (2, 2, C1, C0, 0), 0 },
  { "sder32_el3",       CPENC (3, 6, C1, C1, 1), 0 },
  /* Debug registers (op0 == 2).  */
  { "mdscr_el1",        CPENC (2, 0, C0, C2, 2), 0 },
  { "mdccsr_el0",       CPENC (2, 3, C0, C1, 0), F_REG_READ }, /* r */
  { "mdccint_el1",      CPENC (2, 0, C0, C2, 0), 0 },
  { "dbgdtr_el0",       CPENC (2, 3, C0, C4, 0), 0 },
  { "dbgdtrrx_el0",     CPENC (2, 3, C0, C5, 0), F_REG_READ },  /* r */
  { "dbgdtrtx_el0",     CPENC (2, 3, C0, C5, 0), F_REG_WRITE }, /* w */
  { "osdtrrx_el1",      CPENC (2, 0, C0, C0, 2), 0 },
  { "osdtrtx_el1",      CPENC (2, 0, C0, C3, 2), 0 },
  { "oseccr_el1",       CPENC (2, 0, C0, C6, 2), 0 },
  { "dbgvcr32_el2",     CPENC (2, 4, C0, C7, 0), 0 },
  /* Breakpoint value registers 0-15 (CRm selects the breakpoint, op2 == 4).  */
  { "dbgbvr0_el1",      CPENC (2, 0, C0, C0, 4), 0 },
  { "dbgbvr1_el1",      CPENC (2, 0, C0, C1, 4), 0 },
  { "dbgbvr2_el1",      CPENC (2, 0, C0, C2, 4), 0 },
  { "dbgbvr3_el1",      CPENC (2, 0, C0, C3, 4), 0 },
  { "dbgbvr4_el1",      CPENC (2, 0, C0, C4, 4), 0 },
  { "dbgbvr5_el1",      CPENC (2, 0, C0, C5, 4), 0 },
  { "dbgbvr6_el1",      CPENC (2, 0, C0, C6, 4), 0 },
  { "dbgbvr7_el1",      CPENC (2, 0, C0, C7, 4), 0 },
  { "dbgbvr8_el1",      CPENC (2, 0, C0, C8, 4), 0 },
  { "dbgbvr9_el1",      CPENC (2, 0, C0, C9, 4), 0 },
  { "dbgbvr10_el1",     CPENC (2, 0, C0, C10, 4), 0 },
  { "dbgbvr11_el1",     CPENC (2, 0, C0, C11, 4), 0 },
  { "dbgbvr12_el1",     CPENC (2, 0, C0, C12, 4), 0 },
  { "dbgbvr13_el1",     CPENC (2, 0, C0, C13, 4), 0 },
  { "dbgbvr14_el1",     CPENC (2, 0, C0, C14, 4), 0 },
  { "dbgbvr15_el1",     CPENC (2, 0, C0, C15, 4), 0 },
  /* Breakpoint control registers 0-15 (op2 == 5).  */
  { "dbgbcr0_el1",      CPENC (2, 0, C0, C0, 5), 0 },
  { "dbgbcr1_el1",      CPENC (2, 0, C0, C1, 5), 0 },
  { "dbgbcr2_el1",      CPENC (2, 0, C0, C2, 5), 0 },
  { "dbgbcr3_el1",      CPENC (2, 0, C0, C3, 5), 0 },
  { "dbgbcr4_el1",      CPENC (2, 0, C0, C4, 5), 0 },
  { "dbgbcr5_el1",      CPENC (2, 0, C0, C5, 5), 0 },
  { "dbgbcr6_el1",      CPENC (2, 0, C0, C6, 5), 0 },
  { "dbgbcr7_el1",      CPENC (2, 0, C0, C7, 5), 0 },
  { "dbgbcr8_el1",      CPENC (2, 0, C0, C8, 5), 0 },
  { "dbgbcr9_el1",      CPENC (2, 0, C0, C9, 5), 0 },
  { "dbgbcr10_el1",     CPENC (2, 0, C0, C10, 5), 0 },
  { "dbgbcr11_el1",     CPENC (2, 0, C0, C11, 5), 0 },
  { "dbgbcr12_el1",     CPENC (2, 0, C0, C12, 5), 0 },
  { "dbgbcr13_el1",     CPENC (2, 0, C0, C13, 5), 0 },
  { "dbgbcr14_el1",     CPENC (2, 0, C0, C14, 5), 0 },
  { "dbgbcr15_el1",     CPENC (2, 0, C0, C15, 5), 0 },
  /* Watchpoint value registers 0-15 (op2 == 6).  */
  { "dbgwvr0_el1",      CPENC (2, 0, C0, C0, 6), 0 },
  { "dbgwvr1_el1",      CPENC (2, 0, C0, C1, 6), 0 },
  { "dbgwvr2_el1",      CPENC (2, 0, C0, C2, 6), 0 },
  { "dbgwvr3_el1",      CPENC (2, 0, C0, C3, 6), 0 },
  { "dbgwvr4_el1",      CPENC (2, 0, C0, C4, 6), 0 },
  { "dbgwvr5_el1",      CPENC (2, 0, C0, C5, 6), 0 },
  { "dbgwvr6_el1",      CPENC (2, 0, C0, C6, 6), 0 },
  { "dbgwvr7_el1",      CPENC (2, 0, C0, C7, 6), 0 },
  { "dbgwvr8_el1",      CPENC (2, 0, C0, C8, 6), 0 },
  { "dbgwvr9_el1",      CPENC (2, 0, C0, C9, 6), 0 },
  { "dbgwvr10_el1",     CPENC (2, 0, C0, C10, 6), 0 },
  { "dbgwvr11_el1",     CPENC (2, 0, C0, C11, 6), 0 },
  { "dbgwvr12_el1",     CPENC (2, 0, C0, C12, 6), 0 },
  { "dbgwvr13_el1",     CPENC (2, 0, C0, C13, 6), 0 },
  { "dbgwvr14_el1",     CPENC (2, 0, C0, C14, 6), 0 },
  { "dbgwvr15_el1",     CPENC (2, 0, C0, C15, 6), 0 },
  /* Watchpoint control registers 0-15 (op2 == 7).  */
  { "dbgwcr0_el1",      CPENC (2, 0, C0, C0, 7), 0 },
  { "dbgwcr1_el1",      CPENC (2, 0, C0, C1, 7), 0 },
  { "dbgwcr2_el1",      CPENC (2, 0, C0, C2, 7), 0 },
  { "dbgwcr3_el1",      CPENC (2, 0, C0, C3, 7), 0 },
  { "dbgwcr4_el1",      CPENC (2, 0, C0, C4, 7), 0 },
  { "dbgwcr5_el1",      CPENC (2, 0, C0, C5, 7), 0 },
  { "dbgwcr6_el1",      CPENC (2, 0, C0, C6, 7), 0 },
  { "dbgwcr7_el1",      CPENC (2, 0, C0, C7, 7), 0 },
  { "dbgwcr8_el1",      CPENC (2, 0, C0, C8, 7), 0 },
  { "dbgwcr9_el1",      CPENC (2, 0, C0, C9, 7), 0 },
  { "dbgwcr10_el1",     CPENC (2, 0, C0, C10, 7), 0 },
  { "dbgwcr11_el1",     CPENC (2, 0, C0, C11, 7), 0 },
  { "dbgwcr12_el1",     CPENC (2, 0, C0, C12, 7), 0 },
  { "dbgwcr13_el1",     CPENC (2, 0, C0, C13, 7), 0 },
  { "dbgwcr14_el1",     CPENC (2, 0, C0, C14, 7), 0 },
  { "dbgwcr15_el1",     CPENC (2, 0, C0, C15, 7), 0 },
  { "mdrar_el1",        CPENC (2, 0, C1, C0, 0), F_REG_READ },  /* r */
  { "oslar_el1",        CPENC (2, 0, C1, C0, 4), F_REG_WRITE }, /* w */
  { "oslsr_el1",        CPENC (2, 0, C1, C1, 4), F_REG_READ },  /* r */
  { "osdlr_el1",        CPENC (2, 0, C1, C3, 4), 0 },
  { "dbgprcr_el1",      CPENC (2, 0, C1, C4, 4), 0 },
  { "dbgclaimset_el1",  CPENC (2, 0, C7, C8, 6), 0 },
  { "dbgclaimclr_el1",  CPENC (2, 0, C7, C9, 6), 0 },
  { "dbgauthstatus_el1", CPENC (2, 0, C7, C14, 6), F_REG_READ }, /* r */
  /* Statistical Profiling Extension registers.  */
  { "pmblimitr_el1",    CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
  { "pmbptr_el1",       CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
  { "pmbsr_el1",        CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
  { "pmbidr_el1",       CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
  { "pmscr_el1",        CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmsicr_el1",       CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
  { "pmsirr_el1",       CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
  { "pmsfcr_el1",       CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
  { "pmsevfr_el1",      CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
  { "pmslatfr_el1",     CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
  { "pmsidr_el1",       CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* rw */
  { "pmscr_el2",        CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
  { "pmscr_el12",       CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
  /* Performance monitor registers.  */
  { "pmcr_el0",         CPENC (3, 3, C9, C12, 0), 0 },
  { "pmcntenset_el0",   CPENC (3, 3, C9, C12, 1), 0 },
  { "pmcntenclr_el0",   CPENC (3, 3, C9, C12, 2), 0 },
  { "pmovsclr_el0",     CPENC (3, 3, C9, C12, 3), 0 },
  { "pmswinc_el0",      CPENC (3, 3, C9, C12, 4), F_REG_WRITE }, /* w */
  { "pmselr_el0",       CPENC (3, 3, C9, C12, 5), 0 },
  { "pmceid0_el0",      CPENC (3, 3, C9, C12, 6), F_REG_READ }, /* r */
  { "pmceid1_el0",      CPENC (3, 3, C9, C12, 7), F_REG_READ }, /* r */
  { "pmccntr_el0",      CPENC (3, 3, C9, C13, 0), 0 },
  { "pmxevtyper_el0",   CPENC (3, 3, C9, C13, 1), 0 },
  { "pmxevcntr_el0",    CPENC (3, 3, C9, C13, 2), 0 },
  { "pmuserenr_el0",    CPENC (3, 3, C9, C14, 0), 0 },
  { "pmintenset_el1",   CPENC (3, 0, C9, C14, 1), 0 },
  { "pmintenclr_el1",   CPENC (3, 0, C9, C14, 2), 0 },
  { "pmovsset_el0",     CPENC (3, 3, C9, C14, 3), 0 },
  /* Event counter registers 0-30 (CRm C8-C11, op2 selects within each CRm).  */
  { "pmevcntr0_el0",    CPENC (3, 3, C14, C8, 0), 0 },
  { "pmevcntr1_el0",    CPENC (3, 3, C14, C8, 1), 0 },
  { "pmevcntr2_el0",    CPENC (3, 3, C14, C8, 2), 0 },
  { "pmevcntr3_el0",    CPENC (3, 3, C14, C8, 3), 0 },
  { "pmevcntr4_el0",    CPENC (3, 3, C14, C8, 4), 0 },
  { "pmevcntr5_el0",    CPENC (3, 3, C14, C8, 5), 0 },
  { "pmevcntr6_el0",    CPENC (3, 3, C14, C8, 6), 0 },
  { "pmevcntr7_el0",    CPENC (3, 3, C14, C8, 7), 0 },
  { "pmevcntr8_el0",    CPENC (3, 3, C14, C9, 0), 0 },
  { "pmevcntr9_el0",    CPENC (3, 3, C14, C9, 1), 0 },
  { "pmevcntr10_el0",   CPENC (3, 3, C14, C9, 2), 0 },
  { "pmevcntr11_el0",   CPENC (3, 3, C14, C9, 3), 0 },
  { "pmevcntr12_el0",   CPENC (3, 3, C14, C9, 4), 0 },
  { "pmevcntr13_el0",   CPENC (3, 3, C14, C9, 5), 0 },
  { "pmevcntr14_el0",   CPENC (3, 3, C14, C9, 6), 0 },
  { "pmevcntr15_el0",   CPENC (3, 3, C14, C9, 7), 0 },
  { "pmevcntr16_el0",   CPENC (3, 3, C14, C10, 0), 0 },
  { "pmevcntr17_el0",   CPENC (3, 3, C14, C10, 1), 0 },
  { "pmevcntr18_el0",   CPENC (3, 3, C14, C10, 2), 0 },
  { "pmevcntr19_el0",   CPENC (3, 3, C14, C10, 3), 0 },
  { "pmevcntr20_el0",   CPENC (3, 3, C14, C10, 4), 0 },
  { "pmevcntr21_el0",   CPENC (3, 3, C14, C10, 5), 0 },
  { "pmevcntr22_el0",   CPENC (3, 3, C14, C10, 6), 0 },
  { "pmevcntr23_el0",   CPENC (3, 3, C14, C10, 7), 0 },
  { "pmevcntr24_el0",   CPENC (3, 3, C14, C11, 0), 0 },
  { "pmevcntr25_el0",   CPENC (3, 3, C14, C11, 1), 0 },
  { "pmevcntr26_el0",   CPENC (3, 3, C14, C11, 2), 0 },
  { "pmevcntr27_el0",   CPENC (3, 3, C14, C11, 3), 0 },
  { "pmevcntr28_el0",   CPENC (3, 3, C14, C11, 4), 0 },
  { "pmevcntr29_el0",   CPENC (3, 3, C14, C11, 5), 0 },
  { "pmevcntr30_el0",   CPENC (3, 3, C14, C11, 6), 0 },
  /* Event type registers 0-30 (CRm C12-C15); pmccfiltr_el0 takes the last slot.  */
  { "pmevtyper0_el0",   CPENC (3, 3, C14, C12, 0), 0 },
  { "pmevtyper1_el0",   CPENC (3, 3, C14, C12, 1), 0 },
  { "pmevtyper2_el0",   CPENC (3, 3, C14, C12, 2), 0 },
  { "pmevtyper3_el0",   CPENC (3, 3, C14, C12, 3), 0 },
  { "pmevtyper4_el0",   CPENC (3, 3, C14, C12, 4), 0 },
  { "pmevtyper5_el0",   CPENC (3, 3, C14, C12, 5), 0 },
  { "pmevtyper6_el0",   CPENC (3, 3, C14, C12, 6), 0 },
  { "pmevtyper7_el0",   CPENC (3, 3, C14, C12, 7), 0 },
  { "pmevtyper8_el0",   CPENC (3, 3, C14, C13, 0), 0 },
  { "pmevtyper9_el0",   CPENC (3, 3, C14, C13, 1), 0 },
  { "pmevtyper10_el0",  CPENC (3, 3, C14, C13, 2), 0 },
  { "pmevtyper11_el0",  CPENC (3, 3, C14, C13, 3), 0 },
  { "pmevtyper12_el0",  CPENC (3, 3, C14, C13, 4), 0 },
  { "pmevtyper13_el0",  CPENC (3, 3, C14, C13, 5), 0 },
  { "pmevtyper14_el0",  CPENC (3, 3, C14, C13, 6), 0 },
  { "pmevtyper15_el0",  CPENC (3, 3, C14, C13, 7), 0 },
  { "pmevtyper16_el0",  CPENC (3, 3, C14, C14, 0), 0 },
  { "pmevtyper17_el0",  CPENC (3, 3, C14, C14, 1), 0 },
  { "pmevtyper18_el0",  CPENC (3, 3, C14, C14, 2), 0 },
  { "pmevtyper19_el0",  CPENC (3, 3, C14, C14, 3), 0 },
  { "pmevtyper20_el0",  CPENC (3, 3, C14, C14, 4), 0 },
  { "pmevtyper21_el0",  CPENC (3, 3, C14, C14, 5), 0 },
  { "pmevtyper22_el0",  CPENC (3, 3, C14, C14, 6), 0 },
  { "pmevtyper23_el0",  CPENC (3, 3, C14, C14, 7), 0 },
  { "pmevtyper24_el0",  CPENC (3, 3, C14, C15, 0), 0 },
  { "pmevtyper25_el0",  CPENC (3, 3, C14, C15, 1), 0 },
  { "pmevtyper26_el0",  CPENC (3, 3, C14, C15, 2), 0 },
  { "pmevtyper27_el0",  CPENC (3, 3, C14, C15, 3), 0 },
  { "pmevtyper28_el0",  CPENC (3, 3, C14, C15, 4), 0 },
  { "pmevtyper29_el0",  CPENC (3, 3, C14, C15, 5), 0 },
  { "pmevtyper30_el0",  CPENC (3, 3, C14, C15, 6), 0 },
  { "pmccfiltr_el0",    CPENC (3, 3, C14, C15, 7), 0 },
  /* ARMv8.4 and secure-virtualization registers.  */
  { "dit",              CPEN_ (3, C2, 5), F_ARCHEXT },
  { "vstcr_el2",        CPENC (3, 4, C2, C6, 2), F_ARCHEXT },
  { "vsttbr_el2",       CPENC (3, 4, C2, C6, 0), F_ARCHEXT },
  { "cnthvs_tval_el2",  CPENC (3, 4, C14, C4, 0), F_ARCHEXT },
  { "cnthvs_cval_el2",  CPENC (3, 4, C14, C4, 2), F_ARCHEXT },
  { "cnthvs_ctl_el2",   CPENC (3, 4, C14, C4, 1), F_ARCHEXT },
  { "cnthps_tval_el2",  CPENC (3, 4, C14, C5, 0), F_ARCHEXT },
  { "cnthps_cval_el2",  CPENC (3, 4, C14, C5, 2), F_ARCHEXT },
  { "cnthps_ctl_el2",   CPENC (3, 4, C14, C5, 1), F_ARCHEXT },
  { "sder32_el2",       CPENC (3, 4, C1, C3, 1), F_ARCHEXT },
  { "vncr_el2",         CPENC (3, 4, C2, C2, 0), F_ARCHEXT },
  { 0, CPENC (0, 0, 0, 0, 0), 0 },
4174 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
4176 return (reg
->flags
& F_DEPRECATED
) != 0;
4180 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
4181 const aarch64_sys_reg
*reg
)
4183 if (!(reg
->flags
& F_ARCHEXT
))
4186 /* PAN. Values are from aarch64_sys_regs. */
4187 if (reg
->value
== CPEN_(0,C2
,3)
4188 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
4191 /* SCXTNUM_ELx registers. */
4192 if ((reg
->value
== CPENC (3, 3, C13
, C0
, 7)
4193 || reg
->value
== CPENC (3, 0, C13
, C0
, 7)
4194 || reg
->value
== CPENC (3, 4, C13
, C0
, 7)
4195 || reg
->value
== CPENC (3, 6, C13
, C0
, 7)
4196 || reg
->value
== CPENC (3, 5, C13
, C0
, 7))
4197 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SCXTNUM
))
4200 /* ID_PFR2_EL1 register. */
4201 if (reg
->value
== CPENC(3, 0, C0
, C3
, 4)
4202 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_ID_PFR2
))
4205 /* SSBS. Values are from aarch64_sys_regs. */
4206 if (reg
->value
== CPEN_(3,C2
,6)
4207 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SSBS
))
4210 /* Virtualization host extensions: system registers. */
4211 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
4212 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
4213 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
4214 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
4215 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
4216 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4219 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
4220 if ((reg
->value
== CPEN_ (5, C0
, 0)
4221 || reg
->value
== CPEN_ (5, C0
, 1)
4222 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
4223 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
4224 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
4225 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
4226 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
4227 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
4228 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
4229 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
4230 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
4231 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
4232 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
4233 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
4234 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
4235 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
4236 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4239 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
4240 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
4241 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
4242 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
4243 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
4244 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
4245 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
4246 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4249 /* ARMv8.2 features. */
4251 /* ID_AA64MMFR2_EL1. */
4252 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
4253 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4257 if (reg
->value
== CPEN_ (0, C2
, 4)
4258 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4261 /* RAS extension. */
4263 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
4264 ERXMISC0_EL1 AND ERXMISC1_EL1. */
4265 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
4266 || reg
->value
== CPENC (3, 0, C5
, C3
, 1)
4267 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
4268 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
4269 || reg
->value
== CPENC (3, 0, C5
, C4
, 0)
4270 || reg
->value
== CPENC (3, 0, C5
, C4
, 1)
4271 || reg
->value
== CPENC (3, 0, C5
, C4
, 2)
4272 || reg
->value
== CPENC (3, 0, C5
, C4
, 3)
4273 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
4274 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
4275 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4278 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4279 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
4280 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
4281 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
4282 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4285 /* Statistical Profiling extension. */
4286 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
4287 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
4288 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
4289 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
4290 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
4291 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
4292 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
4293 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
4294 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
4295 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
4296 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
4297 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
4298 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
4299 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
4302 /* ARMv8.3 Pointer authentication keys. */
4303 if ((reg
->value
== CPENC (3, 0, C2
, C1
, 0)
4304 || reg
->value
== CPENC (3, 0, C2
, C1
, 1)
4305 || reg
->value
== CPENC (3, 0, C2
, C1
, 2)
4306 || reg
->value
== CPENC (3, 0, C2
, C1
, 3)
4307 || reg
->value
== CPENC (3, 0, C2
, C2
, 0)
4308 || reg
->value
== CPENC (3, 0, C2
, C2
, 1)
4309 || reg
->value
== CPENC (3, 0, C2
, C2
, 2)
4310 || reg
->value
== CPENC (3, 0, C2
, C2
, 3)
4311 || reg
->value
== CPENC (3, 0, C2
, C3
, 0)
4312 || reg
->value
== CPENC (3, 0, C2
, C3
, 1))
4313 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_3
))
4317 if ((reg
->value
== CPENC (3, 0, C0
, C4
, 4)
4318 || reg
->value
== CPENC (3, 0, C1
, C2
, 0)
4319 || reg
->value
== CPENC (3, 4, C1
, C2
, 0)
4320 || reg
->value
== CPENC (3, 6, C1
, C2
, 0)
4321 || reg
->value
== CPENC (3, 5, C1
, C2
, 0)
4322 || reg
->value
== CPENC (3, 0, C0
, C0
, 7))
4323 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SVE
))
4326 /* ARMv8.4 features. */
4329 if (reg
->value
== CPEN_ (3, C2
, 5)
4330 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4333 /* Virtualization extensions. */
4334 if ((reg
->value
== CPENC(3, 4, C2
, C6
, 2)
4335 || reg
->value
== CPENC(3, 4, C2
, C6
, 0)
4336 || reg
->value
== CPENC(3, 4, C14
, C4
, 0)
4337 || reg
->value
== CPENC(3, 4, C14
, C4
, 2)
4338 || reg
->value
== CPENC(3, 4, C14
, C4
, 1)
4339 || reg
->value
== CPENC(3, 4, C14
, C5
, 0)
4340 || reg
->value
== CPENC(3, 4, C14
, C5
, 2)
4341 || reg
->value
== CPENC(3, 4, C14
, C5
, 1)
4342 || reg
->value
== CPENC(3, 4, C1
, C3
, 1)
4343 || reg
->value
== CPENC(3, 4, C2
, C2
, 0))
4344 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4347 /* ARMv8.4 TLB instructions. */
4348 if ((reg
->value
== CPENS (0, C8
, C1
, 0)
4349 || reg
->value
== CPENS (0, C8
, C1
, 1)
4350 || reg
->value
== CPENS (0, C8
, C1
, 2)
4351 || reg
->value
== CPENS (0, C8
, C1
, 3)
4352 || reg
->value
== CPENS (0, C8
, C1
, 5)
4353 || reg
->value
== CPENS (0, C8
, C1
, 7)
4354 || reg
->value
== CPENS (4, C8
, C4
, 0)
4355 || reg
->value
== CPENS (4, C8
, C4
, 4)
4356 || reg
->value
== CPENS (4, C8
, C1
, 1)
4357 || reg
->value
== CPENS (4, C8
, C1
, 5)
4358 || reg
->value
== CPENS (4, C8
, C1
, 6)
4359 || reg
->value
== CPENS (6, C8
, C1
, 1)
4360 || reg
->value
== CPENS (6, C8
, C1
, 5)
4361 || reg
->value
== CPENS (4, C8
, C1
, 0)
4362 || reg
->value
== CPENS (4, C8
, C1
, 4)
4363 || reg
->value
== CPENS (6, C8
, C1
, 0)
4364 || reg
->value
== CPENS (0, C8
, C6
, 1)
4365 || reg
->value
== CPENS (0, C8
, C6
, 3)
4366 || reg
->value
== CPENS (0, C8
, C6
, 5)
4367 || reg
->value
== CPENS (0, C8
, C6
, 7)
4368 || reg
->value
== CPENS (0, C8
, C2
, 1)
4369 || reg
->value
== CPENS (0, C8
, C2
, 3)
4370 || reg
->value
== CPENS (0, C8
, C2
, 5)
4371 || reg
->value
== CPENS (0, C8
, C2
, 7)
4372 || reg
->value
== CPENS (0, C8
, C5
, 1)
4373 || reg
->value
== CPENS (0, C8
, C5
, 3)
4374 || reg
->value
== CPENS (0, C8
, C5
, 5)
4375 || reg
->value
== CPENS (0, C8
, C5
, 7)
4376 || reg
->value
== CPENS (4, C8
, C0
, 2)
4377 || reg
->value
== CPENS (4, C8
, C0
, 6)
4378 || reg
->value
== CPENS (4, C8
, C4
, 2)
4379 || reg
->value
== CPENS (4, C8
, C4
, 6)
4380 || reg
->value
== CPENS (4, C8
, C4
, 3)
4381 || reg
->value
== CPENS (4, C8
, C4
, 7)
4382 || reg
->value
== CPENS (4, C8
, C6
, 1)
4383 || reg
->value
== CPENS (4, C8
, C6
, 5)
4384 || reg
->value
== CPENS (4, C8
, C2
, 1)
4385 || reg
->value
== CPENS (4, C8
, C2
, 5)
4386 || reg
->value
== CPENS (4, C8
, C5
, 1)
4387 || reg
->value
== CPENS (4, C8
, C5
, 5)
4388 || reg
->value
== CPENS (6, C8
, C6
, 1)
4389 || reg
->value
== CPENS (6, C8
, C6
, 5)
4390 || reg
->value
== CPENS (6, C8
, C2
, 1)
4391 || reg
->value
== CPENS (6, C8
, C2
, 5)
4392 || reg
->value
== CPENS (6, C8
, C5
, 1)
4393 || reg
->value
== CPENS (6, C8
, C5
, 5))
4394 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4397 /* Random Number Instructions. For now they are available
4398 (and optional) only with ARMv8.5-A. */
4399 if ((reg
->value
== CPENC (3, 3, C2
, C4
, 0)
4400 || reg
->value
== CPENC (3, 3, C2
, C4
, 1))
4401 && !(AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RNG
)
4402 && AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_5
)))
4405 /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG. */
4406 if ((reg
->value
== CPENC (3, 3, C4
, C2
, 7)
4407 || reg
->value
== CPENC (3, 0, C6
, C6
, 1)
4408 || reg
->value
== CPENC (3, 0, C6
, C5
, 0)
4409 || reg
->value
== CPENC (3, 4, C6
, C5
, 0)
4410 || reg
->value
== CPENC (3, 6, C6
, C6
, 0)
4411 || reg
->value
== CPENC (3, 5, C6
, C6
, 0)
4412 || reg
->value
== CPENC (3, 0, C1
, C0
, 5)
4413 || reg
->value
== CPENC (3, 0, C1
, C0
, 6))
4414 && !(AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_MEMTAG
)))
4420 /* The CPENC below is fairly misleading, the fields
4421 here are not in CPENC form. They are in op2op1 form. The fields are encoded
4422 by ins_pstatefield, which just shifts the value by the width of the fields
4423 in a loop. So if you CPENC them only the first value will be set, the rest
4424 are masked out to 0. As an example. op2 = 3, op1=2. CPENC would produce a
4425 value of 0b110000000001000000 (0x30040) while what you want is
4427 const aarch64_sys_reg aarch64_pstatefields
[] =
4429 { "spsel", 0x05, 0 },
4430 { "daifset", 0x1e, 0 },
4431 { "daifclr", 0x1f, 0 },
4432 { "pan", 0x04, F_ARCHEXT
},
4433 { "uao", 0x03, F_ARCHEXT
},
4434 { "ssbs", 0x19, F_ARCHEXT
},
4435 { "dit", 0x1a, F_ARCHEXT
},
4436 { "tco", 0x1c, F_ARCHEXT
},
4437 { 0, CPENC(0,0,0,0,0), 0 },
4441 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
4442 const aarch64_sys_reg
*reg
)
4444 if (!(reg
->flags
& F_ARCHEXT
))
4447 /* PAN. Values are from aarch64_pstatefields. */
4448 if (reg
->value
== 0x04
4449 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
4452 /* UAO. Values are from aarch64_pstatefields. */
4453 if (reg
->value
== 0x03
4454 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4457 /* SSBS. Values are from aarch64_pstatefields. */
4458 if (reg
->value
== 0x19
4459 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SSBS
))
4462 /* DIT. Values are from aarch64_pstatefields. */
4463 if (reg
->value
== 0x1a
4464 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4467 /* TCO. Values are from aarch64_pstatefields. */
4468 if (reg
->value
== 0x1c
4469 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_MEMTAG
))
4475 const aarch64_sys_ins_reg aarch64_sys_regs_ic
[] =
4477 { "ialluis", CPENS(0,C7
,C1
,0), 0 },
4478 { "iallu", CPENS(0,C7
,C5
,0), 0 },
4479 { "ivau", CPENS (3, C7
, C5
, 1), F_HASXT
},
4480 { 0, CPENS(0,0,0,0), 0 }
4483 const aarch64_sys_ins_reg aarch64_sys_regs_dc
[] =
4485 { "zva", CPENS (3, C7
, C4
, 1), F_HASXT
},
4486 { "gva", CPENS (3, C7
, C4
, 3), F_HASXT
| F_ARCHEXT
},
4487 { "gzva", CPENS (3, C7
, C4
, 4), F_HASXT
| F_ARCHEXT
},
4488 { "ivac", CPENS (0, C7
, C6
, 1), F_HASXT
},
4489 { "igvac", CPENS (0, C7
, C6
, 3), F_HASXT
| F_ARCHEXT
},
4490 { "igsw", CPENS (0, C7
, C6
, 4), F_HASXT
| F_ARCHEXT
},
4491 { "isw", CPENS (0, C7
, C6
, 2), F_HASXT
},
4492 { "igdvac", CPENS (0, C7
, C6
, 5), F_HASXT
| F_ARCHEXT
},
4493 { "igdsw", CPENS (0, C7
, C6
, 6), F_HASXT
| F_ARCHEXT
},
4494 { "cvac", CPENS (3, C7
, C10
, 1), F_HASXT
},
4495 { "cgvac", CPENS (3, C7
, C10
, 3), F_HASXT
| F_ARCHEXT
},
4496 { "cgdvac", CPENS (3, C7
, C10
, 5), F_HASXT
| F_ARCHEXT
},
4497 { "csw", CPENS (0, C7
, C10
, 2), F_HASXT
},
4498 { "cgsw", CPENS (0, C7
, C10
, 4), F_HASXT
| F_ARCHEXT
},
4499 { "cgdsw", CPENS (0, C7
, C10
, 6), F_HASXT
| F_ARCHEXT
},
4500 { "cvau", CPENS (3, C7
, C11
, 1), F_HASXT
},
4501 { "cvap", CPENS (3, C7
, C12
, 1), F_HASXT
| F_ARCHEXT
},
4502 { "cgvap", CPENS (3, C7
, C12
, 3), F_HASXT
| F_ARCHEXT
},
4503 { "cgdvap", CPENS (3, C7
, C12
, 5), F_HASXT
| F_ARCHEXT
},
4504 { "cvadp", CPENS (3, C7
, C13
, 1), F_HASXT
| F_ARCHEXT
},
4505 { "cgvadp", CPENS (3, C7
, C13
, 3), F_HASXT
| F_ARCHEXT
},
4506 { "cgdvadp", CPENS (3, C7
, C13
, 5), F_HASXT
| F_ARCHEXT
},
4507 { "civac", CPENS (3, C7
, C14
, 1), F_HASXT
},
4508 { "cigvac", CPENS (3, C7
, C14
, 3), F_HASXT
| F_ARCHEXT
},
4509 { "cigdvac", CPENS (3, C7
, C14
, 5), F_HASXT
| F_ARCHEXT
},
4510 { "cisw", CPENS (0, C7
, C14
, 2), F_HASXT
},
4511 { "cigsw", CPENS (0, C7
, C14
, 4), F_HASXT
| F_ARCHEXT
},
4512 { "cigdsw", CPENS (0, C7
, C14
, 6), F_HASXT
| F_ARCHEXT
},
4513 { 0, CPENS(0,0,0,0), 0 }
4516 const aarch64_sys_ins_reg aarch64_sys_regs_at
[] =
4518 { "s1e1r", CPENS (0, C7
, C8
, 0), F_HASXT
},
4519 { "s1e1w", CPENS (0, C7
, C8
, 1), F_HASXT
},
4520 { "s1e0r", CPENS (0, C7
, C8
, 2), F_HASXT
},
4521 { "s1e0w", CPENS (0, C7
, C8
, 3), F_HASXT
},
4522 { "s12e1r", CPENS (4, C7
, C8
, 4), F_HASXT
},
4523 { "s12e1w", CPENS (4, C7
, C8
, 5), F_HASXT
},
4524 { "s12e0r", CPENS (4, C7
, C8
, 6), F_HASXT
},
4525 { "s12e0w", CPENS (4, C7
, C8
, 7), F_HASXT
},
4526 { "s1e2r", CPENS (4, C7
, C8
, 0), F_HASXT
},
4527 { "s1e2w", CPENS (4, C7
, C8
, 1), F_HASXT
},
4528 { "s1e3r", CPENS (6, C7
, C8
, 0), F_HASXT
},
4529 { "s1e3w", CPENS (6, C7
, C8
, 1), F_HASXT
},
4530 { "s1e1rp", CPENS (0, C7
, C9
, 0), F_HASXT
| F_ARCHEXT
},
4531 { "s1e1wp", CPENS (0, C7
, C9
, 1), F_HASXT
| F_ARCHEXT
},
4532 { 0, CPENS(0,0,0,0), 0 }
4535 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi
[] =
4537 { "vmalle1", CPENS(0,C8
,C7
,0), 0 },
4538 { "vae1", CPENS (0, C8
, C7
, 1), F_HASXT
},
4539 { "aside1", CPENS (0, C8
, C7
, 2), F_HASXT
},
4540 { "vaae1", CPENS (0, C8
, C7
, 3), F_HASXT
},
4541 { "vmalle1is", CPENS(0,C8
,C3
,0), 0 },
4542 { "vae1is", CPENS (0, C8
, C3
, 1), F_HASXT
},
4543 { "aside1is", CPENS (0, C8
, C3
, 2), F_HASXT
},
4544 { "vaae1is", CPENS (0, C8
, C3
, 3), F_HASXT
},
4545 { "ipas2e1is", CPENS (4, C8
, C0
, 1), F_HASXT
},
4546 { "ipas2le1is",CPENS (4, C8
, C0
, 5), F_HASXT
},
4547 { "ipas2e1", CPENS (4, C8
, C4
, 1), F_HASXT
},
4548 { "ipas2le1", CPENS (4, C8
, C4
, 5), F_HASXT
},
4549 { "vae2", CPENS (4, C8
, C7
, 1), F_HASXT
},
4550 { "vae2is", CPENS (4, C8
, C3
, 1), F_HASXT
},
4551 { "vmalls12e1",CPENS(4,C8
,C7
,6), 0 },
4552 { "vmalls12e1is",CPENS(4,C8
,C3
,6), 0 },
4553 { "vae3", CPENS (6, C8
, C7
, 1), F_HASXT
},
4554 { "vae3is", CPENS (6, C8
, C3
, 1), F_HASXT
},
4555 { "alle2", CPENS(4,C8
,C7
,0), 0 },
4556 { "alle2is", CPENS(4,C8
,C3
,0), 0 },
4557 { "alle1", CPENS(4,C8
,C7
,4), 0 },
4558 { "alle1is", CPENS(4,C8
,C3
,4), 0 },
4559 { "alle3", CPENS(6,C8
,C7
,0), 0 },
4560 { "alle3is", CPENS(6,C8
,C3
,0), 0 },
4561 { "vale1is", CPENS (0, C8
, C3
, 5), F_HASXT
},
4562 { "vale2is", CPENS (4, C8
, C3
, 5), F_HASXT
},
4563 { "vale3is", CPENS (6, C8
, C3
, 5), F_HASXT
},
4564 { "vaale1is", CPENS (0, C8
, C3
, 7), F_HASXT
},
4565 { "vale1", CPENS (0, C8
, C7
, 5), F_HASXT
},
4566 { "vale2", CPENS (4, C8
, C7
, 5), F_HASXT
},
4567 { "vale3", CPENS (6, C8
, C7
, 5), F_HASXT
},
4568 { "vaale1", CPENS (0, C8
, C7
, 7), F_HASXT
},
4570 { "vmalle1os", CPENS (0, C8
, C1
, 0), F_ARCHEXT
},
4571 { "vae1os", CPENS (0, C8
, C1
, 1), F_HASXT
| F_ARCHEXT
},
4572 { "aside1os", CPENS (0, C8
, C1
, 2), F_HASXT
| F_ARCHEXT
},
4573 { "vaae1os", CPENS (0, C8
, C1
, 3), F_HASXT
| F_ARCHEXT
},
4574 { "vale1os", CPENS (0, C8
, C1
, 5), F_HASXT
| F_ARCHEXT
},
4575 { "vaale1os", CPENS (0, C8
, C1
, 7), F_HASXT
| F_ARCHEXT
},
4576 { "ipas2e1os", CPENS (4, C8
, C4
, 0), F_HASXT
| F_ARCHEXT
},
4577 { "ipas2le1os", CPENS (4, C8
, C4
, 4), F_HASXT
| F_ARCHEXT
},
4578 { "vae2os", CPENS (4, C8
, C1
, 1), F_HASXT
| F_ARCHEXT
},
4579 { "vale2os", CPENS (4, C8
, C1
, 5), F_HASXT
| F_ARCHEXT
},
4580 { "vmalls12e1os", CPENS (4, C8
, C1
, 6), F_ARCHEXT
},
4581 { "vae3os", CPENS (6, C8
, C1
, 1), F_HASXT
| F_ARCHEXT
},
4582 { "vale3os", CPENS (6, C8
, C1
, 5), F_HASXT
| F_ARCHEXT
},
4583 { "alle2os", CPENS (4, C8
, C1
, 0), F_ARCHEXT
},
4584 { "alle1os", CPENS (4, C8
, C1
, 4), F_ARCHEXT
},
4585 { "alle3os", CPENS (6, C8
, C1
, 0), F_ARCHEXT
},
4587 { "rvae1", CPENS (0, C8
, C6
, 1), F_HASXT
| F_ARCHEXT
},
4588 { "rvaae1", CPENS (0, C8
, C6
, 3), F_HASXT
| F_ARCHEXT
},
4589 { "rvale1", CPENS (0, C8
, C6
, 5), F_HASXT
| F_ARCHEXT
},
4590 { "rvaale1", CPENS (0, C8
, C6
, 7), F_HASXT
| F_ARCHEXT
},
4591 { "rvae1is", CPENS (0, C8
, C2
, 1), F_HASXT
| F_ARCHEXT
},
4592 { "rvaae1is", CPENS (0, C8
, C2
, 3), F_HASXT
| F_ARCHEXT
},
4593 { "rvale1is", CPENS (0, C8
, C2
, 5), F_HASXT
| F_ARCHEXT
},
4594 { "rvaale1is", CPENS (0, C8
, C2
, 7), F_HASXT
| F_ARCHEXT
},
4595 { "rvae1os", CPENS (0, C8
, C5
, 1), F_HASXT
| F_ARCHEXT
},
4596 { "rvaae1os", CPENS (0, C8
, C5
, 3), F_HASXT
| F_ARCHEXT
},
4597 { "rvale1os", CPENS (0, C8
, C5
, 5), F_HASXT
| F_ARCHEXT
},
4598 { "rvaale1os", CPENS (0, C8
, C5
, 7), F_HASXT
| F_ARCHEXT
},
4599 { "ripas2e1is", CPENS (4, C8
, C0
, 2), F_HASXT
| F_ARCHEXT
},
4600 { "ripas2le1is",CPENS (4, C8
, C0
, 6), F_HASXT
| F_ARCHEXT
},
4601 { "ripas2e1", CPENS (4, C8
, C4
, 2), F_HASXT
| F_ARCHEXT
},
4602 { "ripas2le1", CPENS (4, C8
, C4
, 6), F_HASXT
| F_ARCHEXT
},
4603 { "ripas2e1os", CPENS (4, C8
, C4
, 3), F_HASXT
| F_ARCHEXT
},
4604 { "ripas2le1os",CPENS (4, C8
, C4
, 7), F_HASXT
| F_ARCHEXT
},
4605 { "rvae2", CPENS (4, C8
, C6
, 1), F_HASXT
| F_ARCHEXT
},
4606 { "rvale2", CPENS (4, C8
, C6
, 5), F_HASXT
| F_ARCHEXT
},
4607 { "rvae2is", CPENS (4, C8
, C2
, 1), F_HASXT
| F_ARCHEXT
},
4608 { "rvale2is", CPENS (4, C8
, C2
, 5), F_HASXT
| F_ARCHEXT
},
4609 { "rvae2os", CPENS (4, C8
, C5
, 1), F_HASXT
| F_ARCHEXT
},
4610 { "rvale2os", CPENS (4, C8
, C5
, 5), F_HASXT
| F_ARCHEXT
},
4611 { "rvae3", CPENS (6, C8
, C6
, 1), F_HASXT
| F_ARCHEXT
},
4612 { "rvale3", CPENS (6, C8
, C6
, 5), F_HASXT
| F_ARCHEXT
},
4613 { "rvae3is", CPENS (6, C8
, C2
, 1), F_HASXT
| F_ARCHEXT
},
4614 { "rvale3is", CPENS (6, C8
, C2
, 5), F_HASXT
| F_ARCHEXT
},
4615 { "rvae3os", CPENS (6, C8
, C5
, 1), F_HASXT
| F_ARCHEXT
},
4616 { "rvale3os", CPENS (6, C8
, C5
, 5), F_HASXT
| F_ARCHEXT
},
4618 { 0, CPENS(0,0,0,0), 0 }
4621 const aarch64_sys_ins_reg aarch64_sys_regs_sr
[] =
4623 /* RCTX is somewhat unique in a way that it has different values
4624 (op2) based on the instruction in which it is used (cfp/dvp/cpp).
4625 Thus op2 is masked out and instead encoded directly in the
4626 aarch64_opcode_table entries for the respective instructions. */
4627 { "rctx", CPENS(3,C7
,C3
,0), F_HASXT
| F_ARCHEXT
| F_REG_WRITE
}, /* WO */
4629 { 0, CPENS(0,0,0,0), 0 }
4633 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
4635 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
4639 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
4640 const aarch64_sys_ins_reg
*reg
)
4642 if (!(reg
->flags
& F_ARCHEXT
))
4645 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4646 if (reg
->value
== CPENS (3, C7
, C12
, 1)
4647 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4650 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
4651 if (reg
->value
== CPENS (3, C7
, C13
, 1)
4652 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_CVADP
))
4655 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
4656 if ((reg
->value
== CPENS (0, C7
, C6
, 3)
4657 || reg
->value
== CPENS (0, C7
, C6
, 4)
4658 || reg
->value
== CPENS (0, C7
, C10
, 4)
4659 || reg
->value
== CPENS (0, C7
, C14
, 4)
4660 || reg
->value
== CPENS (3, C7
, C10
, 3)
4661 || reg
->value
== CPENS (3, C7
, C12
, 3)
4662 || reg
->value
== CPENS (3, C7
, C13
, 3)
4663 || reg
->value
== CPENS (3, C7
, C14
, 3)
4664 || reg
->value
== CPENS (3, C7
, C4
, 3)
4665 || reg
->value
== CPENS (0, C7
, C6
, 5)
4666 || reg
->value
== CPENS (0, C7
, C6
, 6)
4667 || reg
->value
== CPENS (0, C7
, C10
, 6)
4668 || reg
->value
== CPENS (0, C7
, C14
, 6)
4669 || reg
->value
== CPENS (3, C7
, C10
, 5)
4670 || reg
->value
== CPENS (3, C7
, C12
, 5)
4671 || reg
->value
== CPENS (3, C7
, C13
, 5)
4672 || reg
->value
== CPENS (3, C7
, C14
, 5)
4673 || reg
->value
== CPENS (3, C7
, C4
, 4))
4674 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_MEMTAG
))
4677 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4678 if ((reg
->value
== CPENS (0, C7
, C9
, 0)
4679 || reg
->value
== CPENS (0, C7
, C9
, 1))
4680 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4683 /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
4684 if (reg
->value
== CPENS (3, C7
, C3
, 0)
4685 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PREDRES
))
/* BIT extracts bit BT of INSN; BITS extracts the inclusive bit-field
   [HI:LO] of INSN (HI >= LO, both less than the width of INSN).  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4711 static enum err_type
4712 verify_ldpsw (const struct aarch64_inst
*inst ATTRIBUTE_UNUSED
,
4713 const aarch64_insn insn
, bfd_vma pc ATTRIBUTE_UNUSED
,
4714 bfd_boolean encoding ATTRIBUTE_UNUSED
,
4715 aarch64_operand_error
*mismatch_detail ATTRIBUTE_UNUSED
,
4716 aarch64_instr_sequence
*insn_sequence ATTRIBUTE_UNUSED
)
4718 int t
= BITS (insn
, 4, 0);
4719 int n
= BITS (insn
, 9, 5);
4720 int t2
= BITS (insn
, 14, 10);
4724 /* Write back enabled. */
4725 if ((t
== n
|| t2
== n
) && n
!= 31)
4739 /* Verifier for vector by element 3 operands functions where the
4740 conditions `if sz:L == 11 then UNDEFINED` holds. */
4742 static enum err_type
4743 verify_elem_sd (const struct aarch64_inst
*inst
, const aarch64_insn insn
,
4744 bfd_vma pc ATTRIBUTE_UNUSED
, bfd_boolean encoding
,
4745 aarch64_operand_error
*mismatch_detail ATTRIBUTE_UNUSED
,
4746 aarch64_instr_sequence
*insn_sequence ATTRIBUTE_UNUSED
)
4748 const aarch64_insn undef_pattern
= 0x3;
4751 assert (inst
->opcode
);
4752 assert (inst
->opcode
->operands
[2] == AARCH64_OPND_Em
);
4753 value
= encoding
? inst
->value
: insn
;
4756 if (undef_pattern
== extract_fields (value
, 0, 2, FLD_sz
, FLD_L
))
4762 /* Initialize an instruction sequence insn_sequence with the instruction INST.
4763 If INST is NULL the given insn_sequence is cleared and the sequence is left
4767 init_insn_sequence (const struct aarch64_inst
*inst
,
4768 aarch64_instr_sequence
*insn_sequence
)
4770 int num_req_entries
= 0;
4771 insn_sequence
->next_insn
= 0;
4772 insn_sequence
->num_insns
= num_req_entries
;
4773 if (insn_sequence
->instr
)
4774 XDELETE (insn_sequence
->instr
);
4775 insn_sequence
->instr
= NULL
;
4779 insn_sequence
->instr
= XNEW (aarch64_inst
);
4780 memcpy (insn_sequence
->instr
, inst
, sizeof (aarch64_inst
));
4783 /* Handle all the cases here. May need to think of something smarter than
4784 a giant if/else chain if this grows. At that time, a lookup table may be
4786 if (inst
&& inst
->opcode
->constraints
& C_SCAN_MOVPRFX
)
4787 num_req_entries
= 1;
4789 if (insn_sequence
->current_insns
)
4790 XDELETEVEC (insn_sequence
->current_insns
);
4791 insn_sequence
->current_insns
= NULL
;
4793 if (num_req_entries
!= 0)
4795 size_t size
= num_req_entries
* sizeof (aarch64_inst
);
4796 insn_sequence
->current_insns
4797 = (aarch64_inst
**) XNEWVEC (aarch64_inst
, num_req_entries
);
4798 memset (insn_sequence
->current_insns
, 0, size
);
4803 /* This function verifies that the instruction INST adheres to its specified
4804 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
4805 returned and MISMATCH_DETAIL contains the reason why verification failed.
4807 The function is called both during assembly and disassembly. If assembling
4808 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
4809 and will contain the PC of the current instruction w.r.t to the section.
4811 If ENCODING and PC=0 then you are at a start of a section. The constraints
4812 are verified against the given state insn_sequence which is updated as it
4813 transitions through the verification. */
4816 verify_constraints (const struct aarch64_inst
*inst
,
4817 const aarch64_insn insn ATTRIBUTE_UNUSED
,
4819 bfd_boolean encoding
,
4820 aarch64_operand_error
*mismatch_detail
,
4821 aarch64_instr_sequence
*insn_sequence
)
4824 assert (inst
->opcode
);
4826 const struct aarch64_opcode
*opcode
= inst
->opcode
;
4827 if (!opcode
->constraints
&& !insn_sequence
->instr
)
4830 assert (insn_sequence
);
4832 enum err_type res
= ERR_OK
;
4834 /* This instruction puts a constraint on the insn_sequence. */
4835 if (opcode
->flags
& F_SCAN
)
4837 if (insn_sequence
->instr
)
4839 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4840 mismatch_detail
->error
= _("instruction opens new dependency "
4841 "sequence without ending previous one");
4842 mismatch_detail
->index
= -1;
4843 mismatch_detail
->non_fatal
= TRUE
;
4847 init_insn_sequence (inst
, insn_sequence
);
4851 /* Verify constraints on an existing sequence. */
4852 if (insn_sequence
->instr
)
4854 const struct aarch64_opcode
* inst_opcode
= insn_sequence
->instr
->opcode
;
4855 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
4856 closed a previous one that we should have. */
4857 if (!encoding
&& pc
== 0)
4859 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4860 mismatch_detail
->error
= _("previous `movprfx' sequence not closed");
4861 mismatch_detail
->index
= -1;
4862 mismatch_detail
->non_fatal
= TRUE
;
4864 /* Reset the sequence. */
4865 init_insn_sequence (NULL
, insn_sequence
);
4869 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
4870 if (inst_opcode
->constraints
& C_SCAN_MOVPRFX
)
4872 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4873 instruction for better error messages. */
4874 if (!opcode
->avariant
4875 || !(*opcode
->avariant
&
4876 (AARCH64_FEATURE_SVE
| AARCH64_FEATURE_SVE2
)))
4878 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4879 mismatch_detail
->error
= _("SVE instruction expected after "
4881 mismatch_detail
->index
= -1;
4882 mismatch_detail
->non_fatal
= TRUE
;
4887 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4888 instruction that is allowed to be used with a MOVPRFX. */
4889 if (!(opcode
->constraints
& C_SCAN_MOVPRFX
))
4891 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4892 mismatch_detail
->error
= _("SVE `movprfx' compatible instruction "
4894 mismatch_detail
->index
= -1;
4895 mismatch_detail
->non_fatal
= TRUE
;
4900 /* Next check for usage of the predicate register. */
4901 aarch64_opnd_info blk_dest
= insn_sequence
->instr
->operands
[0];
4902 aarch64_opnd_info blk_pred
, inst_pred
;
4903 memset (&blk_pred
, 0, sizeof (aarch64_opnd_info
));
4904 memset (&inst_pred
, 0, sizeof (aarch64_opnd_info
));
4905 bfd_boolean predicated
= FALSE
;
4906 assert (blk_dest
.type
== AARCH64_OPND_SVE_Zd
);
4908 /* Determine if the movprfx instruction used is predicated or not. */
4909 if (insn_sequence
->instr
->operands
[1].type
== AARCH64_OPND_SVE_Pg3
)
4912 blk_pred
= insn_sequence
->instr
->operands
[1];
4915 unsigned char max_elem_size
= 0;
4916 unsigned char current_elem_size
;
4917 int num_op_used
= 0, last_op_usage
= 0;
4918 int i
, inst_pred_idx
= -1;
4919 int num_ops
= aarch64_num_of_operands (opcode
);
4920 for (i
= 0; i
< num_ops
; i
++)
4922 aarch64_opnd_info inst_op
= inst
->operands
[i
];
4923 switch (inst_op
.type
)
4925 case AARCH64_OPND_SVE_Zd
:
4926 case AARCH64_OPND_SVE_Zm_5
:
4927 case AARCH64_OPND_SVE_Zm_16
:
4928 case AARCH64_OPND_SVE_Zn
:
4929 case AARCH64_OPND_SVE_Zt
:
4930 case AARCH64_OPND_SVE_Vm
:
4931 case AARCH64_OPND_SVE_Vn
:
4932 case AARCH64_OPND_Va
:
4933 case AARCH64_OPND_Vn
:
4934 case AARCH64_OPND_Vm
:
4935 case AARCH64_OPND_Sn
:
4936 case AARCH64_OPND_Sm
:
4937 case AARCH64_OPND_Rn
:
4938 case AARCH64_OPND_Rm
:
4939 case AARCH64_OPND_Rn_SP
:
4940 case AARCH64_OPND_Rt_SP
:
4941 case AARCH64_OPND_Rm_SP
:
4942 if (inst_op
.reg
.regno
== blk_dest
.reg
.regno
)
4948 = aarch64_get_qualifier_esize (inst_op
.qualifier
);
4949 if (current_elem_size
> max_elem_size
)
4950 max_elem_size
= current_elem_size
;
4952 case AARCH64_OPND_SVE_Pd
:
4953 case AARCH64_OPND_SVE_Pg3
:
4954 case AARCH64_OPND_SVE_Pg4_5
:
4955 case AARCH64_OPND_SVE_Pg4_10
:
4956 case AARCH64_OPND_SVE_Pg4_16
:
4957 case AARCH64_OPND_SVE_Pm
:
4958 case AARCH64_OPND_SVE_Pn
:
4959 case AARCH64_OPND_SVE_Pt
:
4960 inst_pred
= inst_op
;
4968 assert (max_elem_size
!= 0);
4969 aarch64_opnd_info inst_dest
= inst
->operands
[0];
4970 /* Determine the size that should be used to compare against the
4973 = opcode
->constraints
& C_MAX_ELEM
4975 : aarch64_get_qualifier_esize (inst_dest
.qualifier
);
4977 /* If movprfx is predicated do some extra checks. */
4980 /* The instruction must be predicated. */
4981 if (inst_pred_idx
< 0)
4983 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4984 mismatch_detail
->error
= _("predicated instruction expected "
4986 mismatch_detail
->index
= -1;
4987 mismatch_detail
->non_fatal
= TRUE
;
4992 /* The instruction must have a merging predicate. */
4993 if (inst_pred
.qualifier
!= AARCH64_OPND_QLF_P_M
)
4995 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4996 mismatch_detail
->error
= _("merging predicate expected due "
4997 "to preceding `movprfx'");
4998 mismatch_detail
->index
= inst_pred_idx
;
4999 mismatch_detail
->non_fatal
= TRUE
;
5004 /* The same register must be used in instruction. */
5005 if (blk_pred
.reg
.regno
!= inst_pred
.reg
.regno
)
5007 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5008 mismatch_detail
->error
= _("predicate register differs "
5009 "from that in preceding "
5011 mismatch_detail
->index
= inst_pred_idx
;
5012 mismatch_detail
->non_fatal
= TRUE
;
5018 /* Destructive operations by definition must allow one usage of the
5021 = aarch64_is_destructive_by_operands (opcode
) ? 2 : 1;
5023 /* Operand is not used at all. */
5024 if (num_op_used
== 0)
5026 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5027 mismatch_detail
->error
= _("output register of preceding "
5028 "`movprfx' not used in current "
5030 mismatch_detail
->index
= 0;
5031 mismatch_detail
->non_fatal
= TRUE
;
5036 /* We now know it's used, now determine exactly where it's used. */
5037 if (blk_dest
.reg
.regno
!= inst_dest
.reg
.regno
)
5039 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5040 mismatch_detail
->error
= _("output register of preceding "
5041 "`movprfx' expected as output");
5042 mismatch_detail
->index
= 0;
5043 mismatch_detail
->non_fatal
= TRUE
;
5048 /* Operand used more than allowed for the specific opcode type. */
5049 if (num_op_used
> allowed_usage
)
5051 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5052 mismatch_detail
->error
= _("output register of preceding "
5053 "`movprfx' used as input");
5054 mismatch_detail
->index
= last_op_usage
;
5055 mismatch_detail
->non_fatal
= TRUE
;
5060 /* Now the only thing left is the qualifiers checks. The register
5061 must have the same maximum element size. */
5062 if (inst_dest
.qualifier
5063 && blk_dest
.qualifier
5064 && current_elem_size
5065 != aarch64_get_qualifier_esize (blk_dest
.qualifier
))
5067 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5068 mismatch_detail
->error
= _("register size not compatible with "
5069 "previous `movprfx'");
5070 mismatch_detail
->index
= 0;
5071 mismatch_detail
->non_fatal
= TRUE
;
5078 /* Add the new instruction to the sequence. */
5079 memcpy (insn_sequence
->current_insns
+ insn_sequence
->next_insn
++,
5080 inst
, sizeof (aarch64_inst
));
5082 /* Check if sequence is now full. */
5083 if (insn_sequence
->next_insn
>= insn_sequence
->num_insns
)
5085 /* Sequence is full, but we don't have anything special to do for now,
5086 so clear and reset it. */
5087 init_insn_sequence (NULL
, insn_sequence
);
5095 /* Return true if VALUE cannot be moved into an SVE register using DUP
5096 (with any element size, not just ESIZE) and if using DUPM would
5097 therefore be OK. ESIZE is the number of bytes in the immediate. */
5100 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue
, int esize
)
5102 int64_t svalue
= uvalue
;
5103 uint64_t upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
5105 if ((uvalue
& ~upper
) != uvalue
&& (uvalue
| upper
) != uvalue
)
5107 if (esize
<= 4 || (uint32_t) uvalue
== (uint32_t) (uvalue
>> 32))
5109 svalue
= (int32_t) uvalue
;
5110 if (esize
<= 2 || (uint16_t) uvalue
== (uint16_t) (uvalue
>> 16))
5112 svalue
= (int16_t) uvalue
;
5113 if (esize
== 1 || (uint8_t) uvalue
== (uint8_t) (uvalue
>> 8))
5117 if ((svalue
& 0xff) == 0)
5119 return svalue
< -128 || svalue
>= 128;
5122 /* Include the opcode description table as well as the operand description
5124 #define VERIFIER(x) verify_##x
5125 #include "aarch64-tbl.h"