1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
25 #include "bfd_stdint.h"
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump
= FALSE
;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array
[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array
[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
108 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
109 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
116 return ((qualifier
>= AARCH64_OPND_QLF_S_B
117 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
127 DP_VECTOR_ACROSS_LANES
,
/* For each data_pattern value, the index of the operand that carries the
   significant size information (used to encode/decode the size:Q fields).  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
147 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers
[0] == qualifiers
[1]
152 && vector_qualifier_p (qualifiers
[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers
[0])
154 == aarch64_get_qualifier_esize (qualifiers
[1]))
155 && (aarch64_get_qualifier_esize (qualifiers
[0])
156 == aarch64_get_qualifier_esize (qualifiers
[2])))
157 return DP_VECTOR_3SAME
;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers
[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers
[0])
164 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
165 return DP_VECTOR_LONG
;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers
[0] == qualifiers
[1]
168 && vector_qualifier_p (qualifiers
[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers
[0])
171 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers
[0])
173 == aarch64_get_qualifier_esize (qualifiers
[1])))
174 return DP_VECTOR_WIDE
;
176 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers
[1]) == TRUE
180 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
181 return DP_VECTOR_ACROSS_LANES
;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
199 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
202 const aarch64_field fields
[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 10, 4 }, /* imm4_3: in adddg/subg instructions. */
247 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
248 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
249 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
250 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
251 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
252 { 5, 14 }, /* imm14: in test bit and branch instructions. */
253 { 5, 16 }, /* imm16: in exception instructions. */
254 { 0, 26 }, /* imm26: in unconditional branch instructions. */
255 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
256 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
257 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
258 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
259 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
260 { 22, 1 }, /* N: in logical (immediate) instructions. */
261 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
262 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
263 { 31, 1 }, /* sf: in integer data processing instructions. */
264 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
265 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
266 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
267 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
268 { 31, 1 }, /* b5: in the test bit and branch instructions. */
269 { 19, 5 }, /* b40: in the test bit and branch instructions. */
270 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
271 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
272 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
273 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
274 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
275 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
276 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
277 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
278 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
279 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
280 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
281 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
282 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
283 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
284 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
285 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
286 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
288 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
289 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
290 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
291 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
292 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
293 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
294 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
295 { 5, 1 }, /* SVE_i1: single-bit immediate. */
296 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
297 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
298 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
299 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
300 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
301 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
302 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
303 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
304 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
305 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
306 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
307 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
308 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
309 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
310 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
311 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
312 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
313 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
314 { 16, 4 }, /* SVE_tsz: triangular size select. */
315 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
316 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
317 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
318 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
319 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
320 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
321 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
322 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
323 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
324 { 22, 1 }, /* sz: 1-bit element size select. */
327 enum aarch64_operand_class
328 aarch64_get_operand_class (enum aarch64_opnd type
)
330 return aarch64_operands
[type
].op_class
;
334 aarch64_get_operand_name (enum aarch64_opnd type
)
336 return aarch64_operands
[type
].name
;
339 /* Get operand description string.
340 This is usually for the diagnosis purpose. */
342 aarch64_get_operand_desc (enum aarch64_opnd type
)
344 return aarch64_operands
[type
].desc
;
347 /* Table of all conditional affixes. */
348 const aarch64_cond aarch64_conds
[16] =
350 {{"eq", "none"}, 0x0},
351 {{"ne", "any"}, 0x1},
352 {{"cs", "hs", "nlast"}, 0x2},
353 {{"cc", "lo", "ul", "last"}, 0x3},
354 {{"mi", "first"}, 0x4},
355 {{"pl", "nfrst"}, 0x5},
358 {{"hi", "pmore"}, 0x8},
359 {{"ls", "plast"}, 0x9},
360 {{"ge", "tcont"}, 0xa},
361 {{"lt", "tstop"}, 0xb},
369 get_cond_from_value (aarch64_insn value
)
372 return &aarch64_conds
[(unsigned int) value
];
376 get_inverted_cond (const aarch64_cond
*cond
)
378 return &aarch64_conds
[cond
->value
^ 0x1];
381 /* Table describing the operand extension/shifting operators; indexed by
382 enum aarch64_modifier_kind.
384 The value column provides the most common values for encoding modifiers,
385 which enables table-driven encoding/decoding for the modifiers. */
386 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
407 enum aarch64_modifier_kind
408 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
410 return desc
- aarch64_operand_modifiers
;
414 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
416 return aarch64_operand_modifiers
[kind
].value
;
419 enum aarch64_modifier_kind
420 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
421 bfd_boolean extend_p
)
423 if (extend_p
== TRUE
)
424 return AARCH64_MOD_UXTB
+ value
;
426 return AARCH64_MOD_LSL
- value
;
430 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
432 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
436 static inline bfd_boolean
437 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
439 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
443 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
463 /* Table describing the operands supported by the aliases of the HINT
466 The name column is the operand that is accepted for the alias. The value
467 column is the hint number of the alias. The list of operands is terminated
468 by NULL in the name column. */
470 const struct aarch64_name_value_pair aarch64_hint_options
[] =
472 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
473 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT
, 0x20) },
474 { "csync", HINT_OPD_CSYNC
}, /* PSB CSYNC. */
475 { "c", HINT_OPD_C
}, /* BTI C. */
476 { "j", HINT_OPD_J
}, /* BTI J. */
477 { "jc", HINT_OPD_JC
}, /* BTI JC. */
478 { NULL
, HINT_OPD_NULL
},
481 /* op -> op: load = 0 instruction = 1 store = 2
483 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
484 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
485 const struct aarch64_name_value_pair aarch64_prfops
[32] =
487 { "pldl1keep", B(0, 1, 0) },
488 { "pldl1strm", B(0, 1, 1) },
489 { "pldl2keep", B(0, 2, 0) },
490 { "pldl2strm", B(0, 2, 1) },
491 { "pldl3keep", B(0, 3, 0) },
492 { "pldl3strm", B(0, 3, 1) },
495 { "plil1keep", B(1, 1, 0) },
496 { "plil1strm", B(1, 1, 1) },
497 { "plil2keep", B(1, 2, 0) },
498 { "plil2strm", B(1, 2, 1) },
499 { "plil3keep", B(1, 3, 0) },
500 { "plil3strm", B(1, 3, 1) },
503 { "pstl1keep", B(2, 1, 0) },
504 { "pstl1strm", B(2, 1, 1) },
505 { "pstl2keep", B(2, 2, 0) },
506 { "pstl2strm", B(2, 2, 1) },
507 { "pstl3keep", B(2, 3, 0) },
508 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
static int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return true if VALUE is a multiple of ALIGN.  */
static int
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
537 /* A signed value fits in a field. */
539 value_fit_signed_field_p (int64_t value
, unsigned width
)
542 if (width
< sizeof (value
) * 8)
544 int64_t lim
= (int64_t)1 << (width
- 1);
545 if (value
>= -lim
&& value
< lim
)
551 /* An unsigned value fits in a field. */
553 value_fit_unsigned_field_p (int64_t value
, unsigned width
)
556 if (width
< sizeof (value
) * 8)
558 int64_t lim
= (int64_t)1 << width
;
559 if (value
>= 0 && value
< lim
)
565 /* Return 1 if OPERAND is SP or WSP. */
567 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
569 return ((aarch64_get_operand_class (operand
->type
)
570 == AARCH64_OPND_CLASS_INT_REG
)
571 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
572 && operand
->reg
.regno
== 31);
575 /* Return 1 if OPERAND is XZR or WZP. */
577 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
579 return ((aarch64_get_operand_class (operand
->type
)
580 == AARCH64_OPND_CLASS_INT_REG
)
581 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
582 && operand
->reg
.regno
== 31);
585 /* Return true if the operand *OPERAND that has the operand code
586 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
587 qualified by the qualifier TARGET. */
590 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
591 aarch64_opnd_qualifier_t target
)
593 switch (operand
->qualifier
)
595 case AARCH64_OPND_QLF_W
:
596 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
599 case AARCH64_OPND_QLF_X
:
600 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
603 case AARCH64_OPND_QLF_WSP
:
604 if (target
== AARCH64_OPND_QLF_W
605 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
608 case AARCH64_OPND_QLF_SP
:
609 if (target
== AARCH64_OPND_QLF_X
610 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
620 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
621 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
623 Return NIL if more than one expected qualifiers are found. */
625 aarch64_opnd_qualifier_t
626 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
628 const aarch64_opnd_qualifier_t known_qlf
,
635 When the known qualifier is NIL, we have to assume that there is only
636 one qualifier sequence in the *QSEQ_LIST and return the corresponding
637 qualifier directly. One scenario is that for instruction
638 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
639 which has only one possible valid qualifier sequence
641 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
642 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
644 Because the qualifier NIL has dual roles in the qualifier sequence:
645 it can mean no qualifier for the operand, or the qualifer sequence is
646 not in use (when all qualifiers in the sequence are NILs), we have to
647 handle this special case here. */
648 if (known_qlf
== AARCH64_OPND_NIL
)
650 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
651 return qseq_list
[0][idx
];
654 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
656 if (qseq_list
[i
][known_idx
] == known_qlf
)
659 /* More than one sequences are found to have KNOWN_QLF at
661 return AARCH64_OPND_NIL
;
666 return qseq_list
[saved_i
][idx
];
669 enum operand_qualifier_kind
677 /* Operand qualifier description. */
678 struct operand_qualifier_data
680 /* The usage of the three data fields depends on the qualifier kind. */
687 enum operand_qualifier_kind kind
;
690 /* Indexed by the operand qualifier enumerators. */
691 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
693 {0, 0, 0, "NIL", OQK_NIL
},
695 /* Operand variant qualifiers.
697 element size, number of elements and common value for encoding. */
699 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
700 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
701 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
702 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
704 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
705 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
706 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
707 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
708 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
709 {4, 1, 0x0, "4b", OQK_OPD_VARIANT
},
711 {1, 4, 0x0, "4b", OQK_OPD_VARIANT
},
712 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
713 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
714 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
715 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
716 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
717 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
718 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
719 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
720 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
721 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
723 {0, 0, 0, "z", OQK_OPD_VARIANT
},
724 {0, 0, 0, "m", OQK_OPD_VARIANT
},
726 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
727 {16, 0, 0, "tag", OQK_OPD_VARIANT
},
729 /* Qualifiers constraining the value range.
731 Lower bound, higher bound, unused. */
733 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE
},
734 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
735 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
736 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
737 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
738 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
739 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
741 /* Qualifiers for miscellaneous purpose.
743 unused, unused and unused. */
748 {0, 0, 0, "retrieving", 0},
751 static inline bfd_boolean
752 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
754 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
758 static inline bfd_boolean
759 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
761 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
766 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
768 return aarch64_opnd_qualifiers
[qualifier
].desc
;
771 /* Given an operand qualifier, return the expected data element size
772 of a qualified operand. */
774 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
776 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
777 return aarch64_opnd_qualifiers
[qualifier
].data0
;
781 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
783 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
784 return aarch64_opnd_qualifiers
[qualifier
].data1
;
788 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
790 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
791 return aarch64_opnd_qualifiers
[qualifier
].data2
;
795 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
797 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
798 return aarch64_opnd_qualifiers
[qualifier
].data0
;
802 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
804 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
805 return aarch64_opnd_qualifiers
[qualifier
].data1
;
810 aarch64_verbose (const char *str
, ...)
821 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
825 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
826 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
831 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
832 const aarch64_opnd_qualifier_t
*qualifier
)
835 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
837 aarch64_verbose ("dump_match_qualifiers:");
838 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
839 curr
[i
] = opnd
[i
].qualifier
;
840 dump_qualifier_sequence (curr
);
841 aarch64_verbose ("against");
842 dump_qualifier_sequence (qualifier
);
844 #endif /* DEBUG_AARCH64 */
846 /* This function checks if the given instruction INSN is a destructive
847 instruction based on the usage of the registers. It does not recognize
848 unary destructive instructions. */
850 aarch64_is_destructive_by_operands (const aarch64_opcode
*opcode
)
853 const enum aarch64_opnd
*opnds
= opcode
->operands
;
855 if (opnds
[0] == AARCH64_OPND_NIL
)
858 while (opnds
[++i
] != AARCH64_OPND_NIL
)
859 if (opnds
[i
] == opnds
[0])
865 /* TODO improve this, we can have an extra field at the runtime to
866 store the number of operands rather than calculating it every time. */
869 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
872 const enum aarch64_opnd
*opnds
= opcode
->operands
;
873 while (opnds
[i
++] != AARCH64_OPND_NIL
)
876 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
880 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
881 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
883 N.B. on the entry, it is very likely that only some operands in *INST
884 have had their qualifiers been established.
886 If STOP_AT is not -1, the function will only try to match
887 the qualifier sequence for operands before and including the operand
888 of index STOP_AT; and on success *RET will only be filled with the first
889 (STOP_AT+1) qualifiers.
891 A couple examples of the matching algorithm:
899 Apart from serving the main encoding routine, this can also be called
900 during or after the operand decoding. */
903 aarch64_find_best_match (const aarch64_inst
*inst
,
904 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
905 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
909 const aarch64_opnd_qualifier_t
*qualifiers
;
911 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
914 DEBUG_TRACE ("SUCCEED: no operand");
918 if (stop_at
< 0 || stop_at
>= num_opnds
)
919 stop_at
= num_opnds
- 1;
921 /* For each pattern. */
922 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
925 qualifiers
= *qualifiers_list
;
927 /* Start as positive. */
930 DEBUG_TRACE ("%d", i
);
933 dump_match_qualifiers (inst
->operands
, qualifiers
);
936 /* Most opcodes has much fewer patterns in the list.
937 First NIL qualifier indicates the end in the list. */
938 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
940 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
946 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
948 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
950 /* Either the operand does not have qualifier, or the qualifier
951 for the operand needs to be deduced from the qualifier
953 In the latter case, any constraint checking related with
954 the obtained qualifier should be done later in
955 operand_general_constraint_met_p. */
958 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
960 /* Unless the target qualifier can also qualify the operand
961 (which has already had a non-nil qualifier), non-equal
962 qualifiers are generally un-matched. */
963 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
972 continue; /* Equal qualifiers are certainly matched. */
975 /* Qualifiers established. */
982 /* Fill the result in *RET. */
984 qualifiers
= *qualifiers_list
;
986 DEBUG_TRACE ("complete qualifiers using list %d", i
);
989 dump_qualifier_sequence (qualifiers
);
992 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
993 ret
[j
] = *qualifiers
;
994 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
995 ret
[j
] = AARCH64_OPND_QLF_NIL
;
997 DEBUG_TRACE ("SUCCESS");
1001 DEBUG_TRACE ("FAIL");
1005 /* Operand qualifier matching and resolving.
1007 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1008 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1010 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1014 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
1017 aarch64_opnd_qualifier_seq_t qualifiers
;
1019 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
1022 DEBUG_TRACE ("matching FAIL");
1026 if (inst
->opcode
->flags
& F_STRICT
)
1028 /* Require an exact qualifier match, even for NIL qualifiers. */
1029 nops
= aarch64_num_of_operands (inst
->opcode
);
1030 for (i
= 0; i
< nops
; ++i
)
1031 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
1035 /* Update the qualifiers. */
1036 if (update_p
== TRUE
)
1037 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1039 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
1041 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
1042 "update %s with %s for operand %d",
1043 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
1044 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
1045 inst
->operands
[i
].qualifier
= qualifiers
[i
];
1048 DEBUG_TRACE ("matching SUCCESS");
1052 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1055 IS32 indicates whether value is a 32-bit immediate or not.
1056 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1057 amount will be returned in *SHIFT_AMOUNT. */
1060 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
1064 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1068 /* Allow all zeros or all ones in top 32-bits, so that
1069 32-bit constant expressions like ~0x80000000 are
1071 uint64_t ext
= value
;
1072 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
1073 /* Immediate out of range. */
1075 value
&= (int64_t) 0xffffffff;
1078 /* first, try movz then movn */
1080 if ((value
& ((int64_t) 0xffff << 0)) == value
)
1082 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
1084 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
1086 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
1091 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1095 if (shift_amount
!= NULL
)
1096 *shift_amount
= amount
;
1098 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
1103 /* Build the accepted values for immediate logical SIMD instructions.
1105 The standard encodings of the immediate value are:
1106 N imms immr SIMD size R S
1107 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1108 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1109 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1110 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1111 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1112 0 11110s 00000r 2 UInt(r) UInt(s)
1113 where all-ones value of S is reserved.
1115 Let's call E the SIMD size.
1117 The immediate value is: S+1 bits '1' rotated to the right by R.
1119 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1120 (remember S != E - 1). */
1122 #define TOTAL_IMM_NB 5334
1127 aarch64_insn encoding
;
1128 } simd_imm_encoding
;
1130 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
1133 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1135 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1136 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1138 if (imm1
->imm
< imm2
->imm
)
1140 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R             S
   1         ssssss     rrrrrr      64        rrrrrr        ssssss
   0         0sssss     0rrrrr      32        rrrrr         sssss
   0         10ssss     00rrrr      16        rrrr          ssss
   0         110sss     000rrr      8         rrr           sss
   0         1110ss     0000rr      4         rr            ss
   0         11110s     00000r      2         r             s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1160 build_immediate_table (void)
1162 uint32_t log_e
, e
, s
, r
, s_mask
;
1168 for (log_e
= 1; log_e
<= 6; log_e
++)
1170 /* Get element size. */
1175 mask
= 0xffffffffffffffffull
;
1181 mask
= (1ull << e
) - 1;
1183 1 ((1 << 4) - 1) << 2 = 111100
1184 2 ((1 << 3) - 1) << 3 = 111000
1185 3 ((1 << 2) - 1) << 4 = 110000
1186 4 ((1 << 1) - 1) << 5 = 100000
1187 5 ((1 << 0) - 1) << 6 = 000000 */
1188 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1190 for (s
= 0; s
< e
- 1; s
++)
1191 for (r
= 0; r
< e
; r
++)
1193 /* s+1 consecutive bits to 1 (s < 63) */
1194 imm
= (1ull << (s
+ 1)) - 1;
1195 /* rotate right by r */
1197 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1198 /* replicate the constant depending on SIMD size */
1201 case 1: imm
= (imm
<< 2) | imm
;
1203 case 2: imm
= (imm
<< 4) | imm
;
1205 case 3: imm
= (imm
<< 8) | imm
;
1207 case 4: imm
= (imm
<< 16) | imm
;
1209 case 5: imm
= (imm
<< 32) | imm
;
1214 simd_immediates
[nb_imms
].imm
= imm
;
1215 simd_immediates
[nb_imms
].encoding
=
1216 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1220 assert (nb_imms
== TOTAL_IMM_NB
);
1221 qsort(simd_immediates
, nb_imms
,
1222 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1225 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1226 be accepted by logical (immediate) instructions
1227 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1229 ESIZE is the number of bytes in the decoded immediate value.
1230 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1231 VALUE will be returned in *ENCODING. */
1234 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1236 simd_imm_encoding imm_enc
;
1237 const simd_imm_encoding
*imm_encoding
;
1238 static bfd_boolean initialized
= FALSE
;
1242 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), esize: %d", value
,
1247 build_immediate_table ();
1251 /* Allow all zeros or all ones in top bits, so that
1252 constant expressions like ~1 are permitted. */
1253 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1254 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1257 /* Replicate to a full 64-bit value. */
1259 for (i
= esize
* 8; i
< 64; i
*= 2)
1260 value
|= (value
<< i
);
1262 imm_enc
.imm
= value
;
1263 imm_encoding
= (const simd_imm_encoding
*)
1264 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1265 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1266 if (imm_encoding
== NULL
)
1268 DEBUG_TRACE ("exit with FALSE");
1271 if (encoding
!= NULL
)
1272 *encoding
= imm_encoding
->encoding
;
1273 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros means IMM is not
	   an expanded 8-bit pattern.  */
	return -1;
    }
  return ret;
}
1299 /* Utility inline functions for operand_general_constraint_met_p. */
1302 set_error (aarch64_operand_error
*mismatch_detail
,
1303 enum aarch64_operand_error_kind kind
, int idx
,
1306 if (mismatch_detail
== NULL
)
1308 mismatch_detail
->kind
= kind
;
1309 mismatch_detail
->index
= idx
;
1310 mismatch_detail
->error
= error
;
1314 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1317 if (mismatch_detail
== NULL
)
1319 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1323 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1324 int idx
, int lower_bound
, int upper_bound
,
1327 if (mismatch_detail
== NULL
)
1329 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1330 mismatch_detail
->data
[0] = lower_bound
;
1331 mismatch_detail
->data
[1] = upper_bound
;
1335 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1336 int idx
, int lower_bound
, int upper_bound
)
1338 if (mismatch_detail
== NULL
)
1340 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1341 _("immediate value"));
1345 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1346 int idx
, int lower_bound
, int upper_bound
)
1348 if (mismatch_detail
== NULL
)
1350 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1351 _("immediate offset"));
1355 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1356 int idx
, int lower_bound
, int upper_bound
)
1358 if (mismatch_detail
== NULL
)
1360 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1361 _("register number"));
1365 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1366 int idx
, int lower_bound
, int upper_bound
)
1368 if (mismatch_detail
== NULL
)
1370 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1371 _("register element index"));
1375 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1376 int idx
, int lower_bound
, int upper_bound
)
1378 if (mismatch_detail
== NULL
)
1380 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1384 /* Report that the MUL modifier in operand IDX should be in the range
1385 [LOWER_BOUND, UPPER_BOUND]. */
1387 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1388 int idx
, int lower_bound
, int upper_bound
)
1390 if (mismatch_detail
== NULL
)
1392 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1397 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1400 if (mismatch_detail
== NULL
)
1402 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1403 mismatch_detail
->data
[0] = alignment
;
1407 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1410 if (mismatch_detail
== NULL
)
1412 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1413 mismatch_detail
->data
[0] = expected_num
;
1417 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1420 if (mismatch_detail
== NULL
)
1422 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1425 /* General constraint checking based on operand code.
1427 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1428 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1430 This function has to be called after the qualifiers for all operands
1433 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1434 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1435 of error message during the disassembling where error message is not
1436 wanted. We avoid the dynamic construction of strings of error messages
1437 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1438 use a combination of error code, static string and some integer data to
1439 represent an error. */
1442 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1443 enum aarch64_opnd type
,
1444 const aarch64_opcode
*opcode
,
1445 aarch64_operand_error
*mismatch_detail
)
1447 unsigned num
, modifiers
, shift
;
1449 int64_t imm
, min_value
, max_value
;
1450 uint64_t uvalue
, mask
;
1451 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1452 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1454 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1456 switch (aarch64_operands
[type
].op_class
)
1458 case AARCH64_OPND_CLASS_INT_REG
:
1459 /* Check pair reg constraints for cas* instructions. */
1460 if (type
== AARCH64_OPND_PAIRREG
)
1462 assert (idx
== 1 || idx
== 3);
1463 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1465 set_syntax_error (mismatch_detail
, idx
- 1,
1466 _("reg pair must start from even reg"));
1469 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1471 set_syntax_error (mismatch_detail
, idx
,
1472 _("reg pair must be contiguous"));
1478 /* <Xt> may be optional in some IC and TLBI instructions. */
1479 if (type
== AARCH64_OPND_Rt_SYS
)
1481 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1482 == AARCH64_OPND_CLASS_SYSTEM
));
1483 if (opnds
[1].present
1484 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1486 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1489 if (!opnds
[1].present
1490 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1492 set_other_error (mismatch_detail
, idx
, _("missing register"));
1498 case AARCH64_OPND_QLF_WSP
:
1499 case AARCH64_OPND_QLF_SP
:
1500 if (!aarch64_stack_pointer_p (opnd
))
1502 set_other_error (mismatch_detail
, idx
,
1503 _("stack pointer register expected"));
1512 case AARCH64_OPND_CLASS_SVE_REG
:
1515 case AARCH64_OPND_SVE_Zm3_INDEX
:
1516 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
1517 case AARCH64_OPND_SVE_Zm4_INDEX
:
1518 size
= get_operand_fields_width (get_operand_from_code (type
));
1519 shift
= get_operand_specific_data (&aarch64_operands
[type
]);
1520 mask
= (1 << shift
) - 1;
1521 if (opnd
->reg
.regno
> mask
)
1523 assert (mask
== 7 || mask
== 15);
1524 set_other_error (mismatch_detail
, idx
,
1526 ? _("z0-z15 expected")
1527 : _("z0-z7 expected"));
1530 mask
= (1 << (size
- shift
)) - 1;
1531 if (!value_in_range_p (opnd
->reglane
.index
, 0, mask
))
1533 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, mask
);
1538 case AARCH64_OPND_SVE_Zn_INDEX
:
1539 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1540 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1542 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1548 case AARCH64_OPND_SVE_ZnxN
:
1549 case AARCH64_OPND_SVE_ZtxN
:
1550 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1552 set_other_error (mismatch_detail
, idx
,
1553 _("invalid register list"));
1563 case AARCH64_OPND_CLASS_PRED_REG
:
1564 if (opnd
->reg
.regno
>= 8
1565 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1567 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1572 case AARCH64_OPND_CLASS_COND
:
1573 if (type
== AARCH64_OPND_COND1
1574 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1576 /* Not allow AL or NV. */
1577 set_syntax_error (mismatch_detail
, idx
, NULL
);
1581 case AARCH64_OPND_CLASS_ADDRESS
:
1582 /* Check writeback. */
1583 switch (opcode
->iclass
)
1587 case ldstnapair_offs
:
1590 if (opnd
->addr
.writeback
== 1)
1592 set_syntax_error (mismatch_detail
, idx
,
1593 _("unexpected address writeback"));
1598 if (opnd
->addr
.writeback
== 1 && opnd
->addr
.preind
!= 1)
1600 set_syntax_error (mismatch_detail
, idx
,
1601 _("unexpected address writeback"));
1606 case ldstpair_indexed
:
1609 if (opnd
->addr
.writeback
== 0)
1611 set_syntax_error (mismatch_detail
, idx
,
1612 _("address writeback expected"));
1617 assert (opnd
->addr
.writeback
== 0);
1622 case AARCH64_OPND_ADDR_SIMM7
:
1623 /* Scaled signed 7 bits immediate offset. */
1624 /* Get the size of the data element that is accessed, which may be
1625 different from that of the source register size,
1626 e.g. in strb/ldrb. */
1627 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1628 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1630 set_offset_out_of_range_error (mismatch_detail
, idx
,
1631 -64 * size
, 63 * size
);
1634 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1636 set_unaligned_error (mismatch_detail
, idx
, size
);
1640 case AARCH64_OPND_ADDR_OFFSET
:
1641 case AARCH64_OPND_ADDR_SIMM9
:
1642 /* Unscaled signed 9 bits immediate offset. */
1643 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1645 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1650 case AARCH64_OPND_ADDR_SIMM9_2
:
1651 /* Unscaled signed 9 bits immediate offset, which has to be negative
1653 size
= aarch64_get_qualifier_esize (qualifier
);
1654 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1655 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1656 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1658 set_other_error (mismatch_detail
, idx
,
1659 _("negative or unaligned offset expected"));
1662 case AARCH64_OPND_ADDR_SIMM10
:
1663 /* Scaled signed 10 bits immediate offset. */
1664 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4088))
1666 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4088);
1669 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 8))
1671 set_unaligned_error (mismatch_detail
, idx
, 8);
1676 case AARCH64_OPND_ADDR_SIMM11
:
1677 /* Signed 11 bits immediate offset (multiple of 16). */
1678 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -1024, 1008))
1680 set_offset_out_of_range_error (mismatch_detail
, idx
, -1024, 1008);
1684 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 16))
1686 set_unaligned_error (mismatch_detail
, idx
, 16);
1691 case AARCH64_OPND_ADDR_SIMM13
:
1692 /* Signed 13 bits immediate offset (multiple of 16). */
1693 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4080))
1695 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4080);
1699 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 16))
1701 set_unaligned_error (mismatch_detail
, idx
, 16);
1706 case AARCH64_OPND_SIMD_ADDR_POST
:
1707 /* AdvSIMD load/store multiple structures, post-index. */
1709 if (opnd
->addr
.offset
.is_reg
)
1711 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1715 set_other_error (mismatch_detail
, idx
,
1716 _("invalid register offset"));
1722 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1723 unsigned num_bytes
; /* total number of bytes transferred. */
1724 /* The opcode dependent area stores the number of elements in
1725 each structure to be loaded/stored. */
1726 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1727 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1728 /* Special handling of loading single structure to all lane. */
1729 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1730 * aarch64_get_qualifier_esize (prev
->qualifier
);
1732 num_bytes
= prev
->reglist
.num_regs
1733 * aarch64_get_qualifier_esize (prev
->qualifier
)
1734 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1735 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1737 set_other_error (mismatch_detail
, idx
,
1738 _("invalid post-increment amount"));
1744 case AARCH64_OPND_ADDR_REGOFF
:
1745 /* Get the size of the data element that is accessed, which may be
1746 different from that of the source register size,
1747 e.g. in strb/ldrb. */
1748 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1749 /* It is either no shift or shift by the binary logarithm of SIZE. */
1750 if (opnd
->shifter
.amount
!= 0
1751 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1753 set_other_error (mismatch_detail
, idx
,
1754 _("invalid shift amount"));
1757 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1759 switch (opnd
->shifter
.kind
)
1761 case AARCH64_MOD_UXTW
:
1762 case AARCH64_MOD_LSL
:
1763 case AARCH64_MOD_SXTW
:
1764 case AARCH64_MOD_SXTX
: break;
1766 set_other_error (mismatch_detail
, idx
,
1767 _("invalid extend/shift operator"));
1772 case AARCH64_OPND_ADDR_UIMM12
:
1773 imm
= opnd
->addr
.offset
.imm
;
1774 /* Get the size of the data element that is accessed, which may be
1775 different from that of the source register size,
1776 e.g. in strb/ldrb. */
1777 size
= aarch64_get_qualifier_esize (qualifier
);
1778 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1780 set_offset_out_of_range_error (mismatch_detail
, idx
,
1784 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1786 set_unaligned_error (mismatch_detail
, idx
, size
);
1791 case AARCH64_OPND_ADDR_PCREL14
:
1792 case AARCH64_OPND_ADDR_PCREL19
:
1793 case AARCH64_OPND_ADDR_PCREL21
:
1794 case AARCH64_OPND_ADDR_PCREL26
:
1795 imm
= opnd
->imm
.value
;
1796 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1798 /* The offset value in a PC-relative branch instruction is alway
1799 4-byte aligned and is encoded without the lowest 2 bits. */
1800 if (!value_aligned_p (imm
, 4))
1802 set_unaligned_error (mismatch_detail
, idx
, 4);
1805 /* Right shift by 2 so that we can carry out the following check
1809 size
= get_operand_fields_width (get_operand_from_code (type
));
1810 if (!value_fit_signed_field_p (imm
, size
))
1812 set_other_error (mismatch_detail
, idx
,
1813 _("immediate out of range"));
1818 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
1819 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
1820 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
1821 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
1825 assert (!opnd
->addr
.offset
.is_reg
);
1826 assert (opnd
->addr
.preind
);
1827 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
1830 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
1831 || (opnd
->shifter
.operator_present
1832 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
1834 set_other_error (mismatch_detail
, idx
,
1835 _("invalid addressing mode"));
1838 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1840 set_offset_out_of_range_error (mismatch_detail
, idx
,
1841 min_value
, max_value
);
1844 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1846 set_unaligned_error (mismatch_detail
, idx
, num
);
1851 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
1854 goto sve_imm_offset_vl
;
1856 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
1859 goto sve_imm_offset_vl
;
1861 case AARCH64_OPND_SVE_ADDR_RI_U6
:
1862 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
1863 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
1864 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
1868 assert (!opnd
->addr
.offset
.is_reg
);
1869 assert (opnd
->addr
.preind
);
1870 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
1873 if (opnd
->shifter
.operator_present
1874 || opnd
->shifter
.amount_present
)
1876 set_other_error (mismatch_detail
, idx
,
1877 _("invalid addressing mode"));
1880 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1882 set_offset_out_of_range_error (mismatch_detail
, idx
,
1883 min_value
, max_value
);
1886 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1888 set_unaligned_error (mismatch_detail
, idx
, num
);
1893 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
1896 goto sve_imm_offset
;
1898 case AARCH64_OPND_SVE_ADDR_R
:
1899 case AARCH64_OPND_SVE_ADDR_RR
:
1900 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
1901 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
1902 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
1903 case AARCH64_OPND_SVE_ADDR_RX
:
1904 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
1905 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
1906 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
1907 case AARCH64_OPND_SVE_ADDR_RZ
:
1908 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
1909 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
1910 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
1911 modifiers
= 1 << AARCH64_MOD_LSL
;
1913 assert (opnd
->addr
.offset
.is_reg
);
1914 assert (opnd
->addr
.preind
);
1915 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
1916 && opnd
->addr
.offset
.regno
== 31)
1918 set_other_error (mismatch_detail
, idx
,
1919 _("index register xzr is not allowed"));
1922 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1923 || (opnd
->shifter
.amount
1924 != get_operand_specific_data (&aarch64_operands
[type
])))
1926 set_other_error (mismatch_detail
, idx
,
1927 _("invalid addressing mode"));
1932 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
1933 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
1934 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
1935 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
1936 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
1937 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
1938 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
1939 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
1940 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
1941 goto sve_rr_operand
;
1943 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
1944 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
1945 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
1946 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
1949 goto sve_imm_offset
;
1951 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
1952 modifiers
= 1 << AARCH64_MOD_LSL
;
1954 assert (opnd
->addr
.offset
.is_reg
);
1955 assert (opnd
->addr
.preind
);
1956 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1957 || opnd
->shifter
.amount
< 0
1958 || opnd
->shifter
.amount
> 3)
1960 set_other_error (mismatch_detail
, idx
,
1961 _("invalid addressing mode"));
1966 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
1967 modifiers
= (1 << AARCH64_MOD_SXTW
);
1968 goto sve_zz_operand
;
1970 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
1971 modifiers
= 1 << AARCH64_MOD_UXTW
;
1972 goto sve_zz_operand
;
1979 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1980 if (type
== AARCH64_OPND_LEt
)
1982 /* Get the upper bound for the element index. */
1983 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1984 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1986 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1990 /* The opcode dependent area stores the number of elements in
1991 each structure to be loaded/stored. */
1992 num
= get_opcode_dependent_value (opcode
);
1995 case AARCH64_OPND_LVt
:
1996 assert (num
>= 1 && num
<= 4);
1997 /* Unless LD1/ST1, the number of registers should be equal to that
1998 of the structure elements. */
1999 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
2001 set_reg_list_error (mismatch_detail
, idx
, num
);
2005 case AARCH64_OPND_LVt_AL
:
2006 case AARCH64_OPND_LEt
:
2007 assert (num
>= 1 && num
<= 4);
2008 /* The number of registers should be equal to that of the structure
2010 if (opnd
->reglist
.num_regs
!= num
)
2012 set_reg_list_error (mismatch_detail
, idx
, num
);
2021 case AARCH64_OPND_CLASS_IMMEDIATE
:
2022 /* Constraint check on immediate operand. */
2023 imm
= opnd
->imm
.value
;
2024 /* E.g. imm_0_31 constrains value to be 0..31. */
2025 if (qualifier_value_in_range_constraint_p (qualifier
)
2026 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
2027 get_upper_bound (qualifier
)))
2029 set_imm_out_of_range_error (mismatch_detail
, idx
,
2030 get_lower_bound (qualifier
),
2031 get_upper_bound (qualifier
));
2037 case AARCH64_OPND_AIMM
:
2038 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2040 set_other_error (mismatch_detail
, idx
,
2041 _("invalid shift operator"));
2044 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
2046 set_other_error (mismatch_detail
, idx
,
2047 _("shift amount must be 0 or 12"));
2050 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
2052 set_other_error (mismatch_detail
, idx
,
2053 _("immediate out of range"));
2058 case AARCH64_OPND_HALF
:
2059 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
2060 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2062 set_other_error (mismatch_detail
, idx
,
2063 _("invalid shift operator"));
2066 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2067 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
2069 set_other_error (mismatch_detail
, idx
,
2070 _("shift amount must be a multiple of 16"));
2073 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
2075 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
2079 if (opnd
->imm
.value
< 0)
2081 set_other_error (mismatch_detail
, idx
,
2082 _("negative immediate value not allowed"));
2085 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
2087 set_other_error (mismatch_detail
, idx
,
2088 _("immediate out of range"));
2093 case AARCH64_OPND_IMM_MOV
:
2095 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2096 imm
= opnd
->imm
.value
;
2100 case OP_MOV_IMM_WIDEN
:
2103 case OP_MOV_IMM_WIDE
:
2104 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
2106 set_other_error (mismatch_detail
, idx
,
2107 _("immediate out of range"));
2111 case OP_MOV_IMM_LOG
:
2112 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
2114 set_other_error (mismatch_detail
, idx
,
2115 _("immediate out of range"));
2126 case AARCH64_OPND_NZCV
:
2127 case AARCH64_OPND_CCMP_IMM
:
2128 case AARCH64_OPND_EXCEPTION
:
2129 case AARCH64_OPND_TME_UIMM16
:
2130 case AARCH64_OPND_UIMM4
:
2131 case AARCH64_OPND_UIMM4_ADDG
:
2132 case AARCH64_OPND_UIMM7
:
2133 case AARCH64_OPND_UIMM3_OP1
:
2134 case AARCH64_OPND_UIMM3_OP2
:
2135 case AARCH64_OPND_SVE_UIMM3
:
2136 case AARCH64_OPND_SVE_UIMM7
:
2137 case AARCH64_OPND_SVE_UIMM8
:
2138 case AARCH64_OPND_SVE_UIMM8_53
:
2139 size
= get_operand_fields_width (get_operand_from_code (type
));
2141 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
2143 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2149 case AARCH64_OPND_UIMM10
:
2150 /* Scaled unsigned 10 bits immediate offset. */
2151 if (!value_in_range_p (opnd
->imm
.value
, 0, 1008))
2153 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1008);
2157 if (!value_aligned_p (opnd
->imm
.value
, 16))
2159 set_unaligned_error (mismatch_detail
, idx
, 16);
2164 case AARCH64_OPND_SIMM5
:
2165 case AARCH64_OPND_SVE_SIMM5
:
2166 case AARCH64_OPND_SVE_SIMM5B
:
2167 case AARCH64_OPND_SVE_SIMM6
:
2168 case AARCH64_OPND_SVE_SIMM8
:
2169 size
= get_operand_fields_width (get_operand_from_code (type
));
2171 if (!value_fit_signed_field_p (opnd
->imm
.value
, size
))
2173 set_imm_out_of_range_error (mismatch_detail
, idx
,
2175 (1 << (size
- 1)) - 1);
2180 case AARCH64_OPND_WIDTH
:
2181 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
2182 && opnds
[0].type
== AARCH64_OPND_Rd
);
2183 size
= get_upper_bound (qualifier
);
2184 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
2185 /* lsb+width <= reg.size */
2187 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
2188 size
- opnds
[idx
-1].imm
.value
);
2193 case AARCH64_OPND_LIMM
:
2194 case AARCH64_OPND_SVE_LIMM
:
2196 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2197 uint64_t uimm
= opnd
->imm
.value
;
2198 if (opcode
->op
== OP_BIC
)
2200 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2202 set_other_error (mismatch_detail
, idx
,
2203 _("immediate out of range"));
2209 case AARCH64_OPND_IMM0
:
2210 case AARCH64_OPND_FPIMM0
:
2211 if (opnd
->imm
.value
!= 0)
2213 set_other_error (mismatch_detail
, idx
,
2214 _("immediate zero expected"));
2219 case AARCH64_OPND_IMM_ROT1
:
2220 case AARCH64_OPND_IMM_ROT2
:
2221 case AARCH64_OPND_SVE_IMM_ROT2
:
2222 if (opnd
->imm
.value
!= 0
2223 && opnd
->imm
.value
!= 90
2224 && opnd
->imm
.value
!= 180
2225 && opnd
->imm
.value
!= 270)
2227 set_other_error (mismatch_detail
, idx
,
2228 _("rotate expected to be 0, 90, 180 or 270"));
2233 case AARCH64_OPND_IMM_ROT3
:
2234 case AARCH64_OPND_SVE_IMM_ROT1
:
2235 case AARCH64_OPND_SVE_IMM_ROT3
:
2236 if (opnd
->imm
.value
!= 90 && opnd
->imm
.value
!= 270)
2238 set_other_error (mismatch_detail
, idx
,
2239 _("rotate expected to be 90 or 270"));
2244 case AARCH64_OPND_SHLL_IMM
:
2246 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2247 if (opnd
->imm
.value
!= size
)
2249 set_other_error (mismatch_detail
, idx
,
2250 _("invalid shift amount"));
2255 case AARCH64_OPND_IMM_VLSL
:
2256 size
= aarch64_get_qualifier_esize (qualifier
);
2257 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
2259 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2265 case AARCH64_OPND_IMM_VLSR
:
2266 size
= aarch64_get_qualifier_esize (qualifier
);
2267 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
2269 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
2274 case AARCH64_OPND_SIMD_IMM
:
2275 case AARCH64_OPND_SIMD_IMM_SFT
:
2276 /* Qualifier check. */
2279 case AARCH64_OPND_QLF_LSL
:
2280 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2282 set_other_error (mismatch_detail
, idx
,
2283 _("invalid shift operator"));
2287 case AARCH64_OPND_QLF_MSL
:
2288 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
2290 set_other_error (mismatch_detail
, idx
,
2291 _("invalid shift operator"));
2295 case AARCH64_OPND_QLF_NIL
:
2296 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2298 set_other_error (mismatch_detail
, idx
,
2299 _("shift is not permitted"));
2307 /* Is the immediate valid? */
2309 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
2311 /* uimm8 or simm8 */
2312 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
2314 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
2318 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
2321 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2322 ffffffffgggggggghhhhhhhh'. */
2323 set_other_error (mismatch_detail
, idx
,
2324 _("invalid value for immediate"));
2327 /* Is the shift amount valid? */
2328 switch (opnd
->shifter
.kind
)
2330 case AARCH64_MOD_LSL
:
2331 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2332 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
2334 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
2338 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
2340 set_unaligned_error (mismatch_detail
, idx
, 8);
2344 case AARCH64_MOD_MSL
:
2345 /* Only 8 and 16 are valid shift amount. */
2346 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
2348 set_other_error (mismatch_detail
, idx
,
2349 _("shift amount must be 0 or 16"));
2354 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2356 set_other_error (mismatch_detail
, idx
,
2357 _("invalid shift operator"));
2364 case AARCH64_OPND_FPIMM
:
2365 case AARCH64_OPND_SIMD_FPIMM
:
2366 case AARCH64_OPND_SVE_FPIMM8
:
2367 if (opnd
->imm
.is_fp
== 0)
2369 set_other_error (mismatch_detail
, idx
,
2370 _("floating-point immediate expected"));
2373 /* The value is expected to be an 8-bit floating-point constant with
2374 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2375 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2377 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
2379 set_other_error (mismatch_detail
, idx
,
2380 _("immediate out of range"));
2383 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2385 set_other_error (mismatch_detail
, idx
,
2386 _("invalid shift operator"));
2391 case AARCH64_OPND_SVE_AIMM
:
2394 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2395 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2396 mask
= ~((uint64_t) -1 << (size
* 4) << (size
* 4));
2397 uvalue
= opnd
->imm
.value
;
2398 shift
= opnd
->shifter
.amount
;
2403 set_other_error (mismatch_detail
, idx
,
2404 _("no shift amount allowed for"
2405 " 8-bit constants"));
2411 if (shift
!= 0 && shift
!= 8)
2413 set_other_error (mismatch_detail
, idx
,
2414 _("shift amount must be 0 or 8"));
2417 if (shift
== 0 && (uvalue
& 0xff) == 0)
2420 uvalue
= (int64_t) uvalue
/ 256;
2424 if ((uvalue
& mask
) != uvalue
&& (uvalue
| ~mask
) != uvalue
)
2426 set_other_error (mismatch_detail
, idx
,
2427 _("immediate too big for element size"));
2430 uvalue
= (uvalue
- min_value
) & mask
;
2433 set_other_error (mismatch_detail
, idx
,
2434 _("invalid arithmetic immediate"));
2439 case AARCH64_OPND_SVE_ASIMM
:
2443 case AARCH64_OPND_SVE_I1_HALF_ONE
:
2444 assert (opnd
->imm
.is_fp
);
2445 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x3f800000)
2447 set_other_error (mismatch_detail
, idx
,
2448 _("floating-point value must be 0.5 or 1.0"));
2453 case AARCH64_OPND_SVE_I1_HALF_TWO
:
2454 assert (opnd
->imm
.is_fp
);
2455 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x40000000)
2457 set_other_error (mismatch_detail
, idx
,
2458 _("floating-point value must be 0.5 or 2.0"));
2463 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
2464 assert (opnd
->imm
.is_fp
);
2465 if (opnd
->imm
.value
!= 0 && opnd
->imm
.value
!= 0x3f800000)
2467 set_other_error (mismatch_detail
, idx
,
2468 _("floating-point value must be 0.0 or 1.0"));
2473 case AARCH64_OPND_SVE_INV_LIMM
:
2475 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2476 uint64_t uimm
= ~opnd
->imm
.value
;
2477 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2479 set_other_error (mismatch_detail
, idx
,
2480 _("immediate out of range"));
2486 case AARCH64_OPND_SVE_LIMM_MOV
:
2488 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2489 uint64_t uimm
= opnd
->imm
.value
;
2490 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2492 set_other_error (mismatch_detail
, idx
,
2493 _("immediate out of range"));
2496 if (!aarch64_sve_dupm_mov_immediate_p (uimm
, esize
))
2498 set_other_error (mismatch_detail
, idx
,
2499 _("invalid replicated MOV immediate"));
2505 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2506 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
2507 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
2509 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
2514 case AARCH64_OPND_SVE_SHLIMM_PRED
:
2515 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
2516 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2517 if (!value_in_range_p (opnd
->imm
.value
, 0, 8 * size
- 1))
2519 set_imm_out_of_range_error (mismatch_detail
, idx
,
2525 case AARCH64_OPND_SVE_SHRIMM_PRED
:
2526 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
2527 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2528 if (!value_in_range_p (opnd
->imm
.value
, 1, 8 * size
))
2530 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, 8 * size
);
2540 case AARCH64_OPND_CLASS_SYSTEM
:
2543 case AARCH64_OPND_PSTATEFIELD
:
2544 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
2548 The immediate must be #0 or #1. */
2549 if ((opnd
->pstatefield
== 0x03 /* UAO. */
2550 || opnd
->pstatefield
== 0x04 /* PAN. */
2551 || opnd
->pstatefield
== 0x19 /* SSBS. */
2552 || opnd
->pstatefield
== 0x1a) /* DIT. */
2553 && opnds
[1].imm
.value
> 1)
2555 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2558 /* MSR SPSel, #uimm4
2559 Uses uimm4 as a control value to select the stack pointer: if
2560 bit 0 is set it selects the current exception level's stack
2561 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2562 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2563 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
2565 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2574 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
2575 /* Get the upper bound for the element index. */
2576 if (opcode
->op
== OP_FCMLA_ELEM
)
2577 /* FCMLA index range depends on the vector size of other operands
2578 and is halfed because complex numbers take two elements. */
2579 num
= aarch64_get_qualifier_nelem (opnds
[0].qualifier
)
2580 * aarch64_get_qualifier_esize (opnds
[0].qualifier
) / 2;
2583 num
= num
/ aarch64_get_qualifier_esize (qualifier
) - 1;
2584 assert (aarch64_get_qualifier_nelem (qualifier
) == 1);
2586 /* Index out-of-range. */
2587 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
2589 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2592 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2593 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2594 number is encoded in "size:M:Rm":
2600 if (type
== AARCH64_OPND_Em16
&& qualifier
== AARCH64_OPND_QLF_S_H
2601 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2603 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2608 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2609 assert (idx
== 1 || idx
== 2);
2612 case AARCH64_OPND_Rm_EXT
:
2613 if (!aarch64_extend_operator_p (opnd
->shifter
.kind
)
2614 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2616 set_other_error (mismatch_detail
, idx
,
2617 _("extend operator expected"));
2620 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2621 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2622 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2624 if (!aarch64_stack_pointer_p (opnds
+ 0)
2625 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2627 if (!opnd
->shifter
.operator_present
)
2629 set_other_error (mismatch_detail
, idx
,
2630 _("missing extend operator"));
2633 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2635 set_other_error (mismatch_detail
, idx
,
2636 _("'LSL' operator not allowed"));
2640 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2641 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2642 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2644 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2647 /* In the 64-bit form, the final register operand is written as Wm
2648 for all but the (possibly omitted) UXTX/LSL and SXTX
2650 N.B. GAS allows X register to be used with any operator as a
2651 programming convenience. */
2652 if (qualifier
== AARCH64_OPND_QLF_X
2653 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2654 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2655 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2657 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2662 case AARCH64_OPND_Rm_SFT
:
2663 /* ROR is not available to the shifted register operand in
2664 arithmetic instructions. */
2665 if (!aarch64_shift_operator_p (opnd
->shifter
.kind
))
2667 set_other_error (mismatch_detail
, idx
,
2668 _("shift operator expected"));
2671 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2672 && opcode
->iclass
!= log_shift
)
2674 set_other_error (mismatch_detail
, idx
,
2675 _("'ROR' operator not allowed"));
2678 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2679 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2681 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2698 /* Main entrypoint for the operand constraint checking.
2700 Return 1 if operands of *INST meet the constraint applied by the operand
2701 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2702 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2703 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2704 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2705 error kind when it is notified that an instruction does not pass the check).
2707 Un-determined operand qualifiers may get established during the process. */
2710 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2711 aarch64_operand_error
*mismatch_detail
)
2715 DEBUG_TRACE ("enter");
2717 /* Check for cases where a source register needs to be the same as the
2718 destination register. Do this before matching qualifiers since if
2719 an instruction has both invalid tying and invalid qualifiers,
2720 the error about qualifiers would suggest several alternative
2721 instructions that also have invalid tying. */
2722 i
= inst
->opcode
->tied_operand
;
2723 if (i
> 0 && (inst
->operands
[0].reg
.regno
!= inst
->operands
[i
].reg
.regno
))
2725 if (mismatch_detail
)
2727 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
2728 mismatch_detail
->index
= i
;
2729 mismatch_detail
->error
= NULL
;
2734 /* Match operands' qualifier.
2735 *INST has already had qualifier establish for some, if not all, of
2736 its operands; we need to find out whether these established
2737 qualifiers match one of the qualifier sequence in
2738 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2739 with the corresponding qualifier in such a sequence.
2740 Only basic operand constraint checking is done here; the more thorough
2741 constraint checking will carried out by operand_general_constraint_met_p,
2742 which has be to called after this in order to get all of the operands'
2743 qualifiers established. */
2744 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2746 DEBUG_TRACE ("FAIL on operand qualifier matching");
2747 if (mismatch_detail
)
2749 /* Return an error type to indicate that it is the qualifier
2750 matching failure; we don't care about which operand as there
2751 are enough information in the opcode table to reproduce it. */
2752 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2753 mismatch_detail
->index
= -1;
2754 mismatch_detail
->error
= NULL
;
2759 /* Match operands' constraint. */
2760 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2762 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2763 if (type
== AARCH64_OPND_NIL
)
2765 if (inst
->operands
[i
].skip
)
2767 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2770 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2771 inst
->opcode
, mismatch_detail
) == 0)
2773 DEBUG_TRACE ("FAIL on operand %d", i
);
2778 DEBUG_TRACE ("PASS");
2783 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2784 Also updates the TYPE of each INST->OPERANDS with the corresponding
2785 value of OPCODE->OPERANDS.
2787 Note that some operand qualifiers may need to be manually cleared by
2788 the caller before it further calls the aarch64_opcode_encode; by
2789 doing this, it helps the qualifier matching facilities work
2792 const aarch64_opcode
*
2793 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2796 const aarch64_opcode
*old
= inst
->opcode
;
2798 inst
->opcode
= opcode
;
2800 /* Update the operand types. */
2801 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2803 inst
->operands
[i
].type
= opcode
->operands
[i
];
2804 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2808 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2814 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2817 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2818 if (operands
[i
] == operand
)
2820 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),  \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2856 /* Return the integer register name.
2857 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2859 static inline const char *
2860 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2862 const int has_zr
= sp_reg_p
? 0 : 1;
2863 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2864 return int_reg
[has_zr
][is_64
][regno
];
2867 /* Like get_int_reg_name, but IS_64 is always 1. */
2869 static inline const char *
2870 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2872 const int has_zr
= sp_reg_p
? 0 : 1;
2873 return int_reg
[has_zr
][1][regno
];
2876 /* Get the name of the integer offset register in OPND, using the shift type
2877 to decide whether it's a word or doubleword. */
2879 static inline const char *
2880 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
2882 switch (opnd
->shifter
.kind
)
2884 case AARCH64_MOD_UXTW
:
2885 case AARCH64_MOD_SXTW
:
2886 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
2888 case AARCH64_MOD_LSL
:
2889 case AARCH64_MOD_SXTX
:
2890 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
2897 /* Get the name of the SVE vector offset register in OPND, using the operand
2898 qualifier to decide whether the suffix should be .S or .D. */
2900 static inline const char *
2901 get_addr_sve_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
)
2903 assert (qualifier
== AARCH64_OPND_QLF_S_S
2904 || qualifier
== AARCH64_OPND_QLF_S_D
);
2905 return sve_reg
[qualifier
== AARCH64_OPND_QLF_S_D
][regno
];
2908 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Split the encoding into its architectural fields.  */
  uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7>.  */
  uint32_t frac7 = imm8 & 0x7f;		/* imm8<6:0>.  */
  uint32_t exp_top = frac7 >> 6;	/* imm8<6>.  */
  /* Replicate(imm8<6>,4): 0 -> 0x0, 1 -> 0xf.  */
  uint32_t exp_repl4 = exp_top * 0xf;
  uint64_t imm = 0;

  if (size == 8)
    {
      /* Assemble the top 32 bits of a double, then shift into place;
	 the low 32 fraction bits are all zero.  */
      imm = ((uint64_t) sign << (63 - 32))		/* imm8<7>.  */
	| ((uint64_t) (exp_top ^ 1) << (62 - 32))	/* NOT(imm8<6>).  */
	| ((uint64_t) exp_repl4 << (58 - 32))
	| ((uint64_t) exp_top << (57 - 32))
	| ((uint64_t) exp_top << (56 - 32))
	| ((uint64_t) exp_top << (55 - 32))	/* Replicate(imm8<6>,7).  */
	| ((uint64_t) frac7 << (48 - 32));	/* imm8<6>:imm8<5:0>.  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      /* Single-precision layout; half-precision shares it.  */
      imm = ((uint64_t) sign << 31)		/* imm8<7>.  */
	| ((uint64_t) (exp_top ^ 1) << 30)	/* NOT(imm8<6>).  */
	| ((uint64_t) exp_repl4 << 26)		/* Replicate(imm8<6>,4).  */
	| ((uint64_t) frac7 << 19);		/* imm8<6>:imm8<5:0>.  */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2972 /* Produce the string representation of the register list operand *OPND
2973 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2974 the register name that comes before the register number, such as "v". */
2976 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
2979 const int num_regs
= opnd
->reglist
.num_regs
;
2980 const int first_reg
= opnd
->reglist
.first_regno
;
2981 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2982 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2983 char tb
[8]; /* Temporary buffer. */
2985 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2986 assert (num_regs
>= 1 && num_regs
<= 4);
2988 /* Prepare the index if any. */
2989 if (opnd
->reglist
.has_index
)
2990 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2991 snprintf (tb
, 8, "[%" PRIi64
"]", (opnd
->reglist
.index
% 100));
2995 /* The hyphenated form is preferred for disassembly if there are
2996 more than two registers in the list, and the register numbers
2997 are monotonically increasing in increments of one. */
2998 if (num_regs
> 2 && last_reg
> first_reg
)
2999 snprintf (buf
, size
, "{%s%d.%s-%s%d.%s}%s", prefix
, first_reg
, qlf_name
,
3000 prefix
, last_reg
, qlf_name
, tb
);
3003 const int reg0
= first_reg
;
3004 const int reg1
= (first_reg
+ 1) & 0x1f;
3005 const int reg2
= (first_reg
+ 2) & 0x1f;
3006 const int reg3
= (first_reg
+ 3) & 0x1f;
3011 snprintf (buf
, size
, "{%s%d.%s}%s", prefix
, reg0
, qlf_name
, tb
);
3014 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s}%s", prefix
, reg0
, qlf_name
,
3015 prefix
, reg1
, qlf_name
, tb
);
3018 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3019 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
3020 prefix
, reg2
, qlf_name
, tb
);
3023 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3024 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
3025 prefix
, reg2
, qlf_name
, prefix
, reg3
, qlf_name
, tb
);
3031 /* Print the register+immediate address in OPND to BUF, which has SIZE
3032 characters. BASE is the name of the base register. */
3035 print_immediate_offset_address (char *buf
, size_t size
,
3036 const aarch64_opnd_info
*opnd
,
3039 if (opnd
->addr
.writeback
)
3041 if (opnd
->addr
.preind
)
3042 snprintf (buf
, size
, "[%s, #%d]!", base
, opnd
->addr
.offset
.imm
);
3044 snprintf (buf
, size
, "[%s], #%d", base
, opnd
->addr
.offset
.imm
);
3048 if (opnd
->shifter
.operator_present
)
3050 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL_VL
);
3051 snprintf (buf
, size
, "[%s, #%d, mul vl]",
3052 base
, opnd
->addr
.offset
.imm
);
3054 else if (opnd
->addr
.offset
.imm
)
3055 snprintf (buf
, size
, "[%s, #%d]", base
, opnd
->addr
.offset
.imm
);
3057 snprintf (buf
, size
, "[%s]", base
);
3061 /* Produce the string representation of the register offset address operand
3062 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3063 the names of the base and offset registers. */
3065 print_register_offset_address (char *buf
, size_t size
,
3066 const aarch64_opnd_info
*opnd
,
3067 const char *base
, const char *offset
)
3069 char tb
[16]; /* Temporary buffer. */
3070 bfd_boolean print_extend_p
= TRUE
;
3071 bfd_boolean print_amount_p
= TRUE
;
3072 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
3074 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
3075 || !opnd
->shifter
.amount_present
))
3077 /* Not print the shift/extend amount when the amount is zero and
3078 when it is not the special case of 8-bit load/store instruction. */
3079 print_amount_p
= FALSE
;
3080 /* Likewise, no need to print the shift operator LSL in such a
3082 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3083 print_extend_p
= FALSE
;
3086 /* Prepare for the extend/shift. */
3090 snprintf (tb
, sizeof (tb
), ", %s #%" PRIi64
, shift_name
,
3091 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3092 (opnd
->shifter
.amount
% 100));
3094 snprintf (tb
, sizeof (tb
), ", %s", shift_name
);
3099 snprintf (buf
, size
, "[%s, %s%s]", base
, offset
, tb
);
3102 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3103 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3104 PC, PCREL_P and ADDRESS are used to pass in and return information about
3105 the PC-relative address calculation, where the PC value is passed in
3106 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3107 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3108 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3110 The function serves both the disassembler and the assembler diagnostics
3111 issuer, which is the reason why it lives in this file. */
3114 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
3115 const aarch64_opcode
*opcode
,
3116 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
3117 bfd_vma
*address
, char** notes
)
3119 unsigned int i
, num_conds
;
3120 const char *name
= NULL
;
3121 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
3122 enum aarch64_modifier_kind kind
;
3123 uint64_t addr
, enum_value
;
3131 case AARCH64_OPND_Rd
:
3132 case AARCH64_OPND_Rn
:
3133 case AARCH64_OPND_Rm
:
3134 case AARCH64_OPND_Rt
:
3135 case AARCH64_OPND_Rt2
:
3136 case AARCH64_OPND_Rs
:
3137 case AARCH64_OPND_Ra
:
3138 case AARCH64_OPND_Rt_SYS
:
3139 case AARCH64_OPND_PAIRREG
:
3140 case AARCH64_OPND_SVE_Rm
:
3141 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3142 the <ic_op>, therefore we use opnd->present to override the
3143 generic optional-ness information. */
3144 if (opnd
->type
== AARCH64_OPND_Rt_SYS
)
3149 /* Omit the operand, e.g. RET. */
3150 else if (optional_operand_p (opcode
, idx
)
3152 == get_optional_operand_default_value (opcode
)))
3154 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3155 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3156 snprintf (buf
, size
, "%s",
3157 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3160 case AARCH64_OPND_Rd_SP
:
3161 case AARCH64_OPND_Rn_SP
:
3162 case AARCH64_OPND_Rt_SP
:
3163 case AARCH64_OPND_SVE_Rn_SP
:
3164 case AARCH64_OPND_Rm_SP
:
3165 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3166 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
3167 || opnd
->qualifier
== AARCH64_OPND_QLF_X
3168 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
3169 snprintf (buf
, size
, "%s",
3170 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
3173 case AARCH64_OPND_Rm_EXT
:
3174 kind
= opnd
->shifter
.kind
;
3175 assert (idx
== 1 || idx
== 2);
3176 if ((aarch64_stack_pointer_p (opnds
)
3177 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
3178 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
3179 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
3180 && kind
== AARCH64_MOD_UXTW
)
3181 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
3182 && kind
== AARCH64_MOD_UXTX
)))
3184 /* 'LSL' is the preferred form in this case. */
3185 kind
= AARCH64_MOD_LSL
;
3186 if (opnd
->shifter
.amount
== 0)
3188 /* Shifter omitted. */
3189 snprintf (buf
, size
, "%s",
3190 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3194 if (opnd
->shifter
.amount
)
3195 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3196 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3197 aarch64_operand_modifiers
[kind
].name
,
3198 opnd
->shifter
.amount
);
3200 snprintf (buf
, size
, "%s, %s",
3201 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3202 aarch64_operand_modifiers
[kind
].name
);
3205 case AARCH64_OPND_Rm_SFT
:
3206 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3207 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3208 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3209 snprintf (buf
, size
, "%s",
3210 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3212 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3213 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3214 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3215 opnd
->shifter
.amount
);
3218 case AARCH64_OPND_Fd
:
3219 case AARCH64_OPND_Fn
:
3220 case AARCH64_OPND_Fm
:
3221 case AARCH64_OPND_Fa
:
3222 case AARCH64_OPND_Ft
:
3223 case AARCH64_OPND_Ft2
:
3224 case AARCH64_OPND_Sd
:
3225 case AARCH64_OPND_Sn
:
3226 case AARCH64_OPND_Sm
:
3227 case AARCH64_OPND_SVE_VZn
:
3228 case AARCH64_OPND_SVE_Vd
:
3229 case AARCH64_OPND_SVE_Vm
:
3230 case AARCH64_OPND_SVE_Vn
:
3231 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
3235 case AARCH64_OPND_Va
:
3236 case AARCH64_OPND_Vd
:
3237 case AARCH64_OPND_Vn
:
3238 case AARCH64_OPND_Vm
:
3239 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
3240 aarch64_get_qualifier_name (opnd
->qualifier
));
3243 case AARCH64_OPND_Ed
:
3244 case AARCH64_OPND_En
:
3245 case AARCH64_OPND_Em
:
3246 case AARCH64_OPND_Em16
:
3247 case AARCH64_OPND_SM3_IMM2
:
3248 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3249 aarch64_get_qualifier_name (opnd
->qualifier
),
3250 opnd
->reglane
.index
);
3253 case AARCH64_OPND_VdD1
:
3254 case AARCH64_OPND_VnD1
:
3255 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
3258 case AARCH64_OPND_LVn
:
3259 case AARCH64_OPND_LVt
:
3260 case AARCH64_OPND_LVt_AL
:
3261 case AARCH64_OPND_LEt
:
3262 print_register_list (buf
, size
, opnd
, "v");
3265 case AARCH64_OPND_SVE_Pd
:
3266 case AARCH64_OPND_SVE_Pg3
:
3267 case AARCH64_OPND_SVE_Pg4_5
:
3268 case AARCH64_OPND_SVE_Pg4_10
:
3269 case AARCH64_OPND_SVE_Pg4_16
:
3270 case AARCH64_OPND_SVE_Pm
:
3271 case AARCH64_OPND_SVE_Pn
:
3272 case AARCH64_OPND_SVE_Pt
:
3273 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3274 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
3275 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
3276 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
3277 snprintf (buf
, size
, "p%d/%s", opnd
->reg
.regno
,
3278 aarch64_get_qualifier_name (opnd
->qualifier
));
3280 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
3281 aarch64_get_qualifier_name (opnd
->qualifier
));
3284 case AARCH64_OPND_SVE_Za_5
:
3285 case AARCH64_OPND_SVE_Za_16
:
3286 case AARCH64_OPND_SVE_Zd
:
3287 case AARCH64_OPND_SVE_Zm_5
:
3288 case AARCH64_OPND_SVE_Zm_16
:
3289 case AARCH64_OPND_SVE_Zn
:
3290 case AARCH64_OPND_SVE_Zt
:
3291 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3292 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
3294 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
3295 aarch64_get_qualifier_name (opnd
->qualifier
));
3298 case AARCH64_OPND_SVE_ZnxN
:
3299 case AARCH64_OPND_SVE_ZtxN
:
3300 print_register_list (buf
, size
, opnd
, "z");
3303 case AARCH64_OPND_SVE_Zm3_INDEX
:
3304 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
3305 case AARCH64_OPND_SVE_Zm4_INDEX
:
3306 case AARCH64_OPND_SVE_Zn_INDEX
:
3307 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3308 aarch64_get_qualifier_name (opnd
->qualifier
),
3309 opnd
->reglane
.index
);
3312 case AARCH64_OPND_CRn
:
3313 case AARCH64_OPND_CRm
:
3314 snprintf (buf
, size
, "C%" PRIi64
, opnd
->imm
.value
);
3317 case AARCH64_OPND_IDX
:
3318 case AARCH64_OPND_MASK
:
3319 case AARCH64_OPND_IMM
:
3320 case AARCH64_OPND_IMM_2
:
3321 case AARCH64_OPND_WIDTH
:
3322 case AARCH64_OPND_UIMM3_OP1
:
3323 case AARCH64_OPND_UIMM3_OP2
:
3324 case AARCH64_OPND_BIT_NUM
:
3325 case AARCH64_OPND_IMM_VLSL
:
3326 case AARCH64_OPND_IMM_VLSR
:
3327 case AARCH64_OPND_SHLL_IMM
:
3328 case AARCH64_OPND_IMM0
:
3329 case AARCH64_OPND_IMMR
:
3330 case AARCH64_OPND_IMMS
:
3331 case AARCH64_OPND_FBITS
:
3332 case AARCH64_OPND_TME_UIMM16
:
3333 case AARCH64_OPND_SIMM5
:
3334 case AARCH64_OPND_SVE_SHLIMM_PRED
:
3335 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
3336 case AARCH64_OPND_SVE_SHRIMM_PRED
:
3337 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
3338 case AARCH64_OPND_SVE_SIMM5
:
3339 case AARCH64_OPND_SVE_SIMM5B
:
3340 case AARCH64_OPND_SVE_SIMM6
:
3341 case AARCH64_OPND_SVE_SIMM8
:
3342 case AARCH64_OPND_SVE_UIMM3
:
3343 case AARCH64_OPND_SVE_UIMM7
:
3344 case AARCH64_OPND_SVE_UIMM8
:
3345 case AARCH64_OPND_SVE_UIMM8_53
:
3346 case AARCH64_OPND_IMM_ROT1
:
3347 case AARCH64_OPND_IMM_ROT2
:
3348 case AARCH64_OPND_IMM_ROT3
:
3349 case AARCH64_OPND_SVE_IMM_ROT1
:
3350 case AARCH64_OPND_SVE_IMM_ROT2
:
3351 case AARCH64_OPND_SVE_IMM_ROT3
:
3352 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3355 case AARCH64_OPND_SVE_I1_HALF_ONE
:
3356 case AARCH64_OPND_SVE_I1_HALF_TWO
:
3357 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
3360 c
.i
= opnd
->imm
.value
;
3361 snprintf (buf
, size
, "#%.1f", c
.f
);
3365 case AARCH64_OPND_SVE_PATTERN
:
3366 if (optional_operand_p (opcode
, idx
)
3367 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3369 enum_value
= opnd
->imm
.value
;
3370 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3371 if (aarch64_sve_pattern_array
[enum_value
])
3372 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[enum_value
]);
3374 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3377 case AARCH64_OPND_SVE_PATTERN_SCALED
:
3378 if (optional_operand_p (opcode
, idx
)
3379 && !opnd
->shifter
.operator_present
3380 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3382 enum_value
= opnd
->imm
.value
;
3383 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3384 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
3385 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[opnd
->imm
.value
]);
3387 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3388 if (opnd
->shifter
.operator_present
)
3390 size_t len
= strlen (buf
);
3391 snprintf (buf
+ len
, size
- len
, ", %s #%" PRIi64
,
3392 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3393 opnd
->shifter
.amount
);
3397 case AARCH64_OPND_SVE_PRFOP
:
3398 enum_value
= opnd
->imm
.value
;
3399 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
3400 if (aarch64_sve_prfop_array
[enum_value
])
3401 snprintf (buf
, size
, "%s", aarch64_sve_prfop_array
[enum_value
]);
3403 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3406 case AARCH64_OPND_IMM_MOV
:
3407 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3409 case 4: /* e.g. MOV Wd, #<imm32>. */
3411 int imm32
= opnd
->imm
.value
;
3412 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
3415 case 8: /* e.g. MOV Xd, #<imm64>. */
3416 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
3417 opnd
->imm
.value
, opnd
->imm
.value
);
3419 default: assert (0);
3423 case AARCH64_OPND_FPIMM0
:
3424 snprintf (buf
, size
, "#0.0");
3427 case AARCH64_OPND_LIMM
:
3428 case AARCH64_OPND_AIMM
:
3429 case AARCH64_OPND_HALF
:
3430 case AARCH64_OPND_SVE_INV_LIMM
:
3431 case AARCH64_OPND_SVE_LIMM
:
3432 case AARCH64_OPND_SVE_LIMM_MOV
:
3433 if (opnd
->shifter
.amount
)
3434 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3435 opnd
->shifter
.amount
);
3437 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3440 case AARCH64_OPND_SIMD_IMM
:
3441 case AARCH64_OPND_SIMD_IMM_SFT
:
3442 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3443 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
3444 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3446 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%" PRIi64
, opnd
->imm
.value
,
3447 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3448 opnd
->shifter
.amount
);
3451 case AARCH64_OPND_SVE_AIMM
:
3452 case AARCH64_OPND_SVE_ASIMM
:
3453 if (opnd
->shifter
.amount
)
3454 snprintf (buf
, size
, "#%" PRIi64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3455 opnd
->shifter
.amount
);
3457 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3460 case AARCH64_OPND_FPIMM
:
3461 case AARCH64_OPND_SIMD_FPIMM
:
3462 case AARCH64_OPND_SVE_FPIMM8
:
3463 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3465 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3468 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
3469 snprintf (buf
, size
, "#%.18e", c
.f
);
3472 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3475 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
3476 snprintf (buf
, size
, "#%.18e", c
.f
);
3479 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3482 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
3483 snprintf (buf
, size
, "#%.18e", c
.d
);
3486 default: assert (0);
3490 case AARCH64_OPND_CCMP_IMM
:
3491 case AARCH64_OPND_NZCV
:
3492 case AARCH64_OPND_EXCEPTION
:
3493 case AARCH64_OPND_UIMM4
:
3494 case AARCH64_OPND_UIMM4_ADDG
:
3495 case AARCH64_OPND_UIMM7
:
3496 case AARCH64_OPND_UIMM10
:
3497 if (optional_operand_p (opcode
, idx
) == TRUE
3498 && (opnd
->imm
.value
==
3499 (int64_t) get_optional_operand_default_value (opcode
)))
3500 /* Omit the operand, e.g. DCPS1. */
3502 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
3505 case AARCH64_OPND_COND
:
3506 case AARCH64_OPND_COND1
:
3507 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
3508 num_conds
= ARRAY_SIZE (opnd
->cond
->names
);
3509 for (i
= 1; i
< num_conds
&& opnd
->cond
->names
[i
]; ++i
)
3511 size_t len
= strlen (buf
);
3513 snprintf (buf
+ len
, size
- len
, " // %s = %s",
3514 opnd
->cond
->names
[0], opnd
->cond
->names
[i
]);
3516 snprintf (buf
+ len
, size
- len
, ", %s",
3517 opnd
->cond
->names
[i
]);
3521 case AARCH64_OPND_ADDR_ADRP
:
3522 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
3528 /* This is not necessary during the disassembling, as print_address_func
3529 in the disassemble_info will take care of the printing. But some
3530 other callers may be still interested in getting the string in *STR,
3531 so here we do snprintf regardless. */
3532 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3535 case AARCH64_OPND_ADDR_PCREL14
:
3536 case AARCH64_OPND_ADDR_PCREL19
:
3537 case AARCH64_OPND_ADDR_PCREL21
:
3538 case AARCH64_OPND_ADDR_PCREL26
:
3539 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
3544 /* This is not necessary during the disassembling, as print_address_func
3545 in the disassemble_info will take care of the printing. But some
3546 other callers may be still interested in getting the string in *STR,
3547 so here we do snprintf regardless. */
3548 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3551 case AARCH64_OPND_ADDR_SIMPLE
:
3552 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
3553 case AARCH64_OPND_SIMD_ADDR_POST
:
3554 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3555 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
3557 if (opnd
->addr
.offset
.is_reg
)
3558 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
3560 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
3563 snprintf (buf
, size
, "[%s]", name
);
3566 case AARCH64_OPND_ADDR_REGOFF
:
3567 case AARCH64_OPND_SVE_ADDR_R
:
3568 case AARCH64_OPND_SVE_ADDR_RR
:
3569 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
3570 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
3571 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
3572 case AARCH64_OPND_SVE_ADDR_RX
:
3573 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
3574 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
3575 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
3576 print_register_offset_address
3577 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3578 get_offset_int_reg_name (opnd
));
3581 case AARCH64_OPND_SVE_ADDR_RZ
:
3582 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
3583 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
3584 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
3585 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
3586 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
3587 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
3588 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
3589 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
3590 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
3591 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
3592 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
3593 print_register_offset_address
3594 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3595 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3598 case AARCH64_OPND_ADDR_SIMM7
:
3599 case AARCH64_OPND_ADDR_SIMM9
:
3600 case AARCH64_OPND_ADDR_SIMM9_2
:
3601 case AARCH64_OPND_ADDR_SIMM10
:
3602 case AARCH64_OPND_ADDR_SIMM11
:
3603 case AARCH64_OPND_ADDR_SIMM13
:
3604 case AARCH64_OPND_ADDR_OFFSET
:
3605 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
3606 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
3607 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
3608 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
3609 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
3610 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
3611 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
3612 case AARCH64_OPND_SVE_ADDR_RI_U6
:
3613 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
3614 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
3615 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
3616 print_immediate_offset_address
3617 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
3620 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
3621 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
3622 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
3623 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
3624 print_immediate_offset_address
3626 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
));
3629 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
3630 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
3631 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
3632 print_register_offset_address
3634 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
3635 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3638 case AARCH64_OPND_ADDR_UIMM12
:
3639 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3640 if (opnd
->addr
.offset
.imm
)
3641 snprintf (buf
, size
, "[%s, #%d]", name
, opnd
->addr
.offset
.imm
);
3643 snprintf (buf
, size
, "[%s]", name
);
3646 case AARCH64_OPND_SYSREG
:
3647 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
3649 bfd_boolean exact_match
3650 = (aarch64_sys_regs
[i
].flags
& opnd
->sysreg
.flags
)
3651 == opnd
->sysreg
.flags
;
3653	  /* Try to find an exact match, but if that fails, return the first
3654	     partial match that was found.  */
3655 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
.value
3656 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
])
3657 && (name
== NULL
|| exact_match
))
3659 name
= aarch64_sys_regs
[i
].name
;
3667      /* If we didn't match exactly, that means the presence of a flag
3668 indicates what we didn't want for this instruction. e.g. If
3669 F_REG_READ is there, that means we were looking for a write
3670 register. See aarch64_ext_sysreg. */
3671 if (aarch64_sys_regs
[i
].flags
& F_REG_WRITE
)
3672 *notes
= _("reading from a write-only register");
3673 else if (aarch64_sys_regs
[i
].flags
& F_REG_READ
)
3674 *notes
= _("writing to a read-only register");
3679 snprintf (buf
, size
, "%s", name
);
3682 /* Implementation defined system register. */
3683 unsigned int value
= opnd
->sysreg
.value
;
3684 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
3685 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
3690 case AARCH64_OPND_PSTATEFIELD
:
3691 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
3692 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
3694 assert (aarch64_pstatefields
[i
].name
);
3695 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
3698 case AARCH64_OPND_SYSREG_AT
:
3699 case AARCH64_OPND_SYSREG_DC
:
3700 case AARCH64_OPND_SYSREG_IC
:
3701 case AARCH64_OPND_SYSREG_TLBI
:
3702 case AARCH64_OPND_SYSREG_SR
:
3703 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
3706 case AARCH64_OPND_BARRIER
:
3707 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
3710 case AARCH64_OPND_BARRIER_ISB
:
3711 /* Operand can be omitted, e.g. in DCPS1. */
3712 if (! optional_operand_p (opcode
, idx
)
3713 || (opnd
->barrier
->value
3714 != get_optional_operand_default_value (opcode
)))
3715 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
3718 case AARCH64_OPND_PRFOP
:
3719 if (opnd
->prfop
->name
!= NULL
)
3720 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
3722 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
3725 case AARCH64_OPND_BARRIER_PSB
:
3726 case AARCH64_OPND_BTI_TARGET
:
3727 if ((HINT_FLAG (opnd
->hint_option
->value
) & HINT_OPD_F_NOPRINT
) == 0)
3728 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
/* Encode the (op0, op1, CRn, CRm, op2) fields of a system register into
   a single integer: op0 in bits [15:14], op1 in bits [13:11], CRn in
   bits [10:7], CRm in bits [6:3] and op2 in bits [2:0].  (The << then
   >> 5 is equivalent to shifting each field 5 bits less; it is kept for
   historical reasons.)  This layout matches the decode used when
   printing implementation-defined registers: (value >> 14) & 0x3,
   (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)

/* For 3.9.3 Instructions for Accessing Special Purpose Registers:
   these encodings all have op0 == 3 and CRn == 4.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))

/* For 3.9.10 System Instructions: these encodings all have op0 == 1.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
3760 /* TODO: there is one more issue that needs to be resolved:
3761    1. handle cpu-implementation-defined system registers.  */
3762 const aarch64_sys_reg aarch64_sys_regs
[] =
3764 { "spsr_el1", CPEN_(0,C0
,0), 0 }, /* = spsr_svc */
3765 { "spsr_el12", CPEN_ (5, C0
, 0), F_ARCHEXT
},
3766 { "elr_el1", CPEN_(0,C0
,1), 0 },
3767 { "elr_el12", CPEN_ (5, C0
, 1), F_ARCHEXT
},
3768 { "sp_el0", CPEN_(0,C1
,0), 0 },
3769 { "spsel", CPEN_(0,C2
,0), 0 },
3770 { "daif", CPEN_(3,C2
,1), 0 },
3771 { "currentel", CPEN_(0,C2
,2), F_REG_READ
}, /* RO */
3772 { "pan", CPEN_(0,C2
,3), F_ARCHEXT
},
3773 { "uao", CPEN_ (0, C2
, 4), F_ARCHEXT
},
3774 { "nzcv", CPEN_(3,C2
,0), 0 },
3775 { "ssbs", CPEN_(3,C2
,6), F_ARCHEXT
},
3776 { "fpcr", CPEN_(3,C4
,0), 0 },
3777 { "fpsr", CPEN_(3,C4
,1), 0 },
3778 { "dspsr_el0", CPEN_(3,C5
,0), 0 },
3779 { "dlr_el0", CPEN_(3,C5
,1), 0 },
3780 { "spsr_el2", CPEN_(4,C0
,0), 0 }, /* = spsr_hyp */
3781 { "elr_el2", CPEN_(4,C0
,1), 0 },
3782 { "sp_el1", CPEN_(4,C1
,0), 0 },
3783 { "spsr_irq", CPEN_(4,C3
,0), 0 },
3784 { "spsr_abt", CPEN_(4,C3
,1), 0 },
3785 { "spsr_und", CPEN_(4,C3
,2), 0 },
3786 { "spsr_fiq", CPEN_(4,C3
,3), 0 },
3787 { "spsr_el3", CPEN_(6,C0
,0), 0 },
3788 { "elr_el3", CPEN_(6,C0
,1), 0 },
3789 { "sp_el2", CPEN_(6,C1
,0), 0 },
3790 { "spsr_svc", CPEN_(0,C0
,0), F_DEPRECATED
}, /* = spsr_el1 */
3791 { "spsr_hyp", CPEN_(4,C0
,0), F_DEPRECATED
}, /* = spsr_el2 */
3792 { "midr_el1", CPENC(3,0,C0
,C0
,0), F_REG_READ
}, /* RO */
3793 { "ctr_el0", CPENC(3,3,C0
,C0
,1), F_REG_READ
}, /* RO */
3794 { "mpidr_el1", CPENC(3,0,C0
,C0
,5), F_REG_READ
}, /* RO */
3795 { "revidr_el1", CPENC(3,0,C0
,C0
,6), F_REG_READ
}, /* RO */
3796 { "aidr_el1", CPENC(3,1,C0
,C0
,7), F_REG_READ
}, /* RO */
3797 { "dczid_el0", CPENC(3,3,C0
,C0
,7), F_REG_READ
}, /* RO */
3798 { "id_dfr0_el1", CPENC(3,0,C0
,C1
,2), F_REG_READ
}, /* RO */
3799 { "id_pfr0_el1", CPENC(3,0,C0
,C1
,0), F_REG_READ
}, /* RO */
3800 { "id_pfr1_el1", CPENC(3,0,C0
,C1
,1), F_REG_READ
}, /* RO */
3801 { "id_pfr2_el1", CPENC(3,0,C0
,C3
,4), F_ARCHEXT
| F_REG_READ
}, /* RO */
3802 { "id_afr0_el1", CPENC(3,0,C0
,C1
,3), F_REG_READ
}, /* RO */
3803 { "id_mmfr0_el1", CPENC(3,0,C0
,C1
,4), F_REG_READ
}, /* RO */
3804 { "id_mmfr1_el1", CPENC(3,0,C0
,C1
,5), F_REG_READ
}, /* RO */
3805 { "id_mmfr2_el1", CPENC(3,0,C0
,C1
,6), F_REG_READ
}, /* RO */
3806 { "id_mmfr3_el1", CPENC(3,0,C0
,C1
,7), F_REG_READ
}, /* RO */
3807 { "id_mmfr4_el1", CPENC(3,0,C0
,C2
,6), F_REG_READ
}, /* RO */
3808 { "id_isar0_el1", CPENC(3,0,C0
,C2
,0), F_REG_READ
}, /* RO */
3809 { "id_isar1_el1", CPENC(3,0,C0
,C2
,1), F_REG_READ
}, /* RO */
3810 { "id_isar2_el1", CPENC(3,0,C0
,C2
,2), F_REG_READ
}, /* RO */
3811 { "id_isar3_el1", CPENC(3,0,C0
,C2
,3), F_REG_READ
}, /* RO */
3812 { "id_isar4_el1", CPENC(3,0,C0
,C2
,4), F_REG_READ
}, /* RO */
3813 { "id_isar5_el1", CPENC(3,0,C0
,C2
,5), F_REG_READ
}, /* RO */
3814 { "mvfr0_el1", CPENC(3,0,C0
,C3
,0), F_REG_READ
}, /* RO */
3815 { "mvfr1_el1", CPENC(3,0,C0
,C3
,1), F_REG_READ
}, /* RO */
3816 { "mvfr2_el1", CPENC(3,0,C0
,C3
,2), F_REG_READ
}, /* RO */
3817 { "ccsidr_el1", CPENC(3,1,C0
,C0
,0), F_REG_READ
}, /* RO */
3818 { "id_aa64pfr0_el1", CPENC(3,0,C0
,C4
,0), F_REG_READ
}, /* RO */
3819 { "id_aa64pfr1_el1", CPENC(3,0,C0
,C4
,1), F_REG_READ
}, /* RO */
3820 { "id_aa64dfr0_el1", CPENC(3,0,C0
,C5
,0), F_REG_READ
}, /* RO */
3821 { "id_aa64dfr1_el1", CPENC(3,0,C0
,C5
,1), F_REG_READ
}, /* RO */
3822 { "id_aa64isar0_el1", CPENC(3,0,C0
,C6
,0), F_REG_READ
}, /* RO */
3823 { "id_aa64isar1_el1", CPENC(3,0,C0
,C6
,1), F_REG_READ
}, /* RO */
3824 { "id_aa64mmfr0_el1", CPENC(3,0,C0
,C7
,0), F_REG_READ
}, /* RO */
3825 { "id_aa64mmfr1_el1", CPENC(3,0,C0
,C7
,1), F_REG_READ
}, /* RO */
3826 { "id_aa64mmfr2_el1", CPENC (3, 0, C0
, C7
, 2), F_ARCHEXT
| F_REG_READ
}, /* RO */
3827 { "id_aa64afr0_el1", CPENC(3,0,C0
,C5
,4), F_REG_READ
}, /* RO */
3828 { "id_aa64afr1_el1", CPENC(3,0,C0
,C5
,5), F_REG_READ
}, /* RO */
3829 { "id_aa64zfr0_el1", CPENC (3, 0, C0
, C4
, 4), F_ARCHEXT
| F_REG_READ
}, /* RO */
3830 { "clidr_el1", CPENC(3,1,C0
,C0
,1), F_REG_READ
}, /* RO */
3831 { "csselr_el1", CPENC(3,2,C0
,C0
,0), 0 },
3832 { "vpidr_el2", CPENC(3,4,C0
,C0
,0), 0 },
3833 { "vmpidr_el2", CPENC(3,4,C0
,C0
,5), 0 },
3834 { "sctlr_el1", CPENC(3,0,C1
,C0
,0), 0 },
3835 { "sctlr_el2", CPENC(3,4,C1
,C0
,0), 0 },
3836 { "sctlr_el3", CPENC(3,6,C1
,C0
,0), 0 },
3837 { "sctlr_el12", CPENC (3, 5, C1
, C0
, 0), F_ARCHEXT
},
3838 { "actlr_el1", CPENC(3,0,C1
,C0
,1), 0 },
3839 { "actlr_el2", CPENC(3,4,C1
,C0
,1), 0 },
3840 { "actlr_el3", CPENC(3,6,C1
,C0
,1), 0 },
3841 { "cpacr_el1", CPENC(3,0,C1
,C0
,2), 0 },
3842 { "cpacr_el12", CPENC (3, 5, C1
, C0
, 2), F_ARCHEXT
},
3843 { "cptr_el2", CPENC(3,4,C1
,C1
,2), 0 },
3844 { "cptr_el3", CPENC(3,6,C1
,C1
,2), 0 },
3845 { "scr_el3", CPENC(3,6,C1
,C1
,0), 0 },
3846 { "hcr_el2", CPENC(3,4,C1
,C1
,0), 0 },
3847 { "mdcr_el2", CPENC(3,4,C1
,C1
,1), 0 },
3848 { "mdcr_el3", CPENC(3,6,C1
,C3
,1), 0 },
3849 { "hstr_el2", CPENC(3,4,C1
,C1
,3), 0 },
3850 { "hacr_el2", CPENC(3,4,C1
,C1
,7), 0 },
3851 { "zcr_el1", CPENC (3, 0, C1
, C2
, 0), F_ARCHEXT
},
3852 { "zcr_el12", CPENC (3, 5, C1
, C2
, 0), F_ARCHEXT
},
3853 { "zcr_el2", CPENC (3, 4, C1
, C2
, 0), F_ARCHEXT
},
3854 { "zcr_el3", CPENC (3, 6, C1
, C2
, 0), F_ARCHEXT
},
3855 { "zidr_el1", CPENC (3, 0, C0
, C0
, 7), F_ARCHEXT
},
3856 { "ttbr0_el1", CPENC(3,0,C2
,C0
,0), 0 },
3857 { "ttbr1_el1", CPENC(3,0,C2
,C0
,1), 0 },
3858 { "ttbr0_el2", CPENC(3,4,C2
,C0
,0), 0 },
3859 { "ttbr1_el2", CPENC (3, 4, C2
, C0
, 1), F_ARCHEXT
},
3860 { "ttbr0_el3", CPENC(3,6,C2
,C0
,0), 0 },
3861 { "ttbr0_el12", CPENC (3, 5, C2
, C0
, 0), F_ARCHEXT
},
3862 { "ttbr1_el12", CPENC (3, 5, C2
, C0
, 1), F_ARCHEXT
},
3863 { "vttbr_el2", CPENC(3,4,C2
,C1
,0), 0 },
3864 { "tcr_el1", CPENC(3,0,C2
,C0
,2), 0 },
3865 { "tcr_el2", CPENC(3,4,C2
,C0
,2), 0 },
3866 { "tcr_el3", CPENC(3,6,C2
,C0
,2), 0 },
3867 { "tcr_el12", CPENC (3, 5, C2
, C0
, 2), F_ARCHEXT
},
3868 { "vtcr_el2", CPENC(3,4,C2
,C1
,2), 0 },
3869 { "apiakeylo_el1", CPENC (3, 0, C2
, C1
, 0), F_ARCHEXT
},
3870 { "apiakeyhi_el1", CPENC (3, 0, C2
, C1
, 1), F_ARCHEXT
},
3871 { "apibkeylo_el1", CPENC (3, 0, C2
, C1
, 2), F_ARCHEXT
},
3872 { "apibkeyhi_el1", CPENC (3, 0, C2
, C1
, 3), F_ARCHEXT
},
3873 { "apdakeylo_el1", CPENC (3, 0, C2
, C2
, 0), F_ARCHEXT
},
3874 { "apdakeyhi_el1", CPENC (3, 0, C2
, C2
, 1), F_ARCHEXT
},
3875 { "apdbkeylo_el1", CPENC (3, 0, C2
, C2
, 2), F_ARCHEXT
},
3876 { "apdbkeyhi_el1", CPENC (3, 0, C2
, C2
, 3), F_ARCHEXT
},
3877 { "apgakeylo_el1", CPENC (3, 0, C2
, C3
, 0), F_ARCHEXT
},
3878 { "apgakeyhi_el1", CPENC (3, 0, C2
, C3
, 1), F_ARCHEXT
},
3879 { "afsr0_el1", CPENC(3,0,C5
,C1
,0), 0 },
3880 { "afsr1_el1", CPENC(3,0,C5
,C1
,1), 0 },
3881 { "afsr0_el2", CPENC(3,4,C5
,C1
,0), 0 },
3882 { "afsr1_el2", CPENC(3,4,C5
,C1
,1), 0 },
3883 { "afsr0_el3", CPENC(3,6,C5
,C1
,0), 0 },
3884 { "afsr0_el12", CPENC (3, 5, C5
, C1
, 0), F_ARCHEXT
},
3885 { "afsr1_el3", CPENC(3,6,C5
,C1
,1), 0 },
3886 { "afsr1_el12", CPENC (3, 5, C5
, C1
, 1), F_ARCHEXT
},
3887 { "esr_el1", CPENC(3,0,C5
,C2
,0), 0 },
3888 { "esr_el2", CPENC(3,4,C5
,C2
,0), 0 },
3889 { "esr_el3", CPENC(3,6,C5
,C2
,0), 0 },
3890 { "esr_el12", CPENC (3, 5, C5
, C2
, 0), F_ARCHEXT
},
3891 { "vsesr_el2", CPENC (3, 4, C5
, C2
, 3), F_ARCHEXT
},
3892 { "fpexc32_el2", CPENC(3,4,C5
,C3
,0), 0 },
3893 { "erridr_el1", CPENC (3, 0, C5
, C3
, 0), F_ARCHEXT
| F_REG_READ
}, /* RO */
3894 { "errselr_el1", CPENC (3, 0, C5
, C3
, 1), F_ARCHEXT
},
3895 { "erxfr_el1", CPENC (3, 0, C5
, C4
, 0), F_ARCHEXT
| F_REG_READ
}, /* RO */
3896 { "erxctlr_el1", CPENC (3, 0, C5
, C4
, 1), F_ARCHEXT
},
3897 { "erxstatus_el1", CPENC (3, 0, C5
, C4
, 2), F_ARCHEXT
},
3898 { "erxaddr_el1", CPENC (3, 0, C5
, C4
, 3), F_ARCHEXT
},
3899 { "erxmisc0_el1", CPENC (3, 0, C5
, C5
, 0), F_ARCHEXT
},
3900 { "erxmisc1_el1", CPENC (3, 0, C5
, C5
, 1), F_ARCHEXT
},
3901 { "far_el1", CPENC(3,0,C6
,C0
,0), 0 },
3902 { "far_el2", CPENC(3,4,C6
,C0
,0), 0 },
3903 { "far_el3", CPENC(3,6,C6
,C0
,0), 0 },
3904 { "far_el12", CPENC (3, 5, C6
, C0
, 0), F_ARCHEXT
},
3905 { "hpfar_el2", CPENC(3,4,C6
,C0
,4), 0 },
3906 { "par_el1", CPENC(3,0,C7
,C4
,0), 0 },
3907 { "mair_el1", CPENC(3,0,C10
,C2
,0), 0 },
3908 { "mair_el2", CPENC(3,4,C10
,C2
,0), 0 },
3909 { "mair_el3", CPENC(3,6,C10
,C2
,0), 0 },
3910 { "mair_el12", CPENC (3, 5, C10
, C2
, 0), F_ARCHEXT
},
3911 { "amair_el1", CPENC(3,0,C10
,C3
,0), 0 },
3912 { "amair_el2", CPENC(3,4,C10
,C3
,0), 0 },
3913 { "amair_el3", CPENC(3,6,C10
,C3
,0), 0 },
3914 { "amair_el12", CPENC (3, 5, C10
, C3
, 0), F_ARCHEXT
},
3915 { "vbar_el1", CPENC(3,0,C12
,C0
,0), 0 },
3916 { "vbar_el2", CPENC(3,4,C12
,C0
,0), 0 },
3917 { "vbar_el3", CPENC(3,6,C12
,C0
,0), 0 },
3918 { "vbar_el12", CPENC (3, 5, C12
, C0
, 0), F_ARCHEXT
},
3919 { "rvbar_el1", CPENC(3,0,C12
,C0
,1), F_REG_READ
}, /* RO */
3920 { "rvbar_el2", CPENC(3,4,C12
,C0
,1), F_REG_READ
}, /* RO */
3921 { "rvbar_el3", CPENC(3,6,C12
,C0
,1), F_REG_READ
}, /* RO */
3922 { "rmr_el1", CPENC(3,0,C12
,C0
,2), 0 },
3923 { "rmr_el2", CPENC(3,4,C12
,C0
,2), 0 },
3924 { "rmr_el3", CPENC(3,6,C12
,C0
,2), 0 },
3925 { "isr_el1", CPENC(3,0,C12
,C1
,0), F_REG_READ
}, /* RO */
3926 { "disr_el1", CPENC (3, 0, C12
, C1
, 1), F_ARCHEXT
},
3927 { "vdisr_el2", CPENC (3, 4, C12
, C1
, 1), F_ARCHEXT
},
3928 { "contextidr_el1", CPENC(3,0,C13
,C0
,1), 0 },
3929 { "contextidr_el2", CPENC (3, 4, C13
, C0
, 1), F_ARCHEXT
},
3930 { "contextidr_el12", CPENC (3, 5, C13
, C0
, 1), F_ARCHEXT
},
3931 { "rndr", CPENC(3,3,C2
,C4
,0), F_ARCHEXT
| F_REG_READ
}, /* RO */
3932 { "rndrrs", CPENC(3,3,C2
,C4
,1), F_ARCHEXT
| F_REG_READ
}, /* RO */
3933 { "tco", CPENC(3,3,C4
,C2
,7), F_ARCHEXT
},
3934 { "tfsre0_el1", CPENC(3,0,C6
,C6
,1), F_ARCHEXT
},
3935 { "tfsr_el1", CPENC(3,0,C6
,C5
,0), F_ARCHEXT
},
3936 { "tfsr_el2", CPENC(3,4,C6
,C5
,0), F_ARCHEXT
},
3937 { "tfsr_el3", CPENC(3,6,C6
,C6
,0), F_ARCHEXT
},
3938 { "tfsr_el12", CPENC(3,5,C6
,C6
,0), F_ARCHEXT
},
3939 { "rgsr_el1", CPENC(3,0,C1
,C0
,5), F_ARCHEXT
},
3940 { "gcr_el1", CPENC(3,0,C1
,C0
,6), F_ARCHEXT
},
3941 { "tpidr_el0", CPENC(3,3,C13
,C0
,2), 0 },
3942 { "tpidrro_el0", CPENC(3,3,C13
,C0
,3), 0 }, /* RW */
3943 { "tpidr_el1", CPENC(3,0,C13
,C0
,4), 0 },
3944 { "tpidr_el2", CPENC(3,4,C13
,C0
,2), 0 },
3945 { "tpidr_el3", CPENC(3,6,C13
,C0
,2), 0 },
3946 { "scxtnum_el0", CPENC(3,3,C13
,C0
,7), F_ARCHEXT
},
3947 { "scxtnum_el1", CPENC(3,0,C13
,C0
,7), F_ARCHEXT
},
3948 { "scxtnum_el2", CPENC(3,4,C13
,C0
,7), F_ARCHEXT
},
3949 { "scxtnum_el12", CPENC(3,5,C13
,C0
,7), F_ARCHEXT
},
3950 { "scxtnum_el3", CPENC(3,6,C13
,C0
,7), F_ARCHEXT
},
3951 { "teecr32_el1", CPENC(2,2,C0
, C0
,0), 0 }, /* See section 3.9.7.1 */
3952 { "cntfrq_el0", CPENC(3,3,C14
,C0
,0), 0 }, /* RW */
3953 { "cntpct_el0", CPENC(3,3,C14
,C0
,1), F_REG_READ
}, /* RO */
3954 { "cntvct_el0", CPENC(3,3,C14
,C0
,2), F_REG_READ
}, /* RO */
3955 { "cntvoff_el2", CPENC(3,4,C14
,C0
,3), 0 },
3956 { "cntkctl_el1", CPENC(3,0,C14
,C1
,0), 0 },
3957 { "cntkctl_el12", CPENC (3, 5, C14
, C1
, 0), F_ARCHEXT
},
3958 { "cnthctl_el2", CPENC(3,4,C14
,C1
,0), 0 },
3959 { "cntp_tval_el0", CPENC(3,3,C14
,C2
,0), 0 },
3960 { "cntp_tval_el02", CPENC (3, 5, C14
, C2
, 0), F_ARCHEXT
},
3961 { "cntp_ctl_el0", CPENC(3,3,C14
,C2
,1), 0 },
3962 { "cntp_ctl_el02", CPENC (3, 5, C14
, C2
, 1), F_ARCHEXT
},
3963 { "cntp_cval_el0", CPENC(3,3,C14
,C2
,2), 0 },
3964 { "cntp_cval_el02", CPENC (3, 5, C14
, C2
, 2), F_ARCHEXT
},
3965 { "cntv_tval_el0", CPENC(3,3,C14
,C3
,0), 0 },
3966 { "cntv_tval_el02", CPENC (3, 5, C14
, C3
, 0), F_ARCHEXT
},
3967 { "cntv_ctl_el0", CPENC(3,3,C14
,C3
,1), 0 },
3968 { "cntv_ctl_el02", CPENC (3, 5, C14
, C3
, 1), F_ARCHEXT
},
3969 { "cntv_cval_el0", CPENC(3,3,C14
,C3
,2), 0 },
3970 { "cntv_cval_el02", CPENC (3, 5, C14
, C3
, 2), F_ARCHEXT
},
3971 { "cnthp_tval_el2", CPENC(3,4,C14
,C2
,0), 0 },
3972 { "cnthp_ctl_el2", CPENC(3,4,C14
,C2
,1), 0 },
3973 { "cnthp_cval_el2", CPENC(3,4,C14
,C2
,2), 0 },
3974 { "cntps_tval_el1", CPENC(3,7,C14
,C2
,0), 0 },
3975 { "cntps_ctl_el1", CPENC(3,7,C14
,C2
,1), 0 },
3976 { "cntps_cval_el1", CPENC(3,7,C14
,C2
,2), 0 },
3977 { "cnthv_tval_el2", CPENC (3, 4, C14
, C3
, 0), F_ARCHEXT
},
3978 { "cnthv_ctl_el2", CPENC (3, 4, C14
, C3
, 1), F_ARCHEXT
},
3979 { "cnthv_cval_el2", CPENC (3, 4, C14
, C3
, 2), F_ARCHEXT
},
3980 { "dacr32_el2", CPENC(3,4,C3
,C0
,0), 0 },
3981 { "ifsr32_el2", CPENC(3,4,C5
,C0
,1), 0 },
3982 { "teehbr32_el1", CPENC(2,2,C1
,C0
,0), 0 },
3983 { "sder32_el3", CPENC(3,6,C1
,C1
,1), 0 },
3984 { "mdscr_el1", CPENC(2,0,C0
, C2
, 2), 0 },
3985 { "mdccsr_el0", CPENC(2,3,C0
, C1
, 0), F_REG_READ
}, /* r */
3986 { "mdccint_el1", CPENC(2,0,C0
, C2
, 0), 0 },
3987 { "dbgdtr_el0", CPENC(2,3,C0
, C4
, 0), 0 },
3988 { "dbgdtrrx_el0", CPENC(2,3,C0
, C5
, 0), F_REG_READ
}, /* r */
3989 { "dbgdtrtx_el0", CPENC(2,3,C0
, C5
, 0), F_REG_WRITE
}, /* w */
3990 { "osdtrrx_el1", CPENC(2,0,C0
, C0
, 2), 0 },
3991 { "osdtrtx_el1", CPENC(2,0,C0
, C3
, 2), 0 },
3992 { "oseccr_el1", CPENC(2,0,C0
, C6
, 2), 0 },
3993 { "dbgvcr32_el2", CPENC(2,4,C0
, C7
, 0), 0 },
3994 { "dbgbvr0_el1", CPENC(2,0,C0
, C0
, 4), 0 },
3995 { "dbgbvr1_el1", CPENC(2,0,C0
, C1
, 4), 0 },
3996 { "dbgbvr2_el1", CPENC(2,0,C0
, C2
, 4), 0 },
3997 { "dbgbvr3_el1", CPENC(2,0,C0
, C3
, 4), 0 },
3998 { "dbgbvr4_el1", CPENC(2,0,C0
, C4
, 4), 0 },
3999 { "dbgbvr5_el1", CPENC(2,0,C0
, C5
, 4), 0 },
4000 { "dbgbvr6_el1", CPENC(2,0,C0
, C6
, 4), 0 },
4001 { "dbgbvr7_el1", CPENC(2,0,C0
, C7
, 4), 0 },
4002 { "dbgbvr8_el1", CPENC(2,0,C0
, C8
, 4), 0 },
4003 { "dbgbvr9_el1", CPENC(2,0,C0
, C9
, 4), 0 },
4004 { "dbgbvr10_el1", CPENC(2,0,C0
, C10
,4), 0 },
4005 { "dbgbvr11_el1", CPENC(2,0,C0
, C11
,4), 0 },
4006 { "dbgbvr12_el1", CPENC(2,0,C0
, C12
,4), 0 },
4007 { "dbgbvr13_el1", CPENC(2,0,C0
, C13
,4), 0 },
4008 { "dbgbvr14_el1", CPENC(2,0,C0
, C14
,4), 0 },
4009 { "dbgbvr15_el1", CPENC(2,0,C0
, C15
,4), 0 },
4010 { "dbgbcr0_el1", CPENC(2,0,C0
, C0
, 5), 0 },
4011 { "dbgbcr1_el1", CPENC(2,0,C0
, C1
, 5), 0 },
4012 { "dbgbcr2_el1", CPENC(2,0,C0
, C2
, 5), 0 },
4013 { "dbgbcr3_el1", CPENC(2,0,C0
, C3
, 5), 0 },
4014 { "dbgbcr4_el1", CPENC(2,0,C0
, C4
, 5), 0 },
4015 { "dbgbcr5_el1", CPENC(2,0,C0
, C5
, 5), 0 },
4016 { "dbgbcr6_el1", CPENC(2,0,C0
, C6
, 5), 0 },
4017 { "dbgbcr7_el1", CPENC(2,0,C0
, C7
, 5), 0 },
4018 { "dbgbcr8_el1", CPENC(2,0,C0
, C8
, 5), 0 },
4019 { "dbgbcr9_el1", CPENC(2,0,C0
, C9
, 5), 0 },
4020 { "dbgbcr10_el1", CPENC(2,0,C0
, C10
,5), 0 },
4021 { "dbgbcr11_el1", CPENC(2,0,C0
, C11
,5), 0 },
4022 { "dbgbcr12_el1", CPENC(2,0,C0
, C12
,5), 0 },
4023 { "dbgbcr13_el1", CPENC(2,0,C0
, C13
,5), 0 },
4024 { "dbgbcr14_el1", CPENC(2,0,C0
, C14
,5), 0 },
4025 { "dbgbcr15_el1", CPENC(2,0,C0
, C15
,5), 0 },
4026 { "dbgwvr0_el1", CPENC(2,0,C0
, C0
, 6), 0 },
4027 { "dbgwvr1_el1", CPENC(2,0,C0
, C1
, 6), 0 },
4028 { "dbgwvr2_el1", CPENC(2,0,C0
, C2
, 6), 0 },
4029 { "dbgwvr3_el1", CPENC(2,0,C0
, C3
, 6), 0 },
4030 { "dbgwvr4_el1", CPENC(2,0,C0
, C4
, 6), 0 },
4031 { "dbgwvr5_el1", CPENC(2,0,C0
, C5
, 6), 0 },
4032 { "dbgwvr6_el1", CPENC(2,0,C0
, C6
, 6), 0 },
4033 { "dbgwvr7_el1", CPENC(2,0,C0
, C7
, 6), 0 },
4034 { "dbgwvr8_el1", CPENC(2,0,C0
, C8
, 6), 0 },
4035 { "dbgwvr9_el1", CPENC(2,0,C0
, C9
, 6), 0 },
4036 { "dbgwvr10_el1", CPENC(2,0,C0
, C10
,6), 0 },
4037 { "dbgwvr11_el1", CPENC(2,0,C0
, C11
,6), 0 },
4038 { "dbgwvr12_el1", CPENC(2,0,C0
, C12
,6), 0 },
4039 { "dbgwvr13_el1", CPENC(2,0,C0
, C13
,6), 0 },
4040 { "dbgwvr14_el1", CPENC(2,0,C0
, C14
,6), 0 },
4041 { "dbgwvr15_el1", CPENC(2,0,C0
, C15
,6), 0 },
4042 { "dbgwcr0_el1", CPENC(2,0,C0
, C0
, 7), 0 },
4043 { "dbgwcr1_el1", CPENC(2,0,C0
, C1
, 7), 0 },
4044 { "dbgwcr2_el1", CPENC(2,0,C0
, C2
, 7), 0 },
4045 { "dbgwcr3_el1", CPENC(2,0,C0
, C3
, 7), 0 },
4046 { "dbgwcr4_el1", CPENC(2,0,C0
, C4
, 7), 0 },
4047 { "dbgwcr5_el1", CPENC(2,0,C0
, C5
, 7), 0 },
4048 { "dbgwcr6_el1", CPENC(2,0,C0
, C6
, 7), 0 },
4049 { "dbgwcr7_el1", CPENC(2,0,C0
, C7
, 7), 0 },
4050 { "dbgwcr8_el1", CPENC(2,0,C0
, C8
, 7), 0 },
4051 { "dbgwcr9_el1", CPENC(2,0,C0
, C9
, 7), 0 },
4052 { "dbgwcr10_el1", CPENC(2,0,C0
, C10
,7), 0 },
4053 { "dbgwcr11_el1", CPENC(2,0,C0
, C11
,7), 0 },
4054 { "dbgwcr12_el1", CPENC(2,0,C0
, C12
,7), 0 },
4055 { "dbgwcr13_el1", CPENC(2,0,C0
, C13
,7), 0 },
4056 { "dbgwcr14_el1", CPENC(2,0,C0
, C14
,7), 0 },
4057 { "dbgwcr15_el1", CPENC(2,0,C0
, C15
,7), 0 },
4058 { "mdrar_el1", CPENC(2,0,C1
, C0
, 0), F_REG_READ
}, /* r */
4059 { "oslar_el1", CPENC(2,0,C1
, C0
, 4), F_REG_WRITE
}, /* w */
4060 { "oslsr_el1", CPENC(2,0,C1
, C1
, 4), F_REG_READ
}, /* r */
4061 { "osdlr_el1", CPENC(2,0,C1
, C3
, 4), 0 },
4062 { "dbgprcr_el1", CPENC(2,0,C1
, C4
, 4), 0 },
4063 { "dbgclaimset_el1", CPENC(2,0,C7
, C8
, 6), 0 },
4064 { "dbgclaimclr_el1", CPENC(2,0,C7
, C9
, 6), 0 },
4065 { "dbgauthstatus_el1", CPENC(2,0,C7
, C14
,6), F_REG_READ
}, /* r */
4066 { "pmblimitr_el1", CPENC (3, 0, C9
, C10
, 0), F_ARCHEXT
}, /* rw */
4067 { "pmbptr_el1", CPENC (3, 0, C9
, C10
, 1), F_ARCHEXT
}, /* rw */
4068 { "pmbsr_el1", CPENC (3, 0, C9
, C10
, 3), F_ARCHEXT
}, /* rw */
4069 { "pmbidr_el1", CPENC (3, 0, C9
, C10
, 7), F_ARCHEXT
| F_REG_READ
}, /* ro */
4070 { "pmscr_el1", CPENC (3, 0, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
4071 { "pmsicr_el1", CPENC (3, 0, C9
, C9
, 2), F_ARCHEXT
}, /* rw */
4072 { "pmsirr_el1", CPENC (3, 0, C9
, C9
, 3), F_ARCHEXT
}, /* rw */
4073 { "pmsfcr_el1", CPENC (3, 0, C9
, C9
, 4), F_ARCHEXT
}, /* rw */
4074 { "pmsevfr_el1", CPENC (3, 0, C9
, C9
, 5), F_ARCHEXT
}, /* rw */
4075 { "pmslatfr_el1", CPENC (3, 0, C9
, C9
, 6), F_ARCHEXT
}, /* rw */
4076 { "pmsidr_el1", CPENC (3, 0, C9
, C9
, 7), F_ARCHEXT
}, /* rw */
4077 { "pmscr_el2", CPENC (3, 4, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
4078 { "pmscr_el12", CPENC (3, 5, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
4079 { "pmcr_el0", CPENC(3,3,C9
,C12
, 0), 0 },
4080 { "pmcntenset_el0", CPENC(3,3,C9
,C12
, 1), 0 },
4081 { "pmcntenclr_el0", CPENC(3,3,C9
,C12
, 2), 0 },
4082 { "pmovsclr_el0", CPENC(3,3,C9
,C12
, 3), 0 },
4083 { "pmswinc_el0", CPENC(3,3,C9
,C12
, 4), F_REG_WRITE
}, /* w */
4084 { "pmselr_el0", CPENC(3,3,C9
,C12
, 5), 0 },
4085 { "pmceid0_el0", CPENC(3,3,C9
,C12
, 6), F_REG_READ
}, /* r */
4086 { "pmceid1_el0", CPENC(3,3,C9
,C12
, 7), F_REG_READ
}, /* r */
4087 { "pmccntr_el0", CPENC(3,3,C9
,C13
, 0), 0 },
4088 { "pmxevtyper_el0", CPENC(3,3,C9
,C13
, 1), 0 },
4089 { "pmxevcntr_el0", CPENC(3,3,C9
,C13
, 2), 0 },
4090 { "pmuserenr_el0", CPENC(3,3,C9
,C14
, 0), 0 },
4091 { "pmintenset_el1", CPENC(3,0,C9
,C14
, 1), 0 },
4092 { "pmintenclr_el1", CPENC(3,0,C9
,C14
, 2), 0 },
4093 { "pmovsset_el0", CPENC(3,3,C9
,C14
, 3), 0 },
4094 { "pmevcntr0_el0", CPENC(3,3,C14
,C8
, 0), 0 },
4095 { "pmevcntr1_el0", CPENC(3,3,C14
,C8
, 1), 0 },
4096 { "pmevcntr2_el0", CPENC(3,3,C14
,C8
, 2), 0 },
4097 { "pmevcntr3_el0", CPENC(3,3,C14
,C8
, 3), 0 },
4098 { "pmevcntr4_el0", CPENC(3,3,C14
,C8
, 4), 0 },
4099 { "pmevcntr5_el0", CPENC(3,3,C14
,C8
, 5), 0 },
4100 { "pmevcntr6_el0", CPENC(3,3,C14
,C8
, 6), 0 },
4101 { "pmevcntr7_el0", CPENC(3,3,C14
,C8
, 7), 0 },
4102 { "pmevcntr8_el0", CPENC(3,3,C14
,C9
, 0), 0 },
4103 { "pmevcntr9_el0", CPENC(3,3,C14
,C9
, 1), 0 },
4104 { "pmevcntr10_el0", CPENC(3,3,C14
,C9
, 2), 0 },
4105 { "pmevcntr11_el0", CPENC(3,3,C14
,C9
, 3), 0 },
4106 { "pmevcntr12_el0", CPENC(3,3,C14
,C9
, 4), 0 },
4107 { "pmevcntr13_el0", CPENC(3,3,C14
,C9
, 5), 0 },
4108 { "pmevcntr14_el0", CPENC(3,3,C14
,C9
, 6), 0 },
4109 { "pmevcntr15_el0", CPENC(3,3,C14
,C9
, 7), 0 },
4110 { "pmevcntr16_el0", CPENC(3,3,C14
,C10
,0), 0 },
4111 { "pmevcntr17_el0", CPENC(3,3,C14
,C10
,1), 0 },
4112 { "pmevcntr18_el0", CPENC(3,3,C14
,C10
,2), 0 },
4113 { "pmevcntr19_el0", CPENC(3,3,C14
,C10
,3), 0 },
4114 { "pmevcntr20_el0", CPENC(3,3,C14
,C10
,4), 0 },
4115 { "pmevcntr21_el0", CPENC(3,3,C14
,C10
,5), 0 },
4116 { "pmevcntr22_el0", CPENC(3,3,C14
,C10
,6), 0 },
4117 { "pmevcntr23_el0", CPENC(3,3,C14
,C10
,7), 0 },
4118 { "pmevcntr24_el0", CPENC(3,3,C14
,C11
,0), 0 },
4119 { "pmevcntr25_el0", CPENC(3,3,C14
,C11
,1), 0 },
4120 { "pmevcntr26_el0", CPENC(3,3,C14
,C11
,2), 0 },
4121 { "pmevcntr27_el0", CPENC(3,3,C14
,C11
,3), 0 },
4122 { "pmevcntr28_el0", CPENC(3,3,C14
,C11
,4), 0 },
4123 { "pmevcntr29_el0", CPENC(3,3,C14
,C11
,5), 0 },
4124 { "pmevcntr30_el0", CPENC(3,3,C14
,C11
,6), 0 },
4125 { "pmevtyper0_el0", CPENC(3,3,C14
,C12
,0), 0 },
4126 { "pmevtyper1_el0", CPENC(3,3,C14
,C12
,1), 0 },
4127 { "pmevtyper2_el0", CPENC(3,3,C14
,C12
,2), 0 },
4128 { "pmevtyper3_el0", CPENC(3,3,C14
,C12
,3), 0 },
4129 { "pmevtyper4_el0", CPENC(3,3,C14
,C12
,4), 0 },
4130 { "pmevtyper5_el0", CPENC(3,3,C14
,C12
,5), 0 },
4131 { "pmevtyper6_el0", CPENC(3,3,C14
,C12
,6), 0 },
4132 { "pmevtyper7_el0", CPENC(3,3,C14
,C12
,7), 0 },
4133 { "pmevtyper8_el0", CPENC(3,3,C14
,C13
,0), 0 },
4134 { "pmevtyper9_el0", CPENC(3,3,C14
,C13
,1), 0 },
4135 { "pmevtyper10_el0", CPENC(3,3,C14
,C13
,2), 0 },
4136 { "pmevtyper11_el0", CPENC(3,3,C14
,C13
,3), 0 },
4137 { "pmevtyper12_el0", CPENC(3,3,C14
,C13
,4), 0 },
4138 { "pmevtyper13_el0", CPENC(3,3,C14
,C13
,5), 0 },
4139 { "pmevtyper14_el0", CPENC(3,3,C14
,C13
,6), 0 },
4140 { "pmevtyper15_el0", CPENC(3,3,C14
,C13
,7), 0 },
4141 { "pmevtyper16_el0", CPENC(3,3,C14
,C14
,0), 0 },
4142 { "pmevtyper17_el0", CPENC(3,3,C14
,C14
,1), 0 },
4143 { "pmevtyper18_el0", CPENC(3,3,C14
,C14
,2), 0 },
4144 { "pmevtyper19_el0", CPENC(3,3,C14
,C14
,3), 0 },
4145 { "pmevtyper20_el0", CPENC(3,3,C14
,C14
,4), 0 },
4146 { "pmevtyper21_el0", CPENC(3,3,C14
,C14
,5), 0 },
4147 { "pmevtyper22_el0", CPENC(3,3,C14
,C14
,6), 0 },
4148 { "pmevtyper23_el0", CPENC(3,3,C14
,C14
,7), 0 },
4149 { "pmevtyper24_el0", CPENC(3,3,C14
,C15
,0), 0 },
4150 { "pmevtyper25_el0", CPENC(3,3,C14
,C15
,1), 0 },
4151 { "pmevtyper26_el0", CPENC(3,3,C14
,C15
,2), 0 },
4152 { "pmevtyper27_el0", CPENC(3,3,C14
,C15
,3), 0 },
4153 { "pmevtyper28_el0", CPENC(3,3,C14
,C15
,4), 0 },
4154 { "pmevtyper29_el0", CPENC(3,3,C14
,C15
,5), 0 },
4155 { "pmevtyper30_el0", CPENC(3,3,C14
,C15
,6), 0 },
4156 { "pmccfiltr_el0", CPENC(3,3,C14
,C15
,7), 0 },
4158 { "dit", CPEN_ (3, C2
, 5), F_ARCHEXT
},
4159 { "vstcr_el2", CPENC(3, 4, C2
, C6
, 2), F_ARCHEXT
},
4160 { "vsttbr_el2", CPENC(3, 4, C2
, C6
, 0), F_ARCHEXT
},
4161 { "cnthvs_tval_el2", CPENC(3, 4, C14
, C4
, 0), F_ARCHEXT
},
4162 { "cnthvs_cval_el2", CPENC(3, 4, C14
, C4
, 2), F_ARCHEXT
},
4163 { "cnthvs_ctl_el2", CPENC(3, 4, C14
, C4
, 1), F_ARCHEXT
},
4164 { "cnthps_tval_el2", CPENC(3, 4, C14
, C5
, 0), F_ARCHEXT
},
4165 { "cnthps_cval_el2", CPENC(3, 4, C14
, C5
, 2), F_ARCHEXT
},
4166 { "cnthps_ctl_el2", CPENC(3, 4, C14
, C5
, 1), F_ARCHEXT
},
4167 { "sder32_el2", CPENC(3, 4, C1
, C3
, 1), F_ARCHEXT
},
4168 { "vncr_el2", CPENC(3, 4, C2
, C2
, 0), F_ARCHEXT
},
4169 { 0, CPENC(0,0,0,0,0), 0 },
4173 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
4175 return (reg
->flags
& F_DEPRECATED
) != 0;
4179 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
4180 const aarch64_sys_reg
*reg
)
4182 if (!(reg
->flags
& F_ARCHEXT
))
4185 /* PAN. Values are from aarch64_sys_regs. */
4186 if (reg
->value
== CPEN_(0,C2
,3)
4187 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
4190 /* SCXTNUM_ELx registers. */
4191 if ((reg
->value
== CPENC (3, 3, C13
, C0
, 7)
4192 || reg
->value
== CPENC (3, 0, C13
, C0
, 7)
4193 || reg
->value
== CPENC (3, 4, C13
, C0
, 7)
4194 || reg
->value
== CPENC (3, 6, C13
, C0
, 7)
4195 || reg
->value
== CPENC (3, 5, C13
, C0
, 7))
4196 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SCXTNUM
))
4199 /* ID_PFR2_EL1 register. */
4200 if (reg
->value
== CPENC(3, 0, C0
, C3
, 4)
4201 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_ID_PFR2
))
4204 /* SSBS. Values are from aarch64_sys_regs. */
4205 if (reg
->value
== CPEN_(3,C2
,6)
4206 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SSBS
))
4209 /* Virtualization host extensions: system registers. */
4210 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
4211 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
4212 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
4213 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
4214 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
4215 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4218 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
4219 if ((reg
->value
== CPEN_ (5, C0
, 0)
4220 || reg
->value
== CPEN_ (5, C0
, 1)
4221 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
4222 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
4223 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
4224 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
4225 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
4226 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
4227 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
4228 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
4229 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
4230 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
4231 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
4232 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
4233 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
4234 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
4235 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4238 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
4239 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
4240 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
4241 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
4242 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
4243 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
4244 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
4245 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
4248 /* ARMv8.2 features. */
4250 /* ID_AA64MMFR2_EL1. */
4251 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
4252 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4256 if (reg
->value
== CPEN_ (0, C2
, 4)
4257 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4260 /* RAS extension. */
4262 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
4263 ERXMISC0_EL1 AND ERXMISC1_EL1. */
4264 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
4265 || reg
->value
== CPENC (3, 0, C5
, C3
, 1)
4266 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
4267 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
4268 || reg
->value
== CPENC (3, 0, C5
, C4
, 0)
4269 || reg
->value
== CPENC (3, 0, C5
, C4
, 1)
4270 || reg
->value
== CPENC (3, 0, C5
, C4
, 2)
4271 || reg
->value
== CPENC (3, 0, C5
, C4
, 3)
4272 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
4273 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
4274 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4277 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4278 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
4279 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
4280 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
4281 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
4284 /* Statistical Profiling extension. */
4285 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
4286 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
4287 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
4288 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
4289 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
4290 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
4291 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
4292 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
4293 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
4294 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
4295 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
4296 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
4297 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
4298 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
4301 /* ARMv8.3 Pointer authentication keys. */
4302 if ((reg
->value
== CPENC (3, 0, C2
, C1
, 0)
4303 || reg
->value
== CPENC (3, 0, C2
, C1
, 1)
4304 || reg
->value
== CPENC (3, 0, C2
, C1
, 2)
4305 || reg
->value
== CPENC (3, 0, C2
, C1
, 3)
4306 || reg
->value
== CPENC (3, 0, C2
, C2
, 0)
4307 || reg
->value
== CPENC (3, 0, C2
, C2
, 1)
4308 || reg
->value
== CPENC (3, 0, C2
, C2
, 2)
4309 || reg
->value
== CPENC (3, 0, C2
, C2
, 3)
4310 || reg
->value
== CPENC (3, 0, C2
, C3
, 0)
4311 || reg
->value
== CPENC (3, 0, C2
, C3
, 1))
4312 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_3
))
4316 if ((reg
->value
== CPENC (3, 0, C0
, C4
, 4)
4317 || reg
->value
== CPENC (3, 0, C1
, C2
, 0)
4318 || reg
->value
== CPENC (3, 4, C1
, C2
, 0)
4319 || reg
->value
== CPENC (3, 6, C1
, C2
, 0)
4320 || reg
->value
== CPENC (3, 5, C1
, C2
, 0)
4321 || reg
->value
== CPENC (3, 0, C0
, C0
, 7))
4322 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SVE
))
4325 /* ARMv8.4 features. */
4328 if (reg
->value
== CPEN_ (3, C2
, 5)
4329 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4332 /* Virtualization extensions. */
4333 if ((reg
->value
== CPENC(3, 4, C2
, C6
, 2)
4334 || reg
->value
== CPENC(3, 4, C2
, C6
, 0)
4335 || reg
->value
== CPENC(3, 4, C14
, C4
, 0)
4336 || reg
->value
== CPENC(3, 4, C14
, C4
, 2)
4337 || reg
->value
== CPENC(3, 4, C14
, C4
, 1)
4338 || reg
->value
== CPENC(3, 4, C14
, C5
, 0)
4339 || reg
->value
== CPENC(3, 4, C14
, C5
, 2)
4340 || reg
->value
== CPENC(3, 4, C14
, C5
, 1)
4341 || reg
->value
== CPENC(3, 4, C1
, C3
, 1)
4342 || reg
->value
== CPENC(3, 4, C2
, C2
, 0))
4343 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4346 /* ARMv8.4 TLB instructions. */
4347 if ((reg
->value
== CPENS (0, C8
, C1
, 0)
4348 || reg
->value
== CPENS (0, C8
, C1
, 1)
4349 || reg
->value
== CPENS (0, C8
, C1
, 2)
4350 || reg
->value
== CPENS (0, C8
, C1
, 3)
4351 || reg
->value
== CPENS (0, C8
, C1
, 5)
4352 || reg
->value
== CPENS (0, C8
, C1
, 7)
4353 || reg
->value
== CPENS (4, C8
, C4
, 0)
4354 || reg
->value
== CPENS (4, C8
, C4
, 4)
4355 || reg
->value
== CPENS (4, C8
, C1
, 1)
4356 || reg
->value
== CPENS (4, C8
, C1
, 5)
4357 || reg
->value
== CPENS (4, C8
, C1
, 6)
4358 || reg
->value
== CPENS (6, C8
, C1
, 1)
4359 || reg
->value
== CPENS (6, C8
, C1
, 5)
4360 || reg
->value
== CPENS (4, C8
, C1
, 0)
4361 || reg
->value
== CPENS (4, C8
, C1
, 4)
4362 || reg
->value
== CPENS (6, C8
, C1
, 0)
4363 || reg
->value
== CPENS (0, C8
, C6
, 1)
4364 || reg
->value
== CPENS (0, C8
, C6
, 3)
4365 || reg
->value
== CPENS (0, C8
, C6
, 5)
4366 || reg
->value
== CPENS (0, C8
, C6
, 7)
4367 || reg
->value
== CPENS (0, C8
, C2
, 1)
4368 || reg
->value
== CPENS (0, C8
, C2
, 3)
4369 || reg
->value
== CPENS (0, C8
, C2
, 5)
4370 || reg
->value
== CPENS (0, C8
, C2
, 7)
4371 || reg
->value
== CPENS (0, C8
, C5
, 1)
4372 || reg
->value
== CPENS (0, C8
, C5
, 3)
4373 || reg
->value
== CPENS (0, C8
, C5
, 5)
4374 || reg
->value
== CPENS (0, C8
, C5
, 7)
4375 || reg
->value
== CPENS (4, C8
, C0
, 2)
4376 || reg
->value
== CPENS (4, C8
, C0
, 6)
4377 || reg
->value
== CPENS (4, C8
, C4
, 2)
4378 || reg
->value
== CPENS (4, C8
, C4
, 6)
4379 || reg
->value
== CPENS (4, C8
, C4
, 3)
4380 || reg
->value
== CPENS (4, C8
, C4
, 7)
4381 || reg
->value
== CPENS (4, C8
, C6
, 1)
4382 || reg
->value
== CPENS (4, C8
, C6
, 5)
4383 || reg
->value
== CPENS (4, C8
, C2
, 1)
4384 || reg
->value
== CPENS (4, C8
, C2
, 5)
4385 || reg
->value
== CPENS (4, C8
, C5
, 1)
4386 || reg
->value
== CPENS (4, C8
, C5
, 5)
4387 || reg
->value
== CPENS (6, C8
, C6
, 1)
4388 || reg
->value
== CPENS (6, C8
, C6
, 5)
4389 || reg
->value
== CPENS (6, C8
, C2
, 1)
4390 || reg
->value
== CPENS (6, C8
, C2
, 5)
4391 || reg
->value
== CPENS (6, C8
, C5
, 1)
4392 || reg
->value
== CPENS (6, C8
, C5
, 5))
4393 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4396 /* Random Number Instructions. For now they are available
4397 (and optional) only with ARMv8.5-A. */
4398 if ((reg
->value
== CPENC (3, 3, C2
, C4
, 0)
4399 || reg
->value
== CPENC (3, 3, C2
, C4
, 1))
4400 && !(AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RNG
)
4401 && AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_5
)))
4404 /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG. */
4405 if ((reg
->value
== CPENC (3, 3, C4
, C2
, 7)
4406 || reg
->value
== CPENC (3, 0, C6
, C6
, 1)
4407 || reg
->value
== CPENC (3, 0, C6
, C5
, 0)
4408 || reg
->value
== CPENC (3, 4, C6
, C5
, 0)
4409 || reg
->value
== CPENC (3, 6, C6
, C6
, 0)
4410 || reg
->value
== CPENC (3, 5, C6
, C6
, 0)
4411 || reg
->value
== CPENC (3, 0, C1
, C0
, 5)
4412 || reg
->value
== CPENC (3, 0, C1
, C0
, 6))
4413 && !(AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_MEMTAG
)))
4419 /* The CPENC below is fairly misleading, the fields
4420 here are not in CPENC form. They are in op2op1 form. The fields are encoded
4421 by ins_pstatefield, which just shifts the value by the width of the fields
4422 in a loop. So if you CPENC them only the first value will be set, the rest
4423 are masked out to 0. As an example. op2 = 3, op1=2. CPENC would produce a
4424 value of 0b110000000001000000 (0x30040) while what you want is
4426 const aarch64_sys_reg aarch64_pstatefields
[] =
4428 { "spsel", 0x05, 0 },
4429 { "daifset", 0x1e, 0 },
4430 { "daifclr", 0x1f, 0 },
4431 { "pan", 0x04, F_ARCHEXT
},
4432 { "uao", 0x03, F_ARCHEXT
},
4433 { "ssbs", 0x19, F_ARCHEXT
},
4434 { "dit", 0x1a, F_ARCHEXT
},
4435 { "tco", 0x1c, F_ARCHEXT
},
4436 { 0, CPENC(0,0,0,0,0), 0 },
4440 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
4441 const aarch64_sys_reg
*reg
)
4443 if (!(reg
->flags
& F_ARCHEXT
))
4446 /* PAN. Values are from aarch64_pstatefields. */
4447 if (reg
->value
== 0x04
4448 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
4451 /* UAO. Values are from aarch64_pstatefields. */
4452 if (reg
->value
== 0x03
4453 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4456 /* SSBS. Values are from aarch64_pstatefields. */
4457 if (reg
->value
== 0x19
4458 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_SSBS
))
4461 /* DIT. Values are from aarch64_pstatefields. */
4462 if (reg
->value
== 0x1a
4463 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4466 /* TCO. Values are from aarch64_pstatefields. */
4467 if (reg
->value
== 0x1c
4468 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_MEMTAG
))
4474 const aarch64_sys_ins_reg aarch64_sys_regs_ic
[] =
4476 { "ialluis", CPENS(0,C7
,C1
,0), 0 },
4477 { "iallu", CPENS(0,C7
,C5
,0), 0 },
4478 { "ivau", CPENS (3, C7
, C5
, 1), F_HASXT
},
4479 { 0, CPENS(0,0,0,0), 0 }
4482 const aarch64_sys_ins_reg aarch64_sys_regs_dc
[] =
4484 { "zva", CPENS (3, C7
, C4
, 1), F_HASXT
},
4485 { "gva", CPENS (3, C7
, C4
, 3), F_HASXT
| F_ARCHEXT
},
4486 { "gzva", CPENS (3, C7
, C4
, 4), F_HASXT
| F_ARCHEXT
},
4487 { "ivac", CPENS (0, C7
, C6
, 1), F_HASXT
},
4488 { "igvac", CPENS (0, C7
, C6
, 3), F_HASXT
| F_ARCHEXT
},
4489 { "igsw", CPENS (0, C7
, C6
, 4), F_HASXT
| F_ARCHEXT
},
4490 { "isw", CPENS (0, C7
, C6
, 2), F_HASXT
},
4491 { "igdvac", CPENS (0, C7
, C6
, 5), F_HASXT
| F_ARCHEXT
},
4492 { "igdsw", CPENS (0, C7
, C6
, 6), F_HASXT
| F_ARCHEXT
},
4493 { "cvac", CPENS (3, C7
, C10
, 1), F_HASXT
},
4494 { "cgvac", CPENS (3, C7
, C10
, 3), F_HASXT
| F_ARCHEXT
},
4495 { "cgdvac", CPENS (3, C7
, C10
, 5), F_HASXT
| F_ARCHEXT
},
4496 { "csw", CPENS (0, C7
, C10
, 2), F_HASXT
},
4497 { "cgsw", CPENS (0, C7
, C10
, 4), F_HASXT
| F_ARCHEXT
},
4498 { "cgdsw", CPENS (0, C7
, C10
, 6), F_HASXT
| F_ARCHEXT
},
4499 { "cvau", CPENS (3, C7
, C11
, 1), F_HASXT
},
4500 { "cvap", CPENS (3, C7
, C12
, 1), F_HASXT
| F_ARCHEXT
},
4501 { "cgvap", CPENS (3, C7
, C12
, 3), F_HASXT
| F_ARCHEXT
},
4502 { "cgdvap", CPENS (3, C7
, C12
, 5), F_HASXT
| F_ARCHEXT
},
4503 { "cvadp", CPENS (3, C7
, C13
, 1), F_HASXT
| F_ARCHEXT
},
4504 { "cgvadp", CPENS (3, C7
, C13
, 3), F_HASXT
| F_ARCHEXT
},
4505 { "cgdvadp", CPENS (3, C7
, C13
, 5), F_HASXT
| F_ARCHEXT
},
4506 { "civac", CPENS (3, C7
, C14
, 1), F_HASXT
},
4507 { "cigvac", CPENS (3, C7
, C14
, 3), F_HASXT
| F_ARCHEXT
},
4508 { "cigdvac", CPENS (3, C7
, C14
, 5), F_HASXT
| F_ARCHEXT
},
4509 { "cisw", CPENS (0, C7
, C14
, 2), F_HASXT
},
4510 { "cigsw", CPENS (0, C7
, C14
, 4), F_HASXT
| F_ARCHEXT
},
4511 { "cigdsw", CPENS (0, C7
, C14
, 6), F_HASXT
| F_ARCHEXT
},
4512 { 0, CPENS(0,0,0,0), 0 }
4515 const aarch64_sys_ins_reg aarch64_sys_regs_at
[] =
4517 { "s1e1r", CPENS (0, C7
, C8
, 0), F_HASXT
},
4518 { "s1e1w", CPENS (0, C7
, C8
, 1), F_HASXT
},
4519 { "s1e0r", CPENS (0, C7
, C8
, 2), F_HASXT
},
4520 { "s1e0w", CPENS (0, C7
, C8
, 3), F_HASXT
},
4521 { "s12e1r", CPENS (4, C7
, C8
, 4), F_HASXT
},
4522 { "s12e1w", CPENS (4, C7
, C8
, 5), F_HASXT
},
4523 { "s12e0r", CPENS (4, C7
, C8
, 6), F_HASXT
},
4524 { "s12e0w", CPENS (4, C7
, C8
, 7), F_HASXT
},
4525 { "s1e2r", CPENS (4, C7
, C8
, 0), F_HASXT
},
4526 { "s1e2w", CPENS (4, C7
, C8
, 1), F_HASXT
},
4527 { "s1e3r", CPENS (6, C7
, C8
, 0), F_HASXT
},
4528 { "s1e3w", CPENS (6, C7
, C8
, 1), F_HASXT
},
4529 { "s1e1rp", CPENS (0, C7
, C9
, 0), F_HASXT
| F_ARCHEXT
},
4530 { "s1e1wp", CPENS (0, C7
, C9
, 1), F_HASXT
| F_ARCHEXT
},
4531 { 0, CPENS(0,0,0,0), 0 }
4534 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi
[] =
4536 { "vmalle1", CPENS(0,C8
,C7
,0), 0 },
4537 { "vae1", CPENS (0, C8
, C7
, 1), F_HASXT
},
4538 { "aside1", CPENS (0, C8
, C7
, 2), F_HASXT
},
4539 { "vaae1", CPENS (0, C8
, C7
, 3), F_HASXT
},
4540 { "vmalle1is", CPENS(0,C8
,C3
,0), 0 },
4541 { "vae1is", CPENS (0, C8
, C3
, 1), F_HASXT
},
4542 { "aside1is", CPENS (0, C8
, C3
, 2), F_HASXT
},
4543 { "vaae1is", CPENS (0, C8
, C3
, 3), F_HASXT
},
4544 { "ipas2e1is", CPENS (4, C8
, C0
, 1), F_HASXT
},
4545 { "ipas2le1is",CPENS (4, C8
, C0
, 5), F_HASXT
},
4546 { "ipas2e1", CPENS (4, C8
, C4
, 1), F_HASXT
},
4547 { "ipas2le1", CPENS (4, C8
, C4
, 5), F_HASXT
},
4548 { "vae2", CPENS (4, C8
, C7
, 1), F_HASXT
},
4549 { "vae2is", CPENS (4, C8
, C3
, 1), F_HASXT
},
4550 { "vmalls12e1",CPENS(4,C8
,C7
,6), 0 },
4551 { "vmalls12e1is",CPENS(4,C8
,C3
,6), 0 },
4552 { "vae3", CPENS (6, C8
, C7
, 1), F_HASXT
},
4553 { "vae3is", CPENS (6, C8
, C3
, 1), F_HASXT
},
4554 { "alle2", CPENS(4,C8
,C7
,0), 0 },
4555 { "alle2is", CPENS(4,C8
,C3
,0), 0 },
4556 { "alle1", CPENS(4,C8
,C7
,4), 0 },
4557 { "alle1is", CPENS(4,C8
,C3
,4), 0 },
4558 { "alle3", CPENS(6,C8
,C7
,0), 0 },
4559 { "alle3is", CPENS(6,C8
,C3
,0), 0 },
4560 { "vale1is", CPENS (0, C8
, C3
, 5), F_HASXT
},
4561 { "vale2is", CPENS (4, C8
, C3
, 5), F_HASXT
},
4562 { "vale3is", CPENS (6, C8
, C3
, 5), F_HASXT
},
4563 { "vaale1is", CPENS (0, C8
, C3
, 7), F_HASXT
},
4564 { "vale1", CPENS (0, C8
, C7
, 5), F_HASXT
},
4565 { "vale2", CPENS (4, C8
, C7
, 5), F_HASXT
},
4566 { "vale3", CPENS (6, C8
, C7
, 5), F_HASXT
},
4567 { "vaale1", CPENS (0, C8
, C7
, 7), F_HASXT
},
4569 { "vmalle1os", CPENS (0, C8
, C1
, 0), F_ARCHEXT
},
4570 { "vae1os", CPENS (0, C8
, C1
, 1), F_HASXT
| F_ARCHEXT
},
4571 { "aside1os", CPENS (0, C8
, C1
, 2), F_HASXT
| F_ARCHEXT
},
4572 { "vaae1os", CPENS (0, C8
, C1
, 3), F_HASXT
| F_ARCHEXT
},
4573 { "vale1os", CPENS (0, C8
, C1
, 5), F_HASXT
| F_ARCHEXT
},
4574 { "vaale1os", CPENS (0, C8
, C1
, 7), F_HASXT
| F_ARCHEXT
},
4575 { "ipas2e1os", CPENS (4, C8
, C4
, 0), F_HASXT
| F_ARCHEXT
},
4576 { "ipas2le1os", CPENS (4, C8
, C4
, 4), F_HASXT
| F_ARCHEXT
},
4577 { "vae2os", CPENS (4, C8
, C1
, 1), F_HASXT
| F_ARCHEXT
},
4578 { "vale2os", CPENS (4, C8
, C1
, 5), F_HASXT
| F_ARCHEXT
},
4579 { "vmalls12e1os", CPENS (4, C8
, C1
, 6), F_ARCHEXT
},
4580 { "vae3os", CPENS (6, C8
, C1
, 1), F_HASXT
| F_ARCHEXT
},
4581 { "vale3os", CPENS (6, C8
, C1
, 5), F_HASXT
| F_ARCHEXT
},
4582 { "alle2os", CPENS (4, C8
, C1
, 0), F_ARCHEXT
},
4583 { "alle1os", CPENS (4, C8
, C1
, 4), F_ARCHEXT
},
4584 { "alle3os", CPENS (6, C8
, C1
, 0), F_ARCHEXT
},
4586 { "rvae1", CPENS (0, C8
, C6
, 1), F_HASXT
| F_ARCHEXT
},
4587 { "rvaae1", CPENS (0, C8
, C6
, 3), F_HASXT
| F_ARCHEXT
},
4588 { "rvale1", CPENS (0, C8
, C6
, 5), F_HASXT
| F_ARCHEXT
},
4589 { "rvaale1", CPENS (0, C8
, C6
, 7), F_HASXT
| F_ARCHEXT
},
4590 { "rvae1is", CPENS (0, C8
, C2
, 1), F_HASXT
| F_ARCHEXT
},
4591 { "rvaae1is", CPENS (0, C8
, C2
, 3), F_HASXT
| F_ARCHEXT
},
4592 { "rvale1is", CPENS (0, C8
, C2
, 5), F_HASXT
| F_ARCHEXT
},
4593 { "rvaale1is", CPENS (0, C8
, C2
, 7), F_HASXT
| F_ARCHEXT
},
4594 { "rvae1os", CPENS (0, C8
, C5
, 1), F_HASXT
| F_ARCHEXT
},
4595 { "rvaae1os", CPENS (0, C8
, C5
, 3), F_HASXT
| F_ARCHEXT
},
4596 { "rvale1os", CPENS (0, C8
, C5
, 5), F_HASXT
| F_ARCHEXT
},
4597 { "rvaale1os", CPENS (0, C8
, C5
, 7), F_HASXT
| F_ARCHEXT
},
4598 { "ripas2e1is", CPENS (4, C8
, C0
, 2), F_HASXT
| F_ARCHEXT
},
4599 { "ripas2le1is",CPENS (4, C8
, C0
, 6), F_HASXT
| F_ARCHEXT
},
4600 { "ripas2e1", CPENS (4, C8
, C4
, 2), F_HASXT
| F_ARCHEXT
},
4601 { "ripas2le1", CPENS (4, C8
, C4
, 6), F_HASXT
| F_ARCHEXT
},
4602 { "ripas2e1os", CPENS (4, C8
, C4
, 3), F_HASXT
| F_ARCHEXT
},
4603 { "ripas2le1os",CPENS (4, C8
, C4
, 7), F_HASXT
| F_ARCHEXT
},
4604 { "rvae2", CPENS (4, C8
, C6
, 1), F_HASXT
| F_ARCHEXT
},
4605 { "rvale2", CPENS (4, C8
, C6
, 5), F_HASXT
| F_ARCHEXT
},
4606 { "rvae2is", CPENS (4, C8
, C2
, 1), F_HASXT
| F_ARCHEXT
},
4607 { "rvale2is", CPENS (4, C8
, C2
, 5), F_HASXT
| F_ARCHEXT
},
4608 { "rvae2os", CPENS (4, C8
, C5
, 1), F_HASXT
| F_ARCHEXT
},
4609 { "rvale2os", CPENS (4, C8
, C5
, 5), F_HASXT
| F_ARCHEXT
},
4610 { "rvae3", CPENS (6, C8
, C6
, 1), F_HASXT
| F_ARCHEXT
},
4611 { "rvale3", CPENS (6, C8
, C6
, 5), F_HASXT
| F_ARCHEXT
},
4612 { "rvae3is", CPENS (6, C8
, C2
, 1), F_HASXT
| F_ARCHEXT
},
4613 { "rvale3is", CPENS (6, C8
, C2
, 5), F_HASXT
| F_ARCHEXT
},
4614 { "rvae3os", CPENS (6, C8
, C5
, 1), F_HASXT
| F_ARCHEXT
},
4615 { "rvale3os", CPENS (6, C8
, C5
, 5), F_HASXT
| F_ARCHEXT
},
4617 { 0, CPENS(0,0,0,0), 0 }
4620 const aarch64_sys_ins_reg aarch64_sys_regs_sr
[] =
4622 /* RCTX is somewhat unique in a way that it has different values
4623 (op2) based on the instruction in which it is used (cfp/dvp/cpp).
4624 Thus op2 is masked out and instead encoded directly in the
4625 aarch64_opcode_table entries for the respective instructions. */
4626 { "rctx", CPENS(3,C7
,C3
,0), F_HASXT
| F_ARCHEXT
| F_REG_WRITE
}, /* WO */
4628 { 0, CPENS(0,0,0,0), 0 }
4632 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
4634 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
4638 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
4639 const aarch64_sys_ins_reg
*reg
)
4641 if (!(reg
->flags
& F_ARCHEXT
))
4644 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4645 if (reg
->value
== CPENS (3, C7
, C12
, 1)
4646 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4649 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
4650 if (reg
->value
== CPENS (3, C7
, C13
, 1)
4651 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_CVADP
))
4654 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
4655 if ((reg
->value
== CPENS (0, C7
, C6
, 3)
4656 || reg
->value
== CPENS (0, C7
, C6
, 4)
4657 || reg
->value
== CPENS (0, C7
, C10
, 4)
4658 || reg
->value
== CPENS (0, C7
, C14
, 4)
4659 || reg
->value
== CPENS (3, C7
, C10
, 3)
4660 || reg
->value
== CPENS (3, C7
, C12
, 3)
4661 || reg
->value
== CPENS (3, C7
, C13
, 3)
4662 || reg
->value
== CPENS (3, C7
, C14
, 3)
4663 || reg
->value
== CPENS (3, C7
, C4
, 3)
4664 || reg
->value
== CPENS (0, C7
, C6
, 5)
4665 || reg
->value
== CPENS (0, C7
, C6
, 6)
4666 || reg
->value
== CPENS (0, C7
, C10
, 6)
4667 || reg
->value
== CPENS (0, C7
, C14
, 6)
4668 || reg
->value
== CPENS (3, C7
, C10
, 5)
4669 || reg
->value
== CPENS (3, C7
, C12
, 5)
4670 || reg
->value
== CPENS (3, C7
, C13
, 5)
4671 || reg
->value
== CPENS (3, C7
, C14
, 5)
4672 || reg
->value
== CPENS (3, C7
, C4
, 4))
4673 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_MEMTAG
))
4676 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4677 if ((reg
->value
== CPENS (0, C7
, C9
, 0)
4678 || reg
->value
== CPENS (0, C7
, C9
, 1))
4679 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4682 /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
4683 if (reg
->value
== CPENS (3, C7
, C3
, 0)
4684 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PREDRES
))
/* Extract bit BT of instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the (HI - LO + 1)-bit field [HI:LO] of instruction word INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4710 static enum err_type
4711 verify_ldpsw (const struct aarch64_inst
*inst ATTRIBUTE_UNUSED
,
4712 const aarch64_insn insn
, bfd_vma pc ATTRIBUTE_UNUSED
,
4713 bfd_boolean encoding ATTRIBUTE_UNUSED
,
4714 aarch64_operand_error
*mismatch_detail ATTRIBUTE_UNUSED
,
4715 aarch64_instr_sequence
*insn_sequence ATTRIBUTE_UNUSED
)
4717 int t
= BITS (insn
, 4, 0);
4718 int n
= BITS (insn
, 9, 5);
4719 int t2
= BITS (insn
, 14, 10);
4723 /* Write back enabled. */
4724 if ((t
== n
|| t2
== n
) && n
!= 31)
4738 /* Verifier for vector by element 3 operands functions where the
4739 conditions `if sz:L == 11 then UNDEFINED` holds. */
4741 static enum err_type
4742 verify_elem_sd (const struct aarch64_inst
*inst
, const aarch64_insn insn
,
4743 bfd_vma pc ATTRIBUTE_UNUSED
, bfd_boolean encoding
,
4744 aarch64_operand_error
*mismatch_detail ATTRIBUTE_UNUSED
,
4745 aarch64_instr_sequence
*insn_sequence ATTRIBUTE_UNUSED
)
4747 const aarch64_insn undef_pattern
= 0x3;
4750 assert (inst
->opcode
);
4751 assert (inst
->opcode
->operands
[2] == AARCH64_OPND_Em
);
4752 value
= encoding
? inst
->value
: insn
;
4755 if (undef_pattern
== extract_fields (value
, 0, 2, FLD_sz
, FLD_L
))
4761 /* Initialize an instruction sequence insn_sequence with the instruction INST.
4762 If INST is NULL the given insn_sequence is cleared and the sequence is left
4766 init_insn_sequence (const struct aarch64_inst
*inst
,
4767 aarch64_instr_sequence
*insn_sequence
)
4769 int num_req_entries
= 0;
4770 insn_sequence
->next_insn
= 0;
4771 insn_sequence
->num_insns
= num_req_entries
;
4772 if (insn_sequence
->instr
)
4773 XDELETE (insn_sequence
->instr
);
4774 insn_sequence
->instr
= NULL
;
4778 insn_sequence
->instr
= XNEW (aarch64_inst
);
4779 memcpy (insn_sequence
->instr
, inst
, sizeof (aarch64_inst
));
4782 /* Handle all the cases here. May need to think of something smarter than
4783 a giant if/else chain if this grows. At that time, a lookup table may be
4785 if (inst
&& inst
->opcode
->constraints
& C_SCAN_MOVPRFX
)
4786 num_req_entries
= 1;
4788 if (insn_sequence
->current_insns
)
4789 XDELETEVEC (insn_sequence
->current_insns
);
4790 insn_sequence
->current_insns
= NULL
;
4792 if (num_req_entries
!= 0)
4794 size_t size
= num_req_entries
* sizeof (aarch64_inst
);
4795 insn_sequence
->current_insns
4796 = (aarch64_inst
**) XNEWVEC (aarch64_inst
, num_req_entries
);
4797 memset (insn_sequence
->current_insns
, 0, size
);
4802 /* This function verifies that the instruction INST adheres to its specified
4803 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
4804 returned and MISMATCH_DETAIL contains the reason why verification failed.
4806 The function is called both during assembly and disassembly. If assembling
4807 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
4808 and will contain the PC of the current instruction w.r.t to the section.
4810 If ENCODING and PC=0 then you are at a start of a section. The constraints
4811 are verified against the given state insn_sequence which is updated as it
4812 transitions through the verification. */
4815 verify_constraints (const struct aarch64_inst
*inst
,
4816 const aarch64_insn insn ATTRIBUTE_UNUSED
,
4818 bfd_boolean encoding
,
4819 aarch64_operand_error
*mismatch_detail
,
4820 aarch64_instr_sequence
*insn_sequence
)
4823 assert (inst
->opcode
);
4825 const struct aarch64_opcode
*opcode
= inst
->opcode
;
4826 if (!opcode
->constraints
&& !insn_sequence
->instr
)
4829 assert (insn_sequence
);
4831 enum err_type res
= ERR_OK
;
4833 /* This instruction puts a constraint on the insn_sequence. */
4834 if (opcode
->flags
& F_SCAN
)
4836 if (insn_sequence
->instr
)
4838 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4839 mismatch_detail
->error
= _("instruction opens new dependency "
4840 "sequence without ending previous one");
4841 mismatch_detail
->index
= -1;
4842 mismatch_detail
->non_fatal
= TRUE
;
4846 init_insn_sequence (inst
, insn_sequence
);
4850 /* Verify constraints on an existing sequence. */
4851 if (insn_sequence
->instr
)
4853 const struct aarch64_opcode
* inst_opcode
= insn_sequence
->instr
->opcode
;
4854 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
4855 closed a previous one that we should have. */
4856 if (!encoding
&& pc
== 0)
4858 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4859 mismatch_detail
->error
= _("previous `movprfx' sequence not closed");
4860 mismatch_detail
->index
= -1;
4861 mismatch_detail
->non_fatal
= TRUE
;
4863 /* Reset the sequence. */
4864 init_insn_sequence (NULL
, insn_sequence
);
4868 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
4869 if (inst_opcode
->constraints
& C_SCAN_MOVPRFX
)
4871 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4872 instruction for better error messages. */
4873 if (!opcode
->avariant
4874 || !(*opcode
->avariant
&
4875 (AARCH64_FEATURE_SVE
| AARCH64_FEATURE_SVE2
)))
4877 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4878 mismatch_detail
->error
= _("SVE instruction expected after "
4880 mismatch_detail
->index
= -1;
4881 mismatch_detail
->non_fatal
= TRUE
;
4886 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4887 instruction that is allowed to be used with a MOVPRFX. */
4888 if (!(opcode
->constraints
& C_SCAN_MOVPRFX
))
4890 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4891 mismatch_detail
->error
= _("SVE `movprfx' compatible instruction "
4893 mismatch_detail
->index
= -1;
4894 mismatch_detail
->non_fatal
= TRUE
;
4899 /* Next check for usage of the predicate register. */
4900 aarch64_opnd_info blk_dest
= insn_sequence
->instr
->operands
[0];
4901 aarch64_opnd_info blk_pred
, inst_pred
;
4902 memset (&blk_pred
, 0, sizeof (aarch64_opnd_info
));
4903 memset (&inst_pred
, 0, sizeof (aarch64_opnd_info
));
4904 bfd_boolean predicated
= FALSE
;
4905 assert (blk_dest
.type
== AARCH64_OPND_SVE_Zd
);
4907 /* Determine if the movprfx instruction used is predicated or not. */
4908 if (insn_sequence
->instr
->operands
[1].type
== AARCH64_OPND_SVE_Pg3
)
4911 blk_pred
= insn_sequence
->instr
->operands
[1];
4914 unsigned char max_elem_size
= 0;
4915 unsigned char current_elem_size
;
4916 int num_op_used
= 0, last_op_usage
= 0;
4917 int i
, inst_pred_idx
= -1;
4918 int num_ops
= aarch64_num_of_operands (opcode
);
4919 for (i
= 0; i
< num_ops
; i
++)
4921 aarch64_opnd_info inst_op
= inst
->operands
[i
];
4922 switch (inst_op
.type
)
4924 case AARCH64_OPND_SVE_Zd
:
4925 case AARCH64_OPND_SVE_Zm_5
:
4926 case AARCH64_OPND_SVE_Zm_16
:
4927 case AARCH64_OPND_SVE_Zn
:
4928 case AARCH64_OPND_SVE_Zt
:
4929 case AARCH64_OPND_SVE_Vm
:
4930 case AARCH64_OPND_SVE_Vn
:
4931 case AARCH64_OPND_Va
:
4932 case AARCH64_OPND_Vn
:
4933 case AARCH64_OPND_Vm
:
4934 case AARCH64_OPND_Sn
:
4935 case AARCH64_OPND_Sm
:
4936 case AARCH64_OPND_Rn
:
4937 case AARCH64_OPND_Rm
:
4938 case AARCH64_OPND_Rn_SP
:
4939 case AARCH64_OPND_Rt_SP
:
4940 case AARCH64_OPND_Rm_SP
:
4941 if (inst_op
.reg
.regno
== blk_dest
.reg
.regno
)
4947 = aarch64_get_qualifier_esize (inst_op
.qualifier
);
4948 if (current_elem_size
> max_elem_size
)
4949 max_elem_size
= current_elem_size
;
4951 case AARCH64_OPND_SVE_Pd
:
4952 case AARCH64_OPND_SVE_Pg3
:
4953 case AARCH64_OPND_SVE_Pg4_5
:
4954 case AARCH64_OPND_SVE_Pg4_10
:
4955 case AARCH64_OPND_SVE_Pg4_16
:
4956 case AARCH64_OPND_SVE_Pm
:
4957 case AARCH64_OPND_SVE_Pn
:
4958 case AARCH64_OPND_SVE_Pt
:
4959 inst_pred
= inst_op
;
4967 assert (max_elem_size
!= 0);
4968 aarch64_opnd_info inst_dest
= inst
->operands
[0];
4969 /* Determine the size that should be used to compare against the
4972 = opcode
->constraints
& C_MAX_ELEM
4974 : aarch64_get_qualifier_esize (inst_dest
.qualifier
);
4976 /* If movprfx is predicated do some extra checks. */
4979 /* The instruction must be predicated. */
4980 if (inst_pred_idx
< 0)
4982 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4983 mismatch_detail
->error
= _("predicated instruction expected "
4985 mismatch_detail
->index
= -1;
4986 mismatch_detail
->non_fatal
= TRUE
;
4991 /* The instruction must have a merging predicate. */
4992 if (inst_pred
.qualifier
!= AARCH64_OPND_QLF_P_M
)
4994 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
4995 mismatch_detail
->error
= _("merging predicate expected due "
4996 "to preceding `movprfx'");
4997 mismatch_detail
->index
= inst_pred_idx
;
4998 mismatch_detail
->non_fatal
= TRUE
;
5003 /* The same register must be used in instruction. */
5004 if (blk_pred
.reg
.regno
!= inst_pred
.reg
.regno
)
5006 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5007 mismatch_detail
->error
= _("predicate register differs "
5008 "from that in preceding "
5010 mismatch_detail
->index
= inst_pred_idx
;
5011 mismatch_detail
->non_fatal
= TRUE
;
5017 /* Destructive operations by definition must allow one usage of the
5020 = aarch64_is_destructive_by_operands (opcode
) ? 2 : 1;
5022 /* Operand is not used at all. */
5023 if (num_op_used
== 0)
5025 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5026 mismatch_detail
->error
= _("output register of preceding "
5027 "`movprfx' not used in current "
5029 mismatch_detail
->index
= 0;
5030 mismatch_detail
->non_fatal
= TRUE
;
5035 /* We now know it's used, now determine exactly where it's used. */
5036 if (blk_dest
.reg
.regno
!= inst_dest
.reg
.regno
)
5038 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5039 mismatch_detail
->error
= _("output register of preceding "
5040 "`movprfx' expected as output");
5041 mismatch_detail
->index
= 0;
5042 mismatch_detail
->non_fatal
= TRUE
;
5047 /* Operand used more than allowed for the specific opcode type. */
5048 if (num_op_used
> allowed_usage
)
5050 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5051 mismatch_detail
->error
= _("output register of preceding "
5052 "`movprfx' used as input");
5053 mismatch_detail
->index
= last_op_usage
;
5054 mismatch_detail
->non_fatal
= TRUE
;
5059 /* Now the only thing left is the qualifiers checks. The register
5060 must have the same maximum element size. */
5061 if (inst_dest
.qualifier
5062 && blk_dest
.qualifier
5063 && current_elem_size
5064 != aarch64_get_qualifier_esize (blk_dest
.qualifier
))
5066 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5067 mismatch_detail
->error
= _("register size not compatible with "
5068 "previous `movprfx'");
5069 mismatch_detail
->index
= 0;
5070 mismatch_detail
->non_fatal
= TRUE
;
5077 /* Add the new instruction to the sequence. */
5078 memcpy (insn_sequence
->current_insns
+ insn_sequence
->next_insn
++,
5079 inst
, sizeof (aarch64_inst
));
5081 /* Check if sequence is now full. */
5082 if (insn_sequence
->next_insn
>= insn_sequence
->num_insns
)
5084 /* Sequence is full, but we don't have anything special to do for now,
5085 so clear and reset it. */
5086 init_insn_sequence (NULL
, insn_sequence
);
5094 /* Return true if VALUE cannot be moved into an SVE register using DUP
5095 (with any element size, not just ESIZE) and if using DUPM would
5096 therefore be OK. ESIZE is the number of bytes in the immediate. */
5099 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue
, int esize
)
5101 int64_t svalue
= uvalue
;
5102 uint64_t upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
5104 if ((uvalue
& ~upper
) != uvalue
&& (uvalue
| upper
) != uvalue
)
5106 if (esize
<= 4 || (uint32_t) uvalue
== (uint32_t) (uvalue
>> 32))
5108 svalue
= (int32_t) uvalue
;
5109 if (esize
<= 2 || (uint16_t) uvalue
== (uint16_t) (uvalue
>> 16))
5111 svalue
= (int16_t) uvalue
;
5112 if (esize
== 1 || (uint8_t) uvalue
== (uint8_t) (uvalue
>> 8))
5116 if ((svalue
& 0xff) == 0)
5118 return svalue
< -128 || svalue
>= 128;
5121 /* Include the opcode description table as well as the operand description
5123 #define VERIFIER(x) verify_##x
5124 #include "aarch64-tbl.h"