/* aarch64-opc.c -- AArch64 opcode support.
   Copyright (C) 2009-2016 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
#include "libiberty.h"
#include "aarch64-opc.h"

#ifdef DEBUG_AARCH64
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_pattern_array[32] = {
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.  */
const char *const aarch64_sve_prfop_array[16] = {
/* Helper functions to determine which operand is used to encode/decode
   the size:Q fields for AdvSIMD instructions.  */

static inline bfd_boolean
vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
{
  return ((qualifier >= AARCH64_OPND_QLF_V_8B
           && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE : FALSE);
}
static inline bfd_boolean
fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
{
  return ((qualifier >= AARCH64_OPND_QLF_S_B
           && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE : FALSE);
}
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
/* Given a sequence of qualifiers in QUALIFIERS, determine and return
   the data pattern of the operands.

   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  if (vector_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. v.4s, v.4s, v.4s
	 or v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* e.g. v.8h, v.8b, v.8b.
	 or v.4s, v.4h, v.h[2].  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* e.g. v.8h, v.8h, v.8b.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
   the AdvSIMD instructions.  */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time when we need to select an operand.  We can
   either buffer the calculated result or statically generate the data,
   however, it is not obvious that the optimization will bring significant
   benefit.  */

int
aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
{
  return
    significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
}
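/* For illustration: with the qualifier sequence (V_4S, V_4S, V_4S), as in
   "add v0.4s, v1.4s, v2.4s", get_data_pattern returns DP_VECTOR_3SAME and
   operand 0 carries the size:Q information; with (V_8H, V_8B, V_8B), as in
   "saddl v0.8h, v1.8b, v2.8b", it returns DP_VECTOR_LONG and operand 1 is
   selected instead.  */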
const aarch64_field fields[] =
{
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    {  0,  4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10,  3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    {  5,  4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10,  4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16,  4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16,  4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    {  5,  4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    {  0,  4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    {  5,  5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 },	/* SVE_Zd: SVE vector register, bits [4,0].  */
    {  5,  5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    {  5,  5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    {  0,  5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 16,  4 },	/* SVE_imm4: 4-bit immediate field.  */
    {  5,  5 },	/* SVE_pattern: vector pattern enumeration.  */
    {  0,  4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 22,  2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
};
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}

const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}

/* Get operand description string.
   This is usually for diagnostic purposes.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
/* Table of all conditional affixes.  */
const aarch64_cond aarch64_conds[16] =
  {{"cc", "lo", "ul"}, 0x3},

const aarch64_cond *
get_cond_from_value (aarch64_insn value)
{
  return &aarch64_conds[(unsigned int) value];
}

const aarch64_cond *
get_inverted_cond (const aarch64_cond *cond)
{
  return &aarch64_conds[cond->value ^ 0x1];
}
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =

enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
{
  return desc - aarch64_operand_modifiers;
}

aarch64_insn
aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
{
  return aarch64_operand_modifiers[kind].value;
}

enum aarch64_modifier_kind
aarch64_get_operand_modifier_from_value (aarch64_insn value,
					 bfd_boolean extend_p)
{
  if (extend_p == TRUE)
    return AARCH64_MOD_UXTB + value;
  else
    return AARCH64_MOD_LSL - value;
}

bfd_boolean
aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
{
  return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
    ? TRUE : FALSE;
}

static inline bfd_boolean
aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
{
  return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
    ? TRUE : FALSE;
}
const struct aarch64_name_value_pair aarch64_barrier_options[16] =

/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },	/* PSB CSYNC.  */
  { NULL, 0x0 },
};
/* op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}

static inline int
value_aligned_p (int64_t value, int align)
{
  return ((value & (align - 1)) == 0) ? 1 : 0;
}
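/* Note (illustration only): the test above relies on ALIGN being a power of
   two, e.g. value_aligned_p (24, 8) yields 1 while value_aligned_p (20, 8)
   yields 0.  */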
/* Return non-zero if a signed VALUE fits in a field of WIDTH bits.  */

static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}

/* Return non-zero if an unsigned VALUE fits in a field of WIDTH bits.  */

static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
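/* For illustration: value_fit_signed_field_p (127, 8) and
   value_fit_signed_field_p (-128, 8) both succeed while 128 does not fit;
   value_fit_unsigned_field_p (255, 8) succeeds and 256 does not.  */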
/* Return 1 if OPERAND is SP or WSP.  */
int
aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}

/* Return 1 if OPERAND is XZR or WZR.  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
/* Return true if the operand *OPERAND that has the operand code
   OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER can also be
   qualified by the qualifier TARGET.  */

static inline int
operand_also_qualified_p (const struct aarch64_opnd_info *operand,
			  aarch64_opnd_qualifier_t target)
{
  switch (operand->qualifier)
    {
    case AARCH64_OPND_QLF_W:
      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_X:
      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_WSP:
      if (target == AARCH64_OPND_QLF_W
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    case AARCH64_OPND_QLF_SP:
      if (target == AARCH64_OPND_QLF_X
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    default:
      break;
    }

  return 0;
}
/* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
   for operand KNOWN_IDX, return the expected qualifier for operand IDX.

   Return NIL if more than one expected qualifier is found.  */

aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
				int idx,
				const aarch64_opnd_qualifier_t known_qlf,
				int known_idx)
{
  int i, saved_i;

  /* When the known qualifier is NIL, we have to assume that there is only
     one qualifier sequence in the *QSEQ_LIST and return the corresponding
     qualifier directly.  One scenario is that for instruction
	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
     which has only one possible valid qualifier sequence
	NIL, S_D
     the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
     determine the correct relocation type (i.e. LDST64_LO12) for PRFM.

     Because the qualifier NIL has dual roles in the qualifier sequence:
     it can mean no qualifier for the operand, or the qualifier sequence is
     not in use (when all qualifiers in the sequence are NILs), we have to
     handle this special case here.  */
  if (known_qlf == AARCH64_OPND_NIL)
    {
      assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
      return qseq_list[0][idx];
    }

  for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
    {
      if (qseq_list[i][known_idx] == known_qlf)
	{
	  if (saved_i != -1)
	    /* More than one sequence is found to have KNOWN_QLF at
	       KNOWN_IDX.  */
	    return AARCH64_OPND_NIL;
	  saved_i = i;
	}
    }

  return qseq_list[saved_i][idx];
}
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind of the qualifier.  */
  enum operand_qualifier_kind kind;
};

/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers [] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.  */

  {0, 0, 0, "retrieving", 0},
};
static inline bfd_boolean
operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
{
  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
    ? TRUE : FALSE;
}

static inline bfd_boolean
qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
{
  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
    ? TRUE : FALSE;
}

const char *
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}

/* Given an operand qualifier, return the expected data element size
   of a qualified operand.  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}

unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}

aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data2;
}
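/* For illustration: for the "4s" qualifier the table above gives
   aarch64_get_qualifier_esize () == 4, aarch64_get_qualifier_nelem () == 4
   and aarch64_get_qualifier_standard_value () == 0x5, i.e. four 32-bit
   elements encoded with the standard size:Q value 0b101.  */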
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}

static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
#ifdef DEBUG_AARCH64
aarch64_verbose (const char *str, ...)

static void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
}

static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
/* TODO: improve this; we can have an extra field at run time to
   store the number of operands rather than calculating it every time.  */

int
aarch64_num_of_operands (const aarch64_opcode *opcode)
{
  int i = 0;
  const enum aarch64_opnd *opnds = opcode->operands;
  while (opnds[i++] != AARCH64_OPND_NIL)
    ;
  --i;
  assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
  return i;
}
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If it succeeds, fill the found sequence in *RET and return 1; otherwise
   return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple of examples of the matching algorithm:

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

static int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
838 const aarch64_opnd_qualifier_t
*qualifiers
;
840 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
843 DEBUG_TRACE ("SUCCEED: no operand");
847 if (stop_at
< 0 || stop_at
>= num_opnds
)
848 stop_at
= num_opnds
- 1;
850 /* For each pattern. */
851 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
854 qualifiers
= *qualifiers_list
;
856 /* Start as positive. */
859 DEBUG_TRACE ("%d", i
);
862 dump_match_qualifiers (inst
->operands
, qualifiers
);
/* Most opcodes have far fewer patterns in the list.
   The first NIL qualifier indicates the end of the list.  */
867 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
869 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
875 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
877 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
/* Either the operand does not have a qualifier, or the qualifier
   for the operand needs to be deduced from the qualifier
   sequence.
   In the latter case, any constraint checking related to
   the obtained qualifier should be done later in
   operand_general_constraint_met_p.  */
887 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
/* Unless the target qualifier can also qualify the operand
   (which already has a non-nil qualifier), non-equal
   qualifiers are generally unmatched.  */
892 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
901 continue; /* Equal qualifiers are certainly matched. */
904 /* Qualifiers established. */
911 /* Fill the result in *RET. */
913 qualifiers
= *qualifiers_list
;
915 DEBUG_TRACE ("complete qualifiers using list %d", i
);
918 dump_qualifier_sequence (qualifiers
);
921 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
922 ret
[j
] = *qualifiers
;
923 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
924 ret
[j
] = AARCH64_OPND_QLF_NIL
;
926 DEBUG_TRACE ("SUCCESS");
930 DEBUG_TRACE ("FAIL");
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
   succeeds.  */

static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
946 aarch64_opnd_qualifier_seq_t qualifiers
;
948 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
951 DEBUG_TRACE ("matching FAIL");
955 if (inst
->opcode
->flags
& F_STRICT
)
957 /* Require an exact qualifier match, even for NIL qualifiers. */
958 nops
= aarch64_num_of_operands (inst
->opcode
);
959 for (i
= 0; i
< nops
; ++i
)
960 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
964 /* Update the qualifiers. */
965 if (update_p
== TRUE
)
966 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
968 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
970 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
971 "update %s with %s for operand %d",
972 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
973 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
974 inst
->operands
[i
].qualifier
= qualifiers
[i
];
977 DEBUG_TRACE ("matching SUCCESS");
/* Return TRUE if VALUE is a wide constant that can be moved into a general
   purpose register by MOVZ.

   IS32 indicates whether VALUE is a 32-bit immediate or not.
   If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
   amount will be returned in *SHIFT_AMOUNT.  */

bfd_boolean
aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
{
  int amount;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);

  if (is32)
    {
      /* Allow all zeros or all ones in top 32-bits, so that
	 32-bit constant expressions like ~0x80000000 are
	 permitted.  */
      uint64_t ext = value;
      if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
	/* Immediate out of range.  */
	return FALSE;
      value &= (int64_t) 0xffffffff;
    }

  /* first, try movz then movn */
  amount = -1;
  if ((value & ((int64_t) 0xffff << 0)) == value)
    amount = 0;
  else if ((value & ((int64_t) 0xffff << 16)) == value)
    amount = 16;
  else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
    amount = 32;
  else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
    amount = 48;

  if (amount == -1)
    {
      DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
      return FALSE;
    }

  if (shift_amount != NULL)
    *shift_amount = amount;

  DEBUG_TRACE ("exit TRUE with amount %d", amount);

  return TRUE;
}
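/* For illustration: 0x12340000 is accepted (a MOVZ with a left shift of 16),
   0xffff00000000 is accepted for 64-bit operands (shift of 32), while
   0x12345678 is rejected because more than one 16-bit chunk is non-zero.  */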
/* Build the accepted values for immediate logical SIMD instructions.

   The standard encodings of the immediate value are:
     N      imms     immr      SIMD size  R            S
     1      ssssss   rrrrrr    64         UInt(rrrrrr) UInt(ssssss)
     0      0sssss   0rrrrr    32         UInt(rrrrr)  UInt(sssss)
     0      10ssss   00rrrr    16         UInt(rrrr)   UInt(ssss)
     0      110sss   000rrr    8          UInt(rrr)    UInt(sss)
     0      1110ss   0000rr    4          UInt(rr)     UInt(ss)
     0      11110s   00000r    2          UInt(r)      UInt(s)
   where the all-ones value of S is reserved.

   Let's call E the SIMD size.

   The immediate value is: S+1 bits '1' rotated to the right by R.

   The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
   (remember S != E - 1).  */
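/* Checking the arithmetic: 64*63 + 32*31 + 16*15 + 8*7 + 4*3 + 2*1
   = 4032 + 992 + 240 + 56 + 12 + 2 = 5334, which is the value used for
   TOTAL_IMM_NB below.  */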
#define TOTAL_IMM_NB  5334

typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
static int
simd_imm_encoding_cmp (const void *i1, const void *i2)
{
  const simd_imm_encoding *imm1 = (const simd_imm_encoding *) i1;
  const simd_imm_encoding *imm2 = (const simd_imm_encoding *) i2;

  if (imm1->imm < imm2->imm)
    return -1;
  if (imm1->imm > imm2->imm)
    return +1;
  return 0;
}
/* immediate bitfield standard encoding
   imm13<12>  imm13<5:0>  imm13<11:6>  SIMD size  R       S
   1          ssssss      rrrrrr       64         rrrrrr  ssssss
   0          0sssss      0rrrrr       32         rrrrr   sssss
   0          10ssss      00rrrr       16         rrrr    ssss
   0          110sss      000rrr       8          rrr     sss
   0          1110ss      0000rr       4          rr      ss
   0          11110s      00000r       2          r       s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int is64;
  int nb_imms;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
	      case 2: imm = (imm <<  4) | imm;
	      case 3: imm = (imm <<  8) | imm;
	      case 4: imm = (imm << 16) | imm;
	      case 5: imm = (imm << 32) | imm;
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield (is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort (simd_immediates, nb_imms,
	 sizeof (simd_immediates[0]), simd_imm_encoding_cmp);
}
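/* Worked example (for illustration): with e = 16 (log_e = 4), s = 3 and
   r = 4 the element is (1 << 4) - 1 = 0xf rotated right by 4, i.e. 0xf000,
   replicated to the 64-bit value 0xf000f000f000f000; its encoding is
   s | s_mask = 3 | 0x20 = 0b100011 in imms ("10ssss"), r = 0b000100 in immr
   and N = 0.  */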
/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   be accepted by logical (immediate) instructions
   e.g. ORR <Xd|SP>, <Xn>, #<imm>.

   ESIZE is the number of bytes in the decoded immediate value.
   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   VALUE will be returned in *ENCODING.  */

bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bfd_boolean initialized = FALSE;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
	       value, esize);

  if (initialized == FALSE)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return FALSE;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch (&imm_enc, simd_immediates, TOTAL_IMM_NB,
	     sizeof (simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
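/* For illustration: 0x5555555555555555 (a 2-bit element with S = 0, R = 0)
   and 0x0000ffff0000ffff (a 32-bit element of 16 consecutive ones) are valid
   bitmask immediates, whereas 0 and ~0 can never be encoded and a plain
   value such as 0x1234abcd is rejected by the bsearch above.  */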
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	return -1;
    }
  return ret;
}
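/* For illustration: 0xff00ff0000ff00ff consists only of 0x00 and 0xff bytes,
   so it shrinks to 0b10100101 = 0xa5; 0xff01ff0000ff00ff contains the byte
   0x01 and therefore yields -1.  */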
/* Utility inline functions for operand_general_constraint_met_p.  */

static inline void
set_error (aarch64_operand_error *mismatch_detail,
	   enum aarch64_operand_error_kind kind, int idx,
	   const char *error)
{
  if (mismatch_detail == NULL)
    return;
  mismatch_detail->kind = kind;
  mismatch_detail->index = idx;
  mismatch_detail->error = error;
}

static inline void
set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
		  const char *error)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
}

static inline void
set_out_of_range_error (aarch64_operand_error *mismatch_detail,
			int idx, int lower_bound, int upper_bound,
			const char *error)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
  mismatch_detail->data[0] = lower_bound;
  mismatch_detail->data[1] = upper_bound;
}
1259 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1260 int idx
, int lower_bound
, int upper_bound
)
1262 if (mismatch_detail
== NULL
)
1264 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1265 _("immediate value"));
1269 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1270 int idx
, int lower_bound
, int upper_bound
)
1272 if (mismatch_detail
== NULL
)
1274 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1275 _("immediate offset"));
1279 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1280 int idx
, int lower_bound
, int upper_bound
)
1282 if (mismatch_detail
== NULL
)
1284 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1285 _("register number"));
1289 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1290 int idx
, int lower_bound
, int upper_bound
)
1292 if (mismatch_detail
== NULL
)
1294 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1295 _("register element index"));
1299 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1300 int idx
, int lower_bound
, int upper_bound
)
1302 if (mismatch_detail
== NULL
)
1304 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1308 /* Report that the MUL modifier in operand IDX should be in the range
1309 [LOWER_BOUND, UPPER_BOUND]. */
1311 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1312 int idx
, int lower_bound
, int upper_bound
)
1314 if (mismatch_detail
== NULL
)
1316 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1321 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1324 if (mismatch_detail
== NULL
)
1326 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1327 mismatch_detail
->data
[0] = alignment
;
1331 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1334 if (mismatch_detail
== NULL
)
1336 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1337 mismatch_detail
->data
[0] = expected_num
;
1341 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1344 if (mismatch_detail
== NULL
)
1346 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
/* General constraint checking based on operand code.

   Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
   as the IDXth operand of opcode OPCODE.  Otherwise return 0.

   This function has to be called after the qualifiers for all operands
   have been resolved.

   Mismatching error message is returned in *MISMATCH_DETAIL upon request,
   i.e. when MISMATCH_DETAIL is non-NULL.  This avoids the generation
   of error messages during disassembly, where error messages are not
   wanted.  We avoid the dynamic construction of strings of error messages
   here (i.e. in libopcodes), as it is costly and complicated; instead, we
   use a combination of error code, static string and some integer data to
   represent an error.  */
1366 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1367 enum aarch64_opnd type
,
1368 const aarch64_opcode
*opcode
,
1369 aarch64_operand_error
*mismatch_detail
)
1374 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1375 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1377 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1379 switch (aarch64_operands
[type
].op_class
)
1381 case AARCH64_OPND_CLASS_INT_REG
:
1382 /* Check pair reg constraints for cas* instructions. */
1383 if (type
== AARCH64_OPND_PAIRREG
)
1385 assert (idx
== 1 || idx
== 3);
1386 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1388 set_syntax_error (mismatch_detail
, idx
- 1,
1389 _("reg pair must start from even reg"));
1392 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1394 set_syntax_error (mismatch_detail
, idx
,
1395 _("reg pair must be contiguous"));
1401 /* <Xt> may be optional in some IC and TLBI instructions. */
1402 if (type
== AARCH64_OPND_Rt_SYS
)
1404 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1405 == AARCH64_OPND_CLASS_SYSTEM
));
1406 if (opnds
[1].present
1407 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1409 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1412 if (!opnds
[1].present
1413 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1415 set_other_error (mismatch_detail
, idx
, _("missing register"));
1421 case AARCH64_OPND_QLF_WSP
:
1422 case AARCH64_OPND_QLF_SP
:
1423 if (!aarch64_stack_pointer_p (opnd
))
1425 set_other_error (mismatch_detail
, idx
,
1426 _("stack pointer register expected"));
1435 case AARCH64_OPND_CLASS_SVE_REG
:
1438 case AARCH64_OPND_SVE_Zn_INDEX
:
1439 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1440 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1442 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1448 case AARCH64_OPND_SVE_ZnxN
:
1449 case AARCH64_OPND_SVE_ZtxN
:
1450 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1452 set_other_error (mismatch_detail
, idx
,
1453 _("invalid register list"));
1463 case AARCH64_OPND_CLASS_PRED_REG
:
1464 if (opnd
->reg
.regno
>= 8
1465 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1467 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1472 case AARCH64_OPND_CLASS_COND
:
1473 if (type
== AARCH64_OPND_COND1
1474 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
/* Don't allow AL or NV.  */
1477 set_syntax_error (mismatch_detail
, idx
, NULL
);
1481 case AARCH64_OPND_CLASS_ADDRESS
:
1482 /* Check writeback. */
1483 switch (opcode
->iclass
)
1487 case ldstnapair_offs
:
1490 if (opnd
->addr
.writeback
== 1)
1492 set_syntax_error (mismatch_detail
, idx
,
1493 _("unexpected address writeback"));
1498 case ldstpair_indexed
:
1501 if (opnd
->addr
.writeback
== 0)
1503 set_syntax_error (mismatch_detail
, idx
,
1504 _("address writeback expected"));
1509 assert (opnd
->addr
.writeback
== 0);
1514 case AARCH64_OPND_ADDR_SIMM7
:
1515 /* Scaled signed 7 bits immediate offset. */
1516 /* Get the size of the data element that is accessed, which may be
1517 different from that of the source register size,
1518 e.g. in strb/ldrb. */
1519 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1520 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1522 set_offset_out_of_range_error (mismatch_detail
, idx
,
1523 -64 * size
, 63 * size
);
1526 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1528 set_unaligned_error (mismatch_detail
, idx
, size
);
1532 case AARCH64_OPND_ADDR_SIMM9
:
1533 /* Unscaled signed 9 bits immediate offset. */
1534 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1536 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1541 case AARCH64_OPND_ADDR_SIMM9_2
:
/* Unscaled signed 9 bits immediate offset, which has to be negative
   or unaligned.  */
1544 size
= aarch64_get_qualifier_esize (qualifier
);
1545 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1546 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1547 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1549 set_other_error (mismatch_detail
, idx
,
1550 _("negative or unaligned offset expected"));
1553 case AARCH64_OPND_SIMD_ADDR_POST
:
1554 /* AdvSIMD load/store multiple structures, post-index. */
1556 if (opnd
->addr
.offset
.is_reg
)
1558 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1562 set_other_error (mismatch_detail
, idx
,
1563 _("invalid register offset"));
1569 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1570 unsigned num_bytes
; /* total number of bytes transferred. */
1571 /* The opcode dependent area stores the number of elements in
1572 each structure to be loaded/stored. */
1573 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1574 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
/* Special handling of loading a single structure to all lanes.  */
1576 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1577 * aarch64_get_qualifier_esize (prev
->qualifier
);
1579 num_bytes
= prev
->reglist
.num_regs
1580 * aarch64_get_qualifier_esize (prev
->qualifier
)
1581 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1582 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1584 set_other_error (mismatch_detail
, idx
,
1585 _("invalid post-increment amount"));
1591 case AARCH64_OPND_ADDR_REGOFF
:
1592 /* Get the size of the data element that is accessed, which may be
1593 different from that of the source register size,
1594 e.g. in strb/ldrb. */
1595 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1596 /* It is either no shift or shift by the binary logarithm of SIZE. */
1597 if (opnd
->shifter
.amount
!= 0
1598 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1600 set_other_error (mismatch_detail
, idx
,
1601 _("invalid shift amount"));
1604 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1606 switch (opnd
->shifter
.kind
)
1608 case AARCH64_MOD_UXTW
:
1609 case AARCH64_MOD_LSL
:
1610 case AARCH64_MOD_SXTW
:
1611 case AARCH64_MOD_SXTX
: break;
1613 set_other_error (mismatch_detail
, idx
,
1614 _("invalid extend/shift operator"));
1619 case AARCH64_OPND_ADDR_UIMM12
:
1620 imm
= opnd
->addr
.offset
.imm
;
1621 /* Get the size of the data element that is accessed, which may be
1622 different from that of the source register size,
1623 e.g. in strb/ldrb. */
1624 size
= aarch64_get_qualifier_esize (qualifier
);
1625 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1627 set_offset_out_of_range_error (mismatch_detail
, idx
,
1631 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1633 set_unaligned_error (mismatch_detail
, idx
, size
);
1638 case AARCH64_OPND_ADDR_PCREL14
:
1639 case AARCH64_OPND_ADDR_PCREL19
:
1640 case AARCH64_OPND_ADDR_PCREL21
:
1641 case AARCH64_OPND_ADDR_PCREL26
:
1642 imm
= opnd
->imm
.value
;
1643 if (operand_need_shift_by_two (get_operand_from_code (type
)))
/* The offset value in a PC-relative branch instruction is always
   4-byte aligned and is encoded without the lowest 2 bits.  */
1647 if (!value_aligned_p (imm
, 4))
1649 set_unaligned_error (mismatch_detail
, idx
, 4);
1652 /* Right shift by 2 so that we can carry out the following check
1656 size
= get_operand_fields_width (get_operand_from_code (type
));
1657 if (!value_fit_signed_field_p (imm
, size
))
1659 set_other_error (mismatch_detail
, idx
,
1660 _("immediate out of range"));
1670 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1671 if (type
== AARCH64_OPND_LEt
)
1673 /* Get the upper bound for the element index. */
1674 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1675 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1677 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1681 /* The opcode dependent area stores the number of elements in
1682 each structure to be loaded/stored. */
1683 num
= get_opcode_dependent_value (opcode
);
1686 case AARCH64_OPND_LVt
:
1687 assert (num
>= 1 && num
<= 4);
1688 /* Unless LD1/ST1, the number of registers should be equal to that
1689 of the structure elements. */
1690 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1692 set_reg_list_error (mismatch_detail
, idx
, num
);
1696 case AARCH64_OPND_LVt_AL
:
1697 case AARCH64_OPND_LEt
:
1698 assert (num
>= 1 && num
<= 4);
1699 /* The number of registers should be equal to that of the structure
1701 if (opnd
->reglist
.num_regs
!= num
)
1703 set_reg_list_error (mismatch_detail
, idx
, num
);
1712 case AARCH64_OPND_CLASS_IMMEDIATE
:
1713 /* Constraint check on immediate operand. */
1714 imm
= opnd
->imm
.value
;
1715 /* E.g. imm_0_31 constrains value to be 0..31. */
1716 if (qualifier_value_in_range_constraint_p (qualifier
)
1717 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1718 get_upper_bound (qualifier
)))
1720 set_imm_out_of_range_error (mismatch_detail
, idx
,
1721 get_lower_bound (qualifier
),
1722 get_upper_bound (qualifier
));
1728 case AARCH64_OPND_AIMM
:
1729 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1731 set_other_error (mismatch_detail
, idx
,
1732 _("invalid shift operator"));
1735 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1737 set_other_error (mismatch_detail
, idx
,
1738 _("shift amount expected to be 0 or 12"));
1741 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1743 set_other_error (mismatch_detail
, idx
,
1744 _("immediate out of range"));
1749 case AARCH64_OPND_HALF
:
1750 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1751 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1753 set_other_error (mismatch_detail
, idx
,
1754 _("invalid shift operator"));
1757 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1758 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
1760 set_other_error (mismatch_detail
, idx
,
1761 _("shift amount should be a multiple of 16"));
1764 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
1766 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
1770 if (opnd
->imm
.value
< 0)
1772 set_other_error (mismatch_detail
, idx
,
1773 _("negative immediate value not allowed"));
1776 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
1778 set_other_error (mismatch_detail
, idx
,
1779 _("immediate out of range"));
1784 case AARCH64_OPND_IMM_MOV
:
1786 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1787 imm
= opnd
->imm
.value
;
1791 case OP_MOV_IMM_WIDEN
:
1793 /* Fall through... */
1794 case OP_MOV_IMM_WIDE
:
1795 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
1797 set_other_error (mismatch_detail
, idx
,
1798 _("immediate out of range"));
1802 case OP_MOV_IMM_LOG
:
1803 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
1805 set_other_error (mismatch_detail
, idx
,
1806 _("immediate out of range"));
1817 case AARCH64_OPND_NZCV
:
1818 case AARCH64_OPND_CCMP_IMM
:
1819 case AARCH64_OPND_EXCEPTION
:
1820 case AARCH64_OPND_UIMM4
:
1821 case AARCH64_OPND_UIMM7
:
1822 case AARCH64_OPND_UIMM3_OP1
:
1823 case AARCH64_OPND_UIMM3_OP2
:
1824 size
= get_operand_fields_width (get_operand_from_code (type
));
1826 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
1828 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1834 case AARCH64_OPND_WIDTH
:
1835 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
1836 && opnds
[0].type
== AARCH64_OPND_Rd
);
1837 size
= get_upper_bound (qualifier
);
1838 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
1839 /* lsb+width <= reg.size */
1841 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
1842 size
- opnds
[idx
-1].imm
.value
);
1847 case AARCH64_OPND_LIMM
:
1849 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1850 uint64_t uimm
= opnd
->imm
.value
;
1851 if (opcode
->op
== OP_BIC
)
1853 if (aarch64_logical_immediate_p (uimm
, esize
, NULL
) == FALSE
)
1855 set_other_error (mismatch_detail
, idx
,
1856 _("immediate out of range"));
1862 case AARCH64_OPND_IMM0
:
1863 case AARCH64_OPND_FPIMM0
:
1864 if (opnd
->imm
.value
!= 0)
1866 set_other_error (mismatch_detail
, idx
,
1867 _("immediate zero expected"));
1872 case AARCH64_OPND_SHLL_IMM
:
1874 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
1875 if (opnd
->imm
.value
!= size
)
1877 set_other_error (mismatch_detail
, idx
,
1878 _("invalid shift amount"));
1883 case AARCH64_OPND_IMM_VLSL
:
1884 size
= aarch64_get_qualifier_esize (qualifier
);
1885 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
1887 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1893 case AARCH64_OPND_IMM_VLSR
:
1894 size
= aarch64_get_qualifier_esize (qualifier
);
1895 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
1897 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
1902 case AARCH64_OPND_SIMD_IMM
:
1903 case AARCH64_OPND_SIMD_IMM_SFT
:
1904 /* Qualifier check. */
1907 case AARCH64_OPND_QLF_LSL
:
1908 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1910 set_other_error (mismatch_detail
, idx
,
1911 _("invalid shift operator"));
1915 case AARCH64_OPND_QLF_MSL
:
1916 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
1918 set_other_error (mismatch_detail
, idx
,
1919 _("invalid shift operator"));
1923 case AARCH64_OPND_QLF_NIL
:
1924 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1926 set_other_error (mismatch_detail
, idx
,
1927 _("shift is not permitted"));
1935 /* Is the immediate valid? */
1937 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
1939 /* uimm8 or simm8 */
1940 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
1942 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
1946 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
1949 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1950 ffffffffgggggggghhhhhhhh'. */
1951 set_other_error (mismatch_detail
, idx
,
1952 _("invalid value for immediate"));
1955 /* Is the shift amount valid? */
1956 switch (opnd
->shifter
.kind
)
1958 case AARCH64_MOD_LSL
:
1959 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1960 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
1962 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
1966 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
1968 set_unaligned_error (mismatch_detail
, idx
, 8);
1972 case AARCH64_MOD_MSL
:
/* Only 8 and 16 are valid shift amounts.  */
1974 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
1976 set_other_error (mismatch_detail
, idx
,
1977 _("shift amount expected to be 0 or 16"));
1982 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1984 set_other_error (mismatch_detail
, idx
,
1985 _("invalid shift operator"));
1992 case AARCH64_OPND_FPIMM
:
1993 case AARCH64_OPND_SIMD_FPIMM
:
1994 if (opnd
->imm
.is_fp
== 0)
1996 set_other_error (mismatch_detail
, idx
,
1997 _("floating-point immediate expected"));
2000 /* The value is expected to be an 8-bit floating-point constant with
2001 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2002 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2004 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
2006 set_other_error (mismatch_detail
, idx
,
2007 _("immediate out of range"));
2010 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2012 set_other_error (mismatch_detail
, idx
,
2013 _("invalid shift operator"));
2018 case AARCH64_OPND_SVE_PATTERN_SCALED
:
2019 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
2020 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
2022 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
2032 case AARCH64_OPND_CLASS_CP_REG
:
2033 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2034 valid range: C0 - C15. */
2035 if (opnd
->reg
.regno
> 15)
2037 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2042 case AARCH64_OPND_CLASS_SYSTEM
:
2045 case AARCH64_OPND_PSTATEFIELD
:
2046 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
2049 The immediate must be #0 or #1. */
2050 if ((opnd
->pstatefield
== 0x03 /* UAO. */
2051 || opnd
->pstatefield
== 0x04) /* PAN. */
2052 && opnds
[1].imm
.value
> 1)
2054 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2057 /* MSR SPSel, #uimm4
2058 Uses uimm4 as a control value to select the stack pointer: if
2059 bit 0 is set it selects the current exception level's stack
2060 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2061 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2062 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
2064 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
2073 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
2074 /* Get the upper bound for the element index. */
2075 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
2076 /* Index out-of-range. */
2077 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
2079 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2082 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2083 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2084 number is encoded in "size:M:Rm":
2090 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
2091 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2093 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2098 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2099 assert (idx
== 1 || idx
== 2);
2102 case AARCH64_OPND_Rm_EXT
:
2103 if (aarch64_extend_operator_p (opnd
->shifter
.kind
) == FALSE
2104 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2106 set_other_error (mismatch_detail
, idx
,
2107 _("extend operator expected"));
2110 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2111 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2112 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2114 if (!aarch64_stack_pointer_p (opnds
+ 0)
2115 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2117 if (!opnd
->shifter
.operator_present
)
2119 set_other_error (mismatch_detail
, idx
,
2120 _("missing extend operator"));
2123 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2125 set_other_error (mismatch_detail
, idx
,
2126 _("'LSL' operator not allowed"));
2130 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2131 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2132 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2134 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2137 /* In the 64-bit form, the final register operand is written as Wm
2138 for all but the (possibly omitted) UXTX/LSL and SXTX
2140 N.B. GAS allows X register to be used with any operator as a
2141 programming convenience. */
2142 if (qualifier
== AARCH64_OPND_QLF_X
2143 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2144 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2145 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2147 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2152 case AARCH64_OPND_Rm_SFT
:
2153 /* ROR is not available to the shifted register operand in
2154 arithmetic instructions. */
2155 if (aarch64_shift_operator_p (opnd
->shifter
.kind
) == FALSE
)
2157 set_other_error (mismatch_detail
, idx
,
2158 _("shift operator expected"));
2161 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2162 && opcode
->iclass
!= log_shift
)
2164 set_other_error (mismatch_detail
, idx
,
2165 _("'ROR' operator not allowed"));
2168 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2169 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2171 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
/* Main entrypoint for the operand constraint checking.

   Return 1 if operands of *INST meet the constraint applied by the operand
   codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
   not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
   adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
   with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts a
   non-NIL error kind when it is notified that an instruction does not pass
   the check).

   Un-determined operand qualifiers may get established during the process.  */

int
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
2205 DEBUG_TRACE ("enter");
2207 /* Check for cases where a source register needs to be the same as the
2208 destination register. Do this before matching qualifiers since if
2209 an instruction has both invalid tying and invalid qualifiers,
2210 the error about qualifiers would suggest several alternative
2211 instructions that also have invalid tying. */
2212 i
= inst
->opcode
->tied_operand
;
2213 if (i
> 0 && (inst
->operands
[0].reg
.regno
!= inst
->operands
[i
].reg
.regno
))
2215 if (mismatch_detail
)
2217 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
2218 mismatch_detail
->index
= i
;
2219 mismatch_detail
->error
= NULL
;
/* Match operands' qualifier.
   *INST has already had qualifiers established for some, if not all, of
   its operands; we need to find out whether these established
   qualifiers match one of the qualifier sequences in
   INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
   with the corresponding qualifier in such a sequence.
   Only basic operand constraint checking is done here; the more thorough
   constraint checking will be carried out by operand_general_constraint_met_p,
   which has to be called after this in order to get all of the operands'
   qualifiers established.  */
2234 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2236 DEBUG_TRACE ("FAIL on operand qualifier matching");
2237 if (mismatch_detail
)
2239 /* Return an error type to indicate that it is the qualifier
2240 matching failure; we don't care about which operand as there
2241 are enough information in the opcode table to reproduce it. */
2242 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2243 mismatch_detail
->index
= -1;
2244 mismatch_detail
->error
= NULL
;
2249 /* Match operands' constraint. */
2250 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2252 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2253 if (type
== AARCH64_OPND_NIL
)
2255 if (inst
->operands
[i
].skip
)
2257 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2260 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2261 inst
->opcode
, mismatch_detail
) == 0)
2263 DEBUG_TRACE ("FAIL on operand %d", i
);
2268 DEBUG_TRACE ("PASS");
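/* Illustrative sketch (editorial addition, not part of the original
   sources): a front end such as an assembler would typically invoke the
   constraint checker as below, reporting MISMATCH_DETAIL only when the
   match fails.  The reporting routine named here is hypothetical.  */
#if 0
static void
check_one_candidate (aarch64_inst *inst)
{
  aarch64_operand_error detail;

  if (!aarch64_match_operands_constraint (inst, &detail))
    /* DETAIL.kind is guaranteed not to be AARCH64_OPDE_NIL here.  */
    report_operand_error (&detail);	/* hypothetical */
}
#endif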
/* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
   Also updates the TYPE of each INST->OPERANDS with the corresponding
   value of OPCODE->OPERANDS.

   Note that some operand qualifiers may need to be manually cleared by
   the caller before it further calls the aarch64_opcode_encode; by
   doing this, it helps the qualifier matching facilities work
   properly.  */

const aarch64_opcode *
aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
{
  int i;
  const aarch64_opcode *old = inst->opcode;

  inst->opcode = opcode;

  /* Update the operand types.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      inst->operands[i].type = opcode->operands[i];
      if (opcode->operands[i] == AARCH64_OPND_NIL)
	break;
    }

  DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);

  return old;
}
int
aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    if (operands[i] == operand)
      return i;
    else if (operands[i] == AARCH64_OPND_NIL)
      break;
  return -1;
}
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),  \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R32
#undef R64
};
/* Return the integer register name.
   If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg.  */

static inline const char *
get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
{
  const int has_zr = sp_reg_p ? 0 : 1;
  const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
  return int_reg[has_zr][is_64][regno];
}

/* Like get_int_reg_name, but IS_64 is always 1.  */

static inline const char *
get_64bit_int_reg_name (int regno, int sp_reg_p)
{
  const int has_zr = sp_reg_p ? 0 : 1;
  return int_reg[has_zr][1][regno];
}
/* Get the name of the integer offset register in OPND, using the shift type
   to decide whether it's a word or doubleword.  */

static inline const char *
get_offset_int_reg_name (const aarch64_opnd_info *opnd)
{
  switch (opnd->shifter.kind)
    {
    case AARCH64_MOD_UXTW:
    case AARCH64_MOD_SXTW:
      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);

    case AARCH64_MOD_LSL:
    case AARCH64_MOD_SXTX:
      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);

    default:
      assert (0);
      return NULL;
    }
}
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

typedef union
{
  uint64_t i;
  double   d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float    f;
} single_conv_t;

typedef union
{
  uint32_t i;
  float    f;
} half_conv_t;

/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7>              */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
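/* Worked example (editorial note): for SIZE == 4, IMM8 == 0x70 gives
   sign = 0, NOT(imm8<6>) = 0, Replicate(imm8<6>,4) = 0xf and
   imm8<6>:imm8<5:0> = 0x70, i.e. the bit pattern 0x3f800000, which is
   single-precision 1.0; likewise IMM8 == 0xf0 yields 0xbf800000 (-1.0).  */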
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed to by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];	/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
  else
    {
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		    prefix, reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
	  break;
	}
    }
}
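/* For illustration (editorial note): a list of four consecutive registers
   starting at v0 with the 4S qualifier prints in the hyphenated form
   "{v0.4s-v3.4s}", while a wrapping two-register list starting at v31
   prints in the comma-separated form "{v31.4s, v0.4s}".  */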
/* Print the register+immediate address in OPND to BUF, which has SIZE
   characters.  BASE is the name of the base register.  */
static void
print_immediate_offset_address (char *buf, size_t size,
				const aarch64_opnd_info *opnd,
				const char *base)
{
  if (opnd->addr.writeback)
    {
      if (opnd->addr.preind)
	snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
    }
  else
    {
      if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s]", base);
    }
}
/* Produce the string representation of the register offset address operand
   *OPND in the buffer pointed to by BUF of size SIZE.  BASE and OFFSET are
   the names of the base and offset registers.  */
static void
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd,
			       const char *base, const char *offset)
{
  char tb[16];			/* Temporary buffer.  */
  bfd_boolean print_extend_p = TRUE;
  bfd_boolean print_amount_p = TRUE;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				|| !opnd->shifter.amount_present))
    {
      /* Don't print the shift/extend amount when the amount is zero and
	 it is not the special case of an 8-bit load/store instruction.  */
      print_amount_p = FALSE;
      /* Likewise, no need to print the shift operator LSL in such a
	 case.  */
      if (opnd->shifter.kind == AARCH64_MOD_LSL)
	print_extend_p = FALSE;
    }

  /* Prepare for the extend/shift.  */
  if (print_extend_p)
    {
      if (print_amount_p)
	snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name,
		  opnd->shifter.amount);
      else
	snprintf (tb, sizeof (tb), ",%s", shift_name);
    }
  else
    tb[0] = '\0';

  snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
}
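/* For illustration (editorial note): with base "x0", offset "w1" and a
   UXTW #2 extend, the routine above produces "[x0,w1,uxtw #2]"; when the
   amount is zero and the operator is LSL it degenerates to "[x0,x1]".  */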
/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   PC, PCREL_P and ADDRESS are used to pass in and return information about
   the PC-relative address calculation, where the PC value is passed in
   PC.  If the operand is pc-relative, *PCREL_P (if PCREL_P non-NULL)
   will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
   calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.

   The function serves both the disassembler and the assembler diagnostics
   issuer, which is the reason why it lives in this file.  */

void
aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
		       const aarch64_opcode *opcode,
		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
		       bfd_vma *address)
{
  unsigned int i;
  const char *name = NULL;
  const aarch64_opnd_info *opnd = opnds + idx;
  enum aarch64_modifier_kind kind;
  uint64_t addr, enum_value;

  buf[0] = '\0';
  if (pcrel_p)
    *pcrel_p = 0;

  switch (opnd->type)
    {
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_PAIRREG:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information.  */
      if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
	break;
      /* Omit the operand, e.g. RET.  */
      if (optional_operand_p (opcode, idx)
	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
	break;
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      break;

    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
	      || opnd->qualifier == AARCH64_OPND_QLF_X
	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
      break;

    case AARCH64_OPND_Rm_EXT:
      kind = opnd->shifter.kind;
      assert (idx == 1 || idx == 2);
      if ((aarch64_stack_pointer_p (opnds)
	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
	       && kind == AARCH64_MOD_UXTW)
	      || (opnd->qualifier == AARCH64_OPND_QLF_X
		  && kind == AARCH64_MOD_UXTX)))
	{
	  /* 'LSL' is the preferred form in this case.  */
	  kind = AARCH64_MOD_LSL;
	  if (opnd->shifter.amount == 0)
	    {
	      /* Shifter omitted.  */
	      snprintf (buf, size, "%s",
			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
	      break;
	    }
	}
      if (opnd->shifter.amount)
	snprintf (buf, size, "%s, %s #%" PRIi64,
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "%s, %s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name);
      break;

    case AARCH64_OPND_Rm_SFT:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
	snprintf (buf, size, "%s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      else
	snprintf (buf, size, "%s, %s #%" PRIi64,
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reg.regno);
      break;

    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
		aarch64_get_qualifier_name (opnd->qualifier));
      break;

    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
      snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
      break;

    case AARCH64_OPND_LVn:
    case AARCH64_OPND_LVt:
    case AARCH64_OPND_LVt_AL:
    case AARCH64_OPND_LEt:
      print_register_list (buf, size, opnd, "v");
      break;

    case AARCH64_OPND_SVE_Pd:
    case AARCH64_OPND_SVE_Pg3:
    case AARCH64_OPND_SVE_Pg4_5:
    case AARCH64_OPND_SVE_Pg4_10:
    case AARCH64_OPND_SVE_Pg4_16:
    case AARCH64_OPND_SVE_Pm:
    case AARCH64_OPND_SVE_Pn:
    case AARCH64_OPND_SVE_Pt:
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "p%d", opnd->reg.regno);
      else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
	snprintf (buf, size, "p%d/%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      else
	snprintf (buf, size, "p%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      break;

    case AARCH64_OPND_SVE_Za_5:
    case AARCH64_OPND_SVE_Za_16:
    case AARCH64_OPND_SVE_Zd:
    case AARCH64_OPND_SVE_Zm_5:
    case AARCH64_OPND_SVE_Zm_16:
    case AARCH64_OPND_SVE_Zn:
    case AARCH64_OPND_SVE_Zt:
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "z%d", opnd->reg.regno);
      else
	snprintf (buf, size, "z%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      break;

    case AARCH64_OPND_SVE_ZnxN:
    case AARCH64_OPND_SVE_ZtxN:
      print_register_list (buf, size, opnd, "z");
      break;

    case AARCH64_OPND_SVE_Zn_INDEX:
      snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    case AARCH64_OPND_Cn:
    case AARCH64_OPND_Cm:
      snprintf (buf, size, "C%d", opnd->reg.regno);
      break;

    case AARCH64_OPND_IDX:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM0:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_FBITS:
      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_SVE_PATTERN:
      if (optional_operand_p (opcode, idx)
	  && opnd->imm.value == get_optional_operand_default_value (opcode))
	break;
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
      if (aarch64_sve_pattern_array[enum_value])
	snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      if (optional_operand_p (opcode, idx)
	  && !opnd->shifter.operator_present
	  && opnd->imm.value == get_optional_operand_default_value (opcode))
	break;
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
      if (aarch64_sve_pattern_array[opnd->imm.value])
	snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      if (opnd->shifter.operator_present)
	{
	  size_t len = strlen (buf);
	  snprintf (buf + len, size - len, ", %s #%" PRIi64,
		    aarch64_operand_modifiers[opnd->shifter.kind].name,
		    opnd->shifter.amount);
	}
      break;

    case AARCH64_OPND_SVE_PRFOP:
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
      if (aarch64_sve_prfop_array[enum_value])
	snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_IMM_MOV:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 4:	/* e.g. MOV Wd, #<imm32>.  */
	  {
	    int imm32 = opnd->imm.value;
	    snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
	  }
	  break;
	case 8:	/* e.g. MOV Xd, #<imm64>.  */
	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
		    opnd->imm.value, opnd->imm.value);
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_FPIMM0:
      snprintf (buf, size, "#0.0");
      break;

    case AARCH64_OPND_LIMM:
    case AARCH64_OPND_AIMM:
    case AARCH64_OPND_HALF:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
	  || opnd->shifter.kind == AARCH64_MOD_NONE)
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      else
	snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    case AARCH64_OPND_FPIMM:
    case AARCH64_OPND_SIMD_FPIMM:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
	  {
	    half_conv_t c;
	    c.i = expand_fp_imm (2, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	  }
	  break;
	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
	  {
	    single_conv_t c;
	    c.i = expand_fp_imm (4, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	  }
	  break;
	case 8:	/* e.g. FMOV <Sd>, #<imm>.  */
	  {
	    double_conv_t c;
	    c.i = expand_fp_imm (8, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.d);
	  }
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM7:
      if (optional_operand_p (opcode, idx) == TRUE
	  && (opnd->imm.value ==
	      (int64_t) get_optional_operand_default_value (opcode)))
	/* Omit the operand, e.g. DCPS1.  */
	break;
      snprintf (buf, size, "#0x%x", (unsigned int) opnd->imm.value);
      break;

    case AARCH64_OPND_COND:
    case AARCH64_OPND_COND1:
      snprintf (buf, size, "%s", opnd->cond->names[0]);
      break;

    case AARCH64_OPND_ADDR_ADRP:
      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
	+ opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_PCREL14:
    case AARCH64_OPND_ADDR_PCREL19:
    case AARCH64_OPND_ADDR_PCREL21:
    case AARCH64_OPND_ADDR_PCREL26:
      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_POST:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
	{
	  if (opnd->addr.offset.is_reg)
	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
	  else
	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
	}
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_ADDR_REGOFF:
      print_register_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
	 get_offset_int_reg_name (opnd));
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
      print_immediate_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
      break;

    case AARCH64_OPND_ADDR_UIMM12:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_SYSREG:
      for (i = 0; aarch64_sys_regs[i].name; ++i)
	if (aarch64_sys_regs[i].value == opnd->sysreg
	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
	  break;
      if (aarch64_sys_regs[i].name)
	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
      else
	{
	  /* Implementation defined system register.  */
	  unsigned int value = opnd->sysreg;
	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
		    value & 0x7);
	}
      break;

    case AARCH64_OPND_PSTATEFIELD:
      for (i = 0; aarch64_pstatefields[i].name; ++i)
	if (aarch64_pstatefields[i].value == opnd->pstatefield)
	  break;
      assert (aarch64_pstatefields[i].name);
      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
      break;

    case AARCH64_OPND_SYSREG_AT:
    case AARCH64_OPND_SYSREG_DC:
    case AARCH64_OPND_SYSREG_IC:
    case AARCH64_OPND_SYSREG_TLBI:
      snprintf (buf, size, "%s", opnd->sysins_op->name);
      break;

    case AARCH64_OPND_BARRIER:
      snprintf (buf, size, "%s", opnd->barrier->name);
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in DCPS1.  */
      if (! optional_operand_p (opcode, idx)
	  || (opnd->barrier->value
	      != get_optional_operand_default_value (opcode)))
	snprintf (buf, size, "#0x%x", opnd->barrier->value);
      break;

    case AARCH64_OPND_PRFOP:
      if (opnd->prfop->name != NULL)
	snprintf (buf, size, "%s", opnd->prfop->name);
      else
	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
      break;

    case AARCH64_OPND_BARRIER_PSB:
      snprintf (buf, size, "%s", opnd->hint_option->name);
      break;

    default:
      assert (0);
    }
}
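/* Illustrative sketch (editorial addition, not part of the original
   sources): a disassembler front end might print every operand of a
   decoded instruction roughly as follows; the output callback named here
   is hypothetical.  */
#if 0
static void
print_all_operands (const aarch64_inst *inst, bfd_vma pc)
{
  char str[128];
  int i, pcrel;
  bfd_vma address;

  for (i = 0; i < AARCH64_MAX_OPND_NUM
	      && inst->operands[i].type != AARCH64_OPND_NIL; ++i)
    {
      aarch64_print_operand (str, sizeof (str), pc, inst->opcode,
			     inst->operands, i, &pcrel, &address);
      emit_operand_text (str, pcrel, address);	/* hypothetical */
    }
}
#endif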
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* for 3.9.3 Instructions for Accessing Special Purpose Registers.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* for 3.9.10 System Instructions.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* The CRn/CRm register numbers used in the encodings below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

#define F_DEPRECATED	0x1	/* Deprecated system register.  */
#define F_ARCHEXT	0x2	/* Architecture dependent system register.  */
#define F_HASXT		0x4	/* System instruction register <Xt>
				   operand.  */
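/* Worked example (editorial note): CPENC packs op0:op1:CRn:CRm:op2 into a
   single value whose fields line up with the decoding used for
   AARCH64_OPND_SYSREG above, i.e. op0 at bit 14, op1 at bit 11, CRn at
   bit 7, CRm at bit 3 and op2 at bit 0.  For instance
   CPENC (3,3,C9,C12,0) == 0xdce0, the encoding used for pmcr_el0 below.  */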
/* TODO: there are two more issues that need to be resolved:
   1. handle read-only and write-only system registers;
   2. handle cpu-implementation-defined system registers.  */
3075 const aarch64_sys_reg aarch64_sys_regs
[] =
3077 { "spsr_el1", CPEN_(0,C0
,0), 0 }, /* = spsr_svc */
3078 { "spsr_el12", CPEN_ (5, C0
, 0), F_ARCHEXT
},
3079 { "elr_el1", CPEN_(0,C0
,1), 0 },
3080 { "elr_el12", CPEN_ (5, C0
, 1), F_ARCHEXT
},
3081 { "sp_el0", CPEN_(0,C1
,0), 0 },
3082 { "spsel", CPEN_(0,C2
,0), 0 },
3083 { "daif", CPEN_(3,C2
,1), 0 },
3084 { "currentel", CPEN_(0,C2
,2), 0 }, /* RO */
3085 { "pan", CPEN_(0,C2
,3), F_ARCHEXT
},
3086 { "uao", CPEN_ (0, C2
, 4), F_ARCHEXT
},
3087 { "nzcv", CPEN_(3,C2
,0), 0 },
3088 { "fpcr", CPEN_(3,C4
,0), 0 },
3089 { "fpsr", CPEN_(3,C4
,1), 0 },
3090 { "dspsr_el0", CPEN_(3,C5
,0), 0 },
3091 { "dlr_el0", CPEN_(3,C5
,1), 0 },
3092 { "spsr_el2", CPEN_(4,C0
,0), 0 }, /* = spsr_hyp */
3093 { "elr_el2", CPEN_(4,C0
,1), 0 },
3094 { "sp_el1", CPEN_(4,C1
,0), 0 },
3095 { "spsr_irq", CPEN_(4,C3
,0), 0 },
3096 { "spsr_abt", CPEN_(4,C3
,1), 0 },
3097 { "spsr_und", CPEN_(4,C3
,2), 0 },
3098 { "spsr_fiq", CPEN_(4,C3
,3), 0 },
3099 { "spsr_el3", CPEN_(6,C0
,0), 0 },
3100 { "elr_el3", CPEN_(6,C0
,1), 0 },
3101 { "sp_el2", CPEN_(6,C1
,0), 0 },
3102 { "spsr_svc", CPEN_(0,C0
,0), F_DEPRECATED
}, /* = spsr_el1 */
3103 { "spsr_hyp", CPEN_(4,C0
,0), F_DEPRECATED
}, /* = spsr_el2 */
3104 { "midr_el1", CPENC(3,0,C0
,C0
,0), 0 }, /* RO */
3105 { "ctr_el0", CPENC(3,3,C0
,C0
,1), 0 }, /* RO */
3106 { "mpidr_el1", CPENC(3,0,C0
,C0
,5), 0 }, /* RO */
3107 { "revidr_el1", CPENC(3,0,C0
,C0
,6), 0 }, /* RO */
3108 { "aidr_el1", CPENC(3,1,C0
,C0
,7), 0 }, /* RO */
3109 { "dczid_el0", CPENC(3,3,C0
,C0
,7), 0 }, /* RO */
3110 { "id_dfr0_el1", CPENC(3,0,C0
,C1
,2), 0 }, /* RO */
3111 { "id_pfr0_el1", CPENC(3,0,C0
,C1
,0), 0 }, /* RO */
3112 { "id_pfr1_el1", CPENC(3,0,C0
,C1
,1), 0 }, /* RO */
3113 { "id_afr0_el1", CPENC(3,0,C0
,C1
,3), 0 }, /* RO */
3114 { "id_mmfr0_el1", CPENC(3,0,C0
,C1
,4), 0 }, /* RO */
3115 { "id_mmfr1_el1", CPENC(3,0,C0
,C1
,5), 0 }, /* RO */
3116 { "id_mmfr2_el1", CPENC(3,0,C0
,C1
,6), 0 }, /* RO */
3117 { "id_mmfr3_el1", CPENC(3,0,C0
,C1
,7), 0 }, /* RO */
3118 { "id_mmfr4_el1", CPENC(3,0,C0
,C2
,6), 0 }, /* RO */
3119 { "id_isar0_el1", CPENC(3,0,C0
,C2
,0), 0 }, /* RO */
3120 { "id_isar1_el1", CPENC(3,0,C0
,C2
,1), 0 }, /* RO */
3121 { "id_isar2_el1", CPENC(3,0,C0
,C2
,2), 0 }, /* RO */
3122 { "id_isar3_el1", CPENC(3,0,C0
,C2
,3), 0 }, /* RO */
3123 { "id_isar4_el1", CPENC(3,0,C0
,C2
,4), 0 }, /* RO */
3124 { "id_isar5_el1", CPENC(3,0,C0
,C2
,5), 0 }, /* RO */
3125 { "mvfr0_el1", CPENC(3,0,C0
,C3
,0), 0 }, /* RO */
3126 { "mvfr1_el1", CPENC(3,0,C0
,C3
,1), 0 }, /* RO */
3127 { "mvfr2_el1", CPENC(3,0,C0
,C3
,2), 0 }, /* RO */
3128 { "ccsidr_el1", CPENC(3,1,C0
,C0
,0), 0 }, /* RO */
3129 { "id_aa64pfr0_el1", CPENC(3,0,C0
,C4
,0), 0 }, /* RO */
3130 { "id_aa64pfr1_el1", CPENC(3,0,C0
,C4
,1), 0 }, /* RO */
3131 { "id_aa64dfr0_el1", CPENC(3,0,C0
,C5
,0), 0 }, /* RO */
3132 { "id_aa64dfr1_el1", CPENC(3,0,C0
,C5
,1), 0 }, /* RO */
3133 { "id_aa64isar0_el1", CPENC(3,0,C0
,C6
,0), 0 }, /* RO */
3134 { "id_aa64isar1_el1", CPENC(3,0,C0
,C6
,1), 0 }, /* RO */
3135 { "id_aa64mmfr0_el1", CPENC(3,0,C0
,C7
,0), 0 }, /* RO */
3136 { "id_aa64mmfr1_el1", CPENC(3,0,C0
,C7
,1), 0 }, /* RO */
3137 { "id_aa64mmfr2_el1", CPENC (3, 0, C0
, C7
, 2), F_ARCHEXT
}, /* RO */
3138 { "id_aa64afr0_el1", CPENC(3,0,C0
,C5
,4), 0 }, /* RO */
3139 { "id_aa64afr1_el1", CPENC(3,0,C0
,C5
,5), 0 }, /* RO */
3140 { "clidr_el1", CPENC(3,1,C0
,C0
,1), 0 }, /* RO */
3141 { "csselr_el1", CPENC(3,2,C0
,C0
,0), 0 }, /* RO */
3142 { "vpidr_el2", CPENC(3,4,C0
,C0
,0), 0 },
3143 { "vmpidr_el2", CPENC(3,4,C0
,C0
,5), 0 },
3144 { "sctlr_el1", CPENC(3,0,C1
,C0
,0), 0 },
3145 { "sctlr_el2", CPENC(3,4,C1
,C0
,0), 0 },
3146 { "sctlr_el3", CPENC(3,6,C1
,C0
,0), 0 },
3147 { "sctlr_el12", CPENC (3, 5, C1
, C0
, 0), F_ARCHEXT
},
3148 { "actlr_el1", CPENC(3,0,C1
,C0
,1), 0 },
3149 { "actlr_el2", CPENC(3,4,C1
,C0
,1), 0 },
3150 { "actlr_el3", CPENC(3,6,C1
,C0
,1), 0 },
3151 { "cpacr_el1", CPENC(3,0,C1
,C0
,2), 0 },
3152 { "cpacr_el12", CPENC (3, 5, C1
, C0
, 2), F_ARCHEXT
},
3153 { "cptr_el2", CPENC(3,4,C1
,C1
,2), 0 },
3154 { "cptr_el3", CPENC(3,6,C1
,C1
,2), 0 },
3155 { "scr_el3", CPENC(3,6,C1
,C1
,0), 0 },
3156 { "hcr_el2", CPENC(3,4,C1
,C1
,0), 0 },
3157 { "mdcr_el2", CPENC(3,4,C1
,C1
,1), 0 },
3158 { "mdcr_el3", CPENC(3,6,C1
,C3
,1), 0 },
3159 { "hstr_el2", CPENC(3,4,C1
,C1
,3), 0 },
3160 { "hacr_el2", CPENC(3,4,C1
,C1
,7), 0 },
3161 { "ttbr0_el1", CPENC(3,0,C2
,C0
,0), 0 },
3162 { "ttbr1_el1", CPENC(3,0,C2
,C0
,1), 0 },
3163 { "ttbr0_el2", CPENC(3,4,C2
,C0
,0), 0 },
3164 { "ttbr1_el2", CPENC (3, 4, C2
, C0
, 1), F_ARCHEXT
},
3165 { "ttbr0_el3", CPENC(3,6,C2
,C0
,0), 0 },
3166 { "ttbr0_el12", CPENC (3, 5, C2
, C0
, 0), F_ARCHEXT
},
3167 { "ttbr1_el12", CPENC (3, 5, C2
, C0
, 1), F_ARCHEXT
},
3168 { "vttbr_el2", CPENC(3,4,C2
,C1
,0), 0 },
3169 { "tcr_el1", CPENC(3,0,C2
,C0
,2), 0 },
3170 { "tcr_el2", CPENC(3,4,C2
,C0
,2), 0 },
3171 { "tcr_el3", CPENC(3,6,C2
,C0
,2), 0 },
3172 { "tcr_el12", CPENC (3, 5, C2
, C0
, 2), F_ARCHEXT
},
3173 { "vtcr_el2", CPENC(3,4,C2
,C1
,2), 0 },
3174 { "afsr0_el1", CPENC(3,0,C5
,C1
,0), 0 },
3175 { "afsr1_el1", CPENC(3,0,C5
,C1
,1), 0 },
3176 { "afsr0_el2", CPENC(3,4,C5
,C1
,0), 0 },
3177 { "afsr1_el2", CPENC(3,4,C5
,C1
,1), 0 },
3178 { "afsr0_el3", CPENC(3,6,C5
,C1
,0), 0 },
3179 { "afsr0_el12", CPENC (3, 5, C5
, C1
, 0), F_ARCHEXT
},
3180 { "afsr1_el3", CPENC(3,6,C5
,C1
,1), 0 },
3181 { "afsr1_el12", CPENC (3, 5, C5
, C1
, 1), F_ARCHEXT
},
3182 { "esr_el1", CPENC(3,0,C5
,C2
,0), 0 },
3183 { "esr_el2", CPENC(3,4,C5
,C2
,0), 0 },
3184 { "esr_el3", CPENC(3,6,C5
,C2
,0), 0 },
3185 { "esr_el12", CPENC (3, 5, C5
, C2
, 0), F_ARCHEXT
},
3186 { "vsesr_el2", CPENC (3, 4, C5
, C2
, 3), F_ARCHEXT
}, /* RO */
3187 { "fpexc32_el2", CPENC(3,4,C5
,C3
,0), 0 },
3188 { "erridr_el1", CPENC (3, 0, C5
, C3
, 0), F_ARCHEXT
}, /* RO */
3189 { "errselr_el1", CPENC (3, 0, C5
, C3
, 1), F_ARCHEXT
},
3190 { "erxfr_el1", CPENC (3, 0, C5
, C4
, 0), F_ARCHEXT
}, /* RO */
3191 { "erxctlr_el1", CPENC (3, 0, C5
, C4
, 1), F_ARCHEXT
},
3192 { "erxstatus_el1", CPENC (3, 0, C5
, C4
, 2), F_ARCHEXT
},
3193 { "erxaddr_el1", CPENC (3, 0, C5
, C4
, 3), F_ARCHEXT
},
3194 { "erxmisc0_el1", CPENC (3, 0, C5
, C5
, 0), F_ARCHEXT
},
3195 { "erxmisc1_el1", CPENC (3, 0, C5
, C5
, 1), F_ARCHEXT
},
3196 { "far_el1", CPENC(3,0,C6
,C0
,0), 0 },
3197 { "far_el2", CPENC(3,4,C6
,C0
,0), 0 },
3198 { "far_el3", CPENC(3,6,C6
,C0
,0), 0 },
3199 { "far_el12", CPENC (3, 5, C6
, C0
, 0), F_ARCHEXT
},
3200 { "hpfar_el2", CPENC(3,4,C6
,C0
,4), 0 },
3201 { "par_el1", CPENC(3,0,C7
,C4
,0), 0 },
3202 { "mair_el1", CPENC(3,0,C10
,C2
,0), 0 },
3203 { "mair_el2", CPENC(3,4,C10
,C2
,0), 0 },
3204 { "mair_el3", CPENC(3,6,C10
,C2
,0), 0 },
3205 { "mair_el12", CPENC (3, 5, C10
, C2
, 0), F_ARCHEXT
},
3206 { "amair_el1", CPENC(3,0,C10
,C3
,0), 0 },
3207 { "amair_el2", CPENC(3,4,C10
,C3
,0), 0 },
3208 { "amair_el3", CPENC(3,6,C10
,C3
,0), 0 },
3209 { "amair_el12", CPENC (3, 5, C10
, C3
, 0), F_ARCHEXT
},
3210 { "vbar_el1", CPENC(3,0,C12
,C0
,0), 0 },
3211 { "vbar_el2", CPENC(3,4,C12
,C0
,0), 0 },
3212 { "vbar_el3", CPENC(3,6,C12
,C0
,0), 0 },
3213 { "vbar_el12", CPENC (3, 5, C12
, C0
, 0), F_ARCHEXT
},
3214 { "rvbar_el1", CPENC(3,0,C12
,C0
,1), 0 }, /* RO */
3215 { "rvbar_el2", CPENC(3,4,C12
,C0
,1), 0 }, /* RO */
3216 { "rvbar_el3", CPENC(3,6,C12
,C0
,1), 0 }, /* RO */
3217 { "rmr_el1", CPENC(3,0,C12
,C0
,2), 0 },
3218 { "rmr_el2", CPENC(3,4,C12
,C0
,2), 0 },
3219 { "rmr_el3", CPENC(3,6,C12
,C0
,2), 0 },
3220 { "isr_el1", CPENC(3,0,C12
,C1
,0), 0 }, /* RO */
3221 { "disr_el1", CPENC (3, 0, C12
, C1
, 1), F_ARCHEXT
},
3222 { "vdisr_el2", CPENC (3, 4, C12
, C1
, 1), F_ARCHEXT
},
3223 { "contextidr_el1", CPENC(3,0,C13
,C0
,1), 0 },
3224 { "contextidr_el2", CPENC (3, 4, C13
, C0
, 1), F_ARCHEXT
},
3225 { "contextidr_el12", CPENC (3, 5, C13
, C0
, 1), F_ARCHEXT
},
3226 { "tpidr_el0", CPENC(3,3,C13
,C0
,2), 0 },
3227 { "tpidrro_el0", CPENC(3,3,C13
,C0
,3), 0 }, /* RO */
3228 { "tpidr_el1", CPENC(3,0,C13
,C0
,4), 0 },
3229 { "tpidr_el2", CPENC(3,4,C13
,C0
,2), 0 },
3230 { "tpidr_el3", CPENC(3,6,C13
,C0
,2), 0 },
3231 { "teecr32_el1", CPENC(2,2,C0
, C0
,0), 0 }, /* See section 3.9.7.1 */
3232 { "cntfrq_el0", CPENC(3,3,C14
,C0
,0), 0 }, /* RO */
3233 { "cntpct_el0", CPENC(3,3,C14
,C0
,1), 0 }, /* RO */
3234 { "cntvct_el0", CPENC(3,3,C14
,C0
,2), 0 }, /* RO */
3235 { "cntvoff_el2", CPENC(3,4,C14
,C0
,3), 0 },
3236 { "cntkctl_el1", CPENC(3,0,C14
,C1
,0), 0 },
3237 { "cntkctl_el12", CPENC (3, 5, C14
, C1
, 0), F_ARCHEXT
},
3238 { "cnthctl_el2", CPENC(3,4,C14
,C1
,0), 0 },
3239 { "cntp_tval_el0", CPENC(3,3,C14
,C2
,0), 0 },
3240 { "cntp_tval_el02", CPENC (3, 5, C14
, C2
, 0), F_ARCHEXT
},
3241 { "cntp_ctl_el0", CPENC(3,3,C14
,C2
,1), 0 },
3242 { "cntp_ctl_el02", CPENC (3, 5, C14
, C2
, 1), F_ARCHEXT
},
3243 { "cntp_cval_el0", CPENC(3,3,C14
,C2
,2), 0 },
3244 { "cntp_cval_el02", CPENC (3, 5, C14
, C2
, 2), F_ARCHEXT
},
3245 { "cntv_tval_el0", CPENC(3,3,C14
,C3
,0), 0 },
3246 { "cntv_tval_el02", CPENC (3, 5, C14
, C3
, 0), F_ARCHEXT
},
3247 { "cntv_ctl_el0", CPENC(3,3,C14
,C3
,1), 0 },
3248 { "cntv_ctl_el02", CPENC (3, 5, C14
, C3
, 1), F_ARCHEXT
},
3249 { "cntv_cval_el0", CPENC(3,3,C14
,C3
,2), 0 },
3250 { "cntv_cval_el02", CPENC (3, 5, C14
, C3
, 2), F_ARCHEXT
},
3251 { "cnthp_tval_el2", CPENC(3,4,C14
,C2
,0), 0 },
3252 { "cnthp_ctl_el2", CPENC(3,4,C14
,C2
,1), 0 },
3253 { "cnthp_cval_el2", CPENC(3,4,C14
,C2
,2), 0 },
3254 { "cntps_tval_el1", CPENC(3,7,C14
,C2
,0), 0 },
3255 { "cntps_ctl_el1", CPENC(3,7,C14
,C2
,1), 0 },
3256 { "cntps_cval_el1", CPENC(3,7,C14
,C2
,2), 0 },
3257 { "cnthv_tval_el2", CPENC (3, 4, C14
, C3
, 0), F_ARCHEXT
},
3258 { "cnthv_ctl_el2", CPENC (3, 4, C14
, C3
, 1), F_ARCHEXT
},
3259 { "cnthv_cval_el2", CPENC (3, 4, C14
, C3
, 2), F_ARCHEXT
},
3260 { "dacr32_el2", CPENC(3,4,C3
,C0
,0), 0 },
3261 { "ifsr32_el2", CPENC(3,4,C5
,C0
,1), 0 },
3262 { "teehbr32_el1", CPENC(2,2,C1
,C0
,0), 0 },
3263 { "sder32_el3", CPENC(3,6,C1
,C1
,1), 0 },
3264 { "mdscr_el1", CPENC(2,0,C0
, C2
, 2), 0 },
3265 { "mdccsr_el0", CPENC(2,3,C0
, C1
, 0), 0 }, /* r */
3266 { "mdccint_el1", CPENC(2,0,C0
, C2
, 0), 0 },
3267 { "dbgdtr_el0", CPENC(2,3,C0
, C4
, 0), 0 },
3268 { "dbgdtrrx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* r */
3269 { "dbgdtrtx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* w */
3270 { "osdtrrx_el1", CPENC(2,0,C0
, C0
, 2), 0 }, /* r */
3271 { "osdtrtx_el1", CPENC(2,0,C0
, C3
, 2), 0 }, /* w */
3272 { "oseccr_el1", CPENC(2,0,C0
, C6
, 2), 0 },
3273 { "dbgvcr32_el2", CPENC(2,4,C0
, C7
, 0), 0 },
3274 { "dbgbvr0_el1", CPENC(2,0,C0
, C0
, 4), 0 },
3275 { "dbgbvr1_el1", CPENC(2,0,C0
, C1
, 4), 0 },
3276 { "dbgbvr2_el1", CPENC(2,0,C0
, C2
, 4), 0 },
3277 { "dbgbvr3_el1", CPENC(2,0,C0
, C3
, 4), 0 },
3278 { "dbgbvr4_el1", CPENC(2,0,C0
, C4
, 4), 0 },
3279 { "dbgbvr5_el1", CPENC(2,0,C0
, C5
, 4), 0 },
3280 { "dbgbvr6_el1", CPENC(2,0,C0
, C6
, 4), 0 },
3281 { "dbgbvr7_el1", CPENC(2,0,C0
, C7
, 4), 0 },
3282 { "dbgbvr8_el1", CPENC(2,0,C0
, C8
, 4), 0 },
3283 { "dbgbvr9_el1", CPENC(2,0,C0
, C9
, 4), 0 },
3284 { "dbgbvr10_el1", CPENC(2,0,C0
, C10
,4), 0 },
3285 { "dbgbvr11_el1", CPENC(2,0,C0
, C11
,4), 0 },
3286 { "dbgbvr12_el1", CPENC(2,0,C0
, C12
,4), 0 },
3287 { "dbgbvr13_el1", CPENC(2,0,C0
, C13
,4), 0 },
3288 { "dbgbvr14_el1", CPENC(2,0,C0
, C14
,4), 0 },
3289 { "dbgbvr15_el1", CPENC(2,0,C0
, C15
,4), 0 },
3290 { "dbgbcr0_el1", CPENC(2,0,C0
, C0
, 5), 0 },
3291 { "dbgbcr1_el1", CPENC(2,0,C0
, C1
, 5), 0 },
3292 { "dbgbcr2_el1", CPENC(2,0,C0
, C2
, 5), 0 },
3293 { "dbgbcr3_el1", CPENC(2,0,C0
, C3
, 5), 0 },
3294 { "dbgbcr4_el1", CPENC(2,0,C0
, C4
, 5), 0 },
3295 { "dbgbcr5_el1", CPENC(2,0,C0
, C5
, 5), 0 },
3296 { "dbgbcr6_el1", CPENC(2,0,C0
, C6
, 5), 0 },
3297 { "dbgbcr7_el1", CPENC(2,0,C0
, C7
, 5), 0 },
3298 { "dbgbcr8_el1", CPENC(2,0,C0
, C8
, 5), 0 },
3299 { "dbgbcr9_el1", CPENC(2,0,C0
, C9
, 5), 0 },
3300 { "dbgbcr10_el1", CPENC(2,0,C0
, C10
,5), 0 },
3301 { "dbgbcr11_el1", CPENC(2,0,C0
, C11
,5), 0 },
3302 { "dbgbcr12_el1", CPENC(2,0,C0
, C12
,5), 0 },
3303 { "dbgbcr13_el1", CPENC(2,0,C0
, C13
,5), 0 },
3304 { "dbgbcr14_el1", CPENC(2,0,C0
, C14
,5), 0 },
3305 { "dbgbcr15_el1", CPENC(2,0,C0
, C15
,5), 0 },
3306 { "dbgwvr0_el1", CPENC(2,0,C0
, C0
, 6), 0 },
3307 { "dbgwvr1_el1", CPENC(2,0,C0
, C1
, 6), 0 },
3308 { "dbgwvr2_el1", CPENC(2,0,C0
, C2
, 6), 0 },
3309 { "dbgwvr3_el1", CPENC(2,0,C0
, C3
, 6), 0 },
3310 { "dbgwvr4_el1", CPENC(2,0,C0
, C4
, 6), 0 },
3311 { "dbgwvr5_el1", CPENC(2,0,C0
, C5
, 6), 0 },
3312 { "dbgwvr6_el1", CPENC(2,0,C0
, C6
, 6), 0 },
3313 { "dbgwvr7_el1", CPENC(2,0,C0
, C7
, 6), 0 },
3314 { "dbgwvr8_el1", CPENC(2,0,C0
, C8
, 6), 0 },
3315 { "dbgwvr9_el1", CPENC(2,0,C0
, C9
, 6), 0 },
3316 { "dbgwvr10_el1", CPENC(2,0,C0
, C10
,6), 0 },
3317 { "dbgwvr11_el1", CPENC(2,0,C0
, C11
,6), 0 },
3318 { "dbgwvr12_el1", CPENC(2,0,C0
, C12
,6), 0 },
3319 { "dbgwvr13_el1", CPENC(2,0,C0
, C13
,6), 0 },
3320 { "dbgwvr14_el1", CPENC(2,0,C0
, C14
,6), 0 },
3321 { "dbgwvr15_el1", CPENC(2,0,C0
, C15
,6), 0 },
3322 { "dbgwcr0_el1", CPENC(2,0,C0
, C0
, 7), 0 },
3323 { "dbgwcr1_el1", CPENC(2,0,C0
, C1
, 7), 0 },
3324 { "dbgwcr2_el1", CPENC(2,0,C0
, C2
, 7), 0 },
3325 { "dbgwcr3_el1", CPENC(2,0,C0
, C3
, 7), 0 },
3326 { "dbgwcr4_el1", CPENC(2,0,C0
, C4
, 7), 0 },
3327 { "dbgwcr5_el1", CPENC(2,0,C0
, C5
, 7), 0 },
3328 { "dbgwcr6_el1", CPENC(2,0,C0
, C6
, 7), 0 },
3329 { "dbgwcr7_el1", CPENC(2,0,C0
, C7
, 7), 0 },
3330 { "dbgwcr8_el1", CPENC(2,0,C0
, C8
, 7), 0 },
3331 { "dbgwcr9_el1", CPENC(2,0,C0
, C9
, 7), 0 },
3332 { "dbgwcr10_el1", CPENC(2,0,C0
, C10
,7), 0 },
3333 { "dbgwcr11_el1", CPENC(2,0,C0
, C11
,7), 0 },
3334 { "dbgwcr12_el1", CPENC(2,0,C0
, C12
,7), 0 },
3335 { "dbgwcr13_el1", CPENC(2,0,C0
, C13
,7), 0 },
3336 { "dbgwcr14_el1", CPENC(2,0,C0
, C14
,7), 0 },
3337 { "dbgwcr15_el1", CPENC(2,0,C0
, C15
,7), 0 },
3338 { "mdrar_el1", CPENC(2,0,C1
, C0
, 0), 0 }, /* r */
3339 { "oslar_el1", CPENC(2,0,C1
, C0
, 4), 0 }, /* w */
3340 { "oslsr_el1", CPENC(2,0,C1
, C1
, 4), 0 }, /* r */
3341 { "osdlr_el1", CPENC(2,0,C1
, C3
, 4), 0 },
3342 { "dbgprcr_el1", CPENC(2,0,C1
, C4
, 4), 0 },
3343 { "dbgclaimset_el1", CPENC(2,0,C7
, C8
, 6), 0 },
3344 { "dbgclaimclr_el1", CPENC(2,0,C7
, C9
, 6), 0 },
3345 { "dbgauthstatus_el1", CPENC(2,0,C7
, C14
,6), 0 }, /* r */
3346 { "pmblimitr_el1", CPENC (3, 0, C9
, C10
, 0), F_ARCHEXT
}, /* rw */
3347 { "pmbptr_el1", CPENC (3, 0, C9
, C10
, 1), F_ARCHEXT
}, /* rw */
3348 { "pmbsr_el1", CPENC (3, 0, C9
, C10
, 3), F_ARCHEXT
}, /* rw */
3349 { "pmbidr_el1", CPENC (3, 0, C9
, C10
, 7), F_ARCHEXT
}, /* ro */
3350 { "pmscr_el1", CPENC (3, 0, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3351 { "pmsicr_el1", CPENC (3, 0, C9
, C9
, 2), F_ARCHEXT
}, /* rw */
3352 { "pmsirr_el1", CPENC (3, 0, C9
, C9
, 3), F_ARCHEXT
}, /* rw */
3353 { "pmsfcr_el1", CPENC (3, 0, C9
, C9
, 4), F_ARCHEXT
}, /* rw */
3354 { "pmsevfr_el1", CPENC (3, 0, C9
, C9
, 5), F_ARCHEXT
}, /* rw */
3355 { "pmslatfr_el1", CPENC (3, 0, C9
, C9
, 6), F_ARCHEXT
}, /* rw */
3356 { "pmsidr_el1", CPENC (3, 0, C9
, C9
, 7), F_ARCHEXT
}, /* ro */
3357 { "pmscr_el2", CPENC (3, 4, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3358 { "pmscr_el12", CPENC (3, 5, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3359 { "pmcr_el0", CPENC(3,3,C9
,C12
, 0), 0 },
3360 { "pmcntenset_el0", CPENC(3,3,C9
,C12
, 1), 0 },
3361 { "pmcntenclr_el0", CPENC(3,3,C9
,C12
, 2), 0 },
3362 { "pmovsclr_el0", CPENC(3,3,C9
,C12
, 3), 0 },
3363 { "pmswinc_el0", CPENC(3,3,C9
,C12
, 4), 0 }, /* w */
3364 { "pmselr_el0", CPENC(3,3,C9
,C12
, 5), 0 },
3365 { "pmceid0_el0", CPENC(3,3,C9
,C12
, 6), 0 }, /* r */
3366 { "pmceid1_el0", CPENC(3,3,C9
,C12
, 7), 0 }, /* r */
3367 { "pmccntr_el0", CPENC(3,3,C9
,C13
, 0), 0 },
3368 { "pmxevtyper_el0", CPENC(3,3,C9
,C13
, 1), 0 },
3369 { "pmxevcntr_el0", CPENC(3,3,C9
,C13
, 2), 0 },
3370 { "pmuserenr_el0", CPENC(3,3,C9
,C14
, 0), 0 },
3371 { "pmintenset_el1", CPENC(3,0,C9
,C14
, 1), 0 },
3372 { "pmintenclr_el1", CPENC(3,0,C9
,C14
, 2), 0 },
3373 { "pmovsset_el0", CPENC(3,3,C9
,C14
, 3), 0 },
3374 { "pmevcntr0_el0", CPENC(3,3,C14
,C8
, 0), 0 },
3375 { "pmevcntr1_el0", CPENC(3,3,C14
,C8
, 1), 0 },
3376 { "pmevcntr2_el0", CPENC(3,3,C14
,C8
, 2), 0 },
3377 { "pmevcntr3_el0", CPENC(3,3,C14
,C8
, 3), 0 },
3378 { "pmevcntr4_el0", CPENC(3,3,C14
,C8
, 4), 0 },
3379 { "pmevcntr5_el0", CPENC(3,3,C14
,C8
, 5), 0 },
3380 { "pmevcntr6_el0", CPENC(3,3,C14
,C8
, 6), 0 },
3381 { "pmevcntr7_el0", CPENC(3,3,C14
,C8
, 7), 0 },
3382 { "pmevcntr8_el0", CPENC(3,3,C14
,C9
, 0), 0 },
3383 { "pmevcntr9_el0", CPENC(3,3,C14
,C9
, 1), 0 },
3384 { "pmevcntr10_el0", CPENC(3,3,C14
,C9
, 2), 0 },
3385 { "pmevcntr11_el0", CPENC(3,3,C14
,C9
, 3), 0 },
3386 { "pmevcntr12_el0", CPENC(3,3,C14
,C9
, 4), 0 },
3387 { "pmevcntr13_el0", CPENC(3,3,C14
,C9
, 5), 0 },
3388 { "pmevcntr14_el0", CPENC(3,3,C14
,C9
, 6), 0 },
3389 { "pmevcntr15_el0", CPENC(3,3,C14
,C9
, 7), 0 },
3390 { "pmevcntr16_el0", CPENC(3,3,C14
,C10
,0), 0 },
3391 { "pmevcntr17_el0", CPENC(3,3,C14
,C10
,1), 0 },
3392 { "pmevcntr18_el0", CPENC(3,3,C14
,C10
,2), 0 },
3393 { "pmevcntr19_el0", CPENC(3,3,C14
,C10
,3), 0 },
3394 { "pmevcntr20_el0", CPENC(3,3,C14
,C10
,4), 0 },
3395 { "pmevcntr21_el0", CPENC(3,3,C14
,C10
,5), 0 },
3396 { "pmevcntr22_el0", CPENC(3,3,C14
,C10
,6), 0 },
3397 { "pmevcntr23_el0", CPENC(3,3,C14
,C10
,7), 0 },
3398 { "pmevcntr24_el0", CPENC(3,3,C14
,C11
,0), 0 },
3399 { "pmevcntr25_el0", CPENC(3,3,C14
,C11
,1), 0 },
3400 { "pmevcntr26_el0", CPENC(3,3,C14
,C11
,2), 0 },
3401 { "pmevcntr27_el0", CPENC(3,3,C14
,C11
,3), 0 },
3402 { "pmevcntr28_el0", CPENC(3,3,C14
,C11
,4), 0 },
3403 { "pmevcntr29_el0", CPENC(3,3,C14
,C11
,5), 0 },
3404 { "pmevcntr30_el0", CPENC(3,3,C14
,C11
,6), 0 },
3405 { "pmevtyper0_el0", CPENC(3,3,C14
,C12
,0), 0 },
3406 { "pmevtyper1_el0", CPENC(3,3,C14
,C12
,1), 0 },
3407 { "pmevtyper2_el0", CPENC(3,3,C14
,C12
,2), 0 },
3408 { "pmevtyper3_el0", CPENC(3,3,C14
,C12
,3), 0 },
3409 { "pmevtyper4_el0", CPENC(3,3,C14
,C12
,4), 0 },
3410 { "pmevtyper5_el0", CPENC(3,3,C14
,C12
,5), 0 },
3411 { "pmevtyper6_el0", CPENC(3,3,C14
,C12
,6), 0 },
3412 { "pmevtyper7_el0", CPENC(3,3,C14
,C12
,7), 0 },
3413 { "pmevtyper8_el0", CPENC(3,3,C14
,C13
,0), 0 },
3414 { "pmevtyper9_el0", CPENC(3,3,C14
,C13
,1), 0 },
3415 { "pmevtyper10_el0", CPENC(3,3,C14
,C13
,2), 0 },
3416 { "pmevtyper11_el0", CPENC(3,3,C14
,C13
,3), 0 },
3417 { "pmevtyper12_el0", CPENC(3,3,C14
,C13
,4), 0 },
3418 { "pmevtyper13_el0", CPENC(3,3,C14
,C13
,5), 0 },
3419 { "pmevtyper14_el0", CPENC(3,3,C14
,C13
,6), 0 },
3420 { "pmevtyper15_el0", CPENC(3,3,C14
,C13
,7), 0 },
3421 { "pmevtyper16_el0", CPENC(3,3,C14
,C14
,0), 0 },
3422 { "pmevtyper17_el0", CPENC(3,3,C14
,C14
,1), 0 },
3423 { "pmevtyper18_el0", CPENC(3,3,C14
,C14
,2), 0 },
3424 { "pmevtyper19_el0", CPENC(3,3,C14
,C14
,3), 0 },
3425 { "pmevtyper20_el0", CPENC(3,3,C14
,C14
,4), 0 },
3426 { "pmevtyper21_el0", CPENC(3,3,C14
,C14
,5), 0 },
3427 { "pmevtyper22_el0", CPENC(3,3,C14
,C14
,6), 0 },
3428 { "pmevtyper23_el0", CPENC(3,3,C14
,C14
,7), 0 },
3429 { "pmevtyper24_el0", CPENC(3,3,C14
,C15
,0), 0 },
3430 { "pmevtyper25_el0", CPENC(3,3,C14
,C15
,1), 0 },
3431 { "pmevtyper26_el0", CPENC(3,3,C14
,C15
,2), 0 },
3432 { "pmevtyper27_el0", CPENC(3,3,C14
,C15
,3), 0 },
3433 { "pmevtyper28_el0", CPENC(3,3,C14
,C15
,4), 0 },
3434 { "pmevtyper29_el0", CPENC(3,3,C14
,C15
,5), 0 },
3435 { "pmevtyper30_el0", CPENC(3,3,C14
,C15
,6), 0 },
3436 { "pmccfiltr_el0", CPENC(3,3,C14
,C15
,7), 0 },
3437 { 0, CPENC(0,0,0,0,0), 0 },
bfd_boolean
aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
{
  return (reg->flags & F_DEPRECATED) != 0;
}
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  return TRUE;
}
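/* For illustration (editorial note): with a feature set lacking
   AARCH64_FEATURE_PAN, the check above rejects the "pan" entry
   (CPEN_ (0,C2,3)) from aarch64_sys_regs, so a caller can diagnose a
   read such as "mrs x0, pan" as requiring the PAN extension.  */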
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",   0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan",     0x04, F_ARCHEXT },
  { "uao",     0x03, F_ARCHEXT },
  { 0, CPENC(0,0,0,0,0), 0 },
};
bfd_boolean
aarch64_pstatefield_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x04
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* UAO.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x03
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  return TRUE;
}
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",   CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",  CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",   CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",  CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",   CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",  CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",  CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac", CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",  CPENS (0, C7, C14, 2), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",  CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",  CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",  CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",  CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",  CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",  CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",  CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",  CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",     CPENS(0,C8,C7,0), 0 },
    { "vae1",        CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",      CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",       CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is",   CPENS(0,C8,C3,0), 0 },
    { "vae1is",      CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",    CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",     CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is",   CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",  CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",     CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",    CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",        CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",      CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",  CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",        CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",      CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",       CPENS(4,C8,C7,0), 0 },
    { "alle2is",     CPENS(4,C8,C3,0), 0 },
    { "alle1",       CPENS(4,C8,C7,4), 0 },
    { "alle1is",     CPENS(4,C8,C3,4), 0 },
    { "alle3",       CPENS(6,C8,C7,0), 0 },
    { "alle3is",     CPENS(6,C8,C3,0), 0 },
    { "vale1is",     CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",     CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",     CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",    CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",       CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",       CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",       CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",      CPENS (0, C8, C7, 7), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
bfd_boolean
aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
{
  return (sys_ins_reg->flags & F_HASXT) != 0;
}
bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_ins_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C12, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg->value == CPENS (0, C7, C9, 0)
       || reg->value == CPENS (0, C7, C9, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  return TRUE;
}
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
static bfd_boolean
verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
	      const aarch64_insn insn)
{
  int t  = BITS (insn, 4, 0);
  int n  = BITS (insn, 9, 5);
  int t2 = BITS (insn, 14, 10);

  if (BIT (insn, 23))
    {
      /* Write back enabled.  */
      if ((t == n || t2 == n) && n != 31)
	return FALSE;
    }

  if (BIT (insn, 22))
    {
      /* Load.  */
      if (t == t2)
	return FALSE;
    }

  return TRUE;
}
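/* For illustration (editorial note): with the checks above, a writeback
   form whose base register equals a transfer register, e.g.
   "ldpsw x1, x2, [x1], #8", is rejected (FALSE), while
   "ldpsw x1, x2, [x3], #8" passes.  */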
/* Include the opcode description table as well as the operand description
   table.  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"