1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
31 #include "aarch64-opc.h"
#ifdef DEBUG_AARCH64
/* Non-zero when verbose qualifier-matching traces should be printed.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37 /* Helper functions to determine which operand to be used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
43 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
44 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
51 return ((qualifier
>= AARCH64_OPND_QLF_S_B
52 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
/* Kinds of operand-qualifier patterns used to pick which operand carries
   the significant size:Q information (see significant_operand_index).  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};
/* Map each data pattern to the index of the operand whose qualifier
   determines the size:Q encoding.  Indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
82 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers
[0] == qualifiers
[1]
87 && vector_qualifier_p (qualifiers
[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers
[0])
89 == aarch64_get_qualifier_esize (qualifiers
[1]))
90 && (aarch64_get_qualifier_esize (qualifiers
[0])
91 == aarch64_get_qualifier_esize (qualifiers
[2])))
92 return DP_VECTOR_3SAME
;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
96 if (vector_qualifier_p (qualifiers
[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers
[0])
99 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
100 return DP_VECTOR_LONG
;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers
[0] == qualifiers
[1]
103 && vector_qualifier_p (qualifiers
[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers
[0])
106 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers
[0])
108 == aarch64_get_qualifier_esize (qualifiers
[1])))
109 return DP_VECTOR_WIDE
;
111 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers
[1]) == TRUE
115 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
116 return DP_VECTOR_ACROSS_LANES
;
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time when we need to select an operand. We can
126 either buffer the caculated the result or statically generate the data,
127 however, it is not obvious that the optimization will bring significant
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
134 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
137 const aarch64_field fields
[] =
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
196 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
197 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
198 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
199 { 31, 1 }, /* b5: in the test bit and branch instructions. */
200 { 19, 5 }, /* b40: in the test bit and branch instructions. */
201 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
202 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
203 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
204 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
205 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
206 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
207 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
208 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
209 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
210 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
211 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
212 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
213 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
214 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
215 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
216 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
217 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
220 enum aarch64_operand_class
221 aarch64_get_operand_class (enum aarch64_opnd type
)
223 return aarch64_operands
[type
].op_class
;
227 aarch64_get_operand_name (enum aarch64_opnd type
)
229 return aarch64_operands
[type
].name
;
232 /* Get operand description string.
233 This is usually for the diagnosis purpose. */
235 aarch64_get_operand_desc (enum aarch64_opnd type
)
237 return aarch64_operands
[type
].desc
;
240 /* Table of all conditional affixes. */
241 const aarch64_cond aarch64_conds
[16] =
246 {{"cc", "lo", "ul"}, 0x3},
262 get_cond_from_value (aarch64_insn value
)
265 return &aarch64_conds
[(unsigned int) value
];
269 get_inverted_cond (const aarch64_cond
*cond
)
271 return &aarch64_conds
[cond
->value
^ 0x1];
274 /* Table describing the operand extension/shifting operators; indexed by
275 enum aarch64_modifier_kind.
277 The value column provides the most common values for encoding modifiers,
278 which enables table-driven encoding/decoding for the modifiers. */
279 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
298 enum aarch64_modifier_kind
299 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
301 return desc
- aarch64_operand_modifiers
;
305 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
307 return aarch64_operand_modifiers
[kind
].value
;
310 enum aarch64_modifier_kind
311 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
312 bfd_boolean extend_p
)
314 if (extend_p
== TRUE
)
315 return AARCH64_MOD_UXTB
+ value
;
317 return AARCH64_MOD_LSL
- value
;
321 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
323 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
327 static inline bfd_boolean
328 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
330 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
334 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
354 /* Table describing the operands supported by the aliases of the HINT
357 The name column is the operand that is accepted for the alias. The value
358 column is the hint number of the alias. The list of operands is terminated
359 by NULL in the name column. */
361 const struct aarch64_name_value_pair aarch64_hint_options
[] =
363 { "csync", 0x11 }, /* PSB CSYNC. */
367 /* op -> op: load = 0 instruction = 1 store = 2
369 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
370 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
371 const struct aarch64_name_value_pair aarch64_prfops
[32] =
373 { "pldl1keep", B(0, 1, 0) },
374 { "pldl1strm", B(0, 1, 1) },
375 { "pldl2keep", B(0, 2, 0) },
376 { "pldl2strm", B(0, 2, 1) },
377 { "pldl3keep", B(0, 3, 0) },
378 { "pldl3strm", B(0, 3, 1) },
381 { "plil1keep", B(1, 1, 0) },
382 { "plil1strm", B(1, 1, 1) },
383 { "plil2keep", B(1, 2, 0) },
384 { "plil2strm", B(1, 2, 1) },
385 { "plil3keep", B(1, 3, 0) },
386 { "plil3strm", B(1, 3, 1) },
389 { "pstl1keep", B(2, 1, 0) },
390 { "pstl1strm", B(2, 1, 1) },
391 { "pstl2keep", B(2, 2, 0) },
392 { "pstl2strm", B(2, 2, 1) },
393 { "pstl3keep", B(2, 3, 0) },
394 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 iff LOW <= VALUE <= HIGH (inclusive on both ends).  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return 1 iff VALUE is a multiple of ALIGN; ALIGN must be a power of
   two for the bit-mask test to be valid.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return ((value & (align - 1)) == 0) ? 1 : 0;
}
/* A signed value fits in a field.
   Return 1 iff VALUE can be represented as a WIDTH-bit two's-complement
   number, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t)1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* An unsigned value fits in a field.
   Return 1 iff 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t)1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
450 /* Return 1 if OPERAND is SP or WSP. */
452 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
454 return ((aarch64_get_operand_class (operand
->type
)
455 == AARCH64_OPND_CLASS_INT_REG
)
456 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
457 && operand
->reg
.regno
== 31);
460 /* Return 1 if OPERAND is XZR or WZP. */
462 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
464 return ((aarch64_get_operand_class (operand
->type
)
465 == AARCH64_OPND_CLASS_INT_REG
)
466 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
467 && operand
->reg
.regno
== 31);
470 /* Return true if the operand *OPERAND that has the operand code
471 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
472 qualified by the qualifier TARGET. */
475 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
476 aarch64_opnd_qualifier_t target
)
478 switch (operand
->qualifier
)
480 case AARCH64_OPND_QLF_W
:
481 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
484 case AARCH64_OPND_QLF_X
:
485 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
488 case AARCH64_OPND_QLF_WSP
:
489 if (target
== AARCH64_OPND_QLF_W
490 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
493 case AARCH64_OPND_QLF_SP
:
494 if (target
== AARCH64_OPND_QLF_X
495 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
505 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
506 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
508 Return NIL if more than one expected qualifiers are found. */
510 aarch64_opnd_qualifier_t
511 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
513 const aarch64_opnd_qualifier_t known_qlf
,
520 When the known qualifier is NIL, we have to assume that there is only
521 one qualifier sequence in the *QSEQ_LIST and return the corresponding
522 qualifier directly. One scenario is that for instruction
523 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
524 which has only one possible valid qualifier sequence
526 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
527 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
529 Because the qualifier NIL has dual roles in the qualifier sequence:
530 it can mean no qualifier for the operand, or the qualifer sequence is
531 not in use (when all qualifiers in the sequence are NILs), we have to
532 handle this special case here. */
533 if (known_qlf
== AARCH64_OPND_NIL
)
535 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
536 return qseq_list
[0][idx
];
539 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
541 if (qseq_list
[i
][known_idx
] == known_qlf
)
544 /* More than one sequences are found to have KNOWN_QLF at
546 return AARCH64_OPND_NIL
;
551 return qseq_list
[saved_i
][idx
];
/* Operand qualifier kinds; selects how the three data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Qualifier name, e.g. "8b", "imm_0_7".  */
  const char *desc;
  /* Qualifier kind.  */
  enum operand_qualifier_kind kind;
};
575 /* Indexed by the operand qualifier enumerators. */
576 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
578 {0, 0, 0, "NIL", OQK_NIL
},
580 /* Operand variant qualifiers.
582 element size, number of elements and common value for encoding. */
584 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
585 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
586 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
587 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
589 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
590 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
591 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
592 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
593 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
595 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
596 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
597 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
598 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
599 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
600 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
601 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
602 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
603 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
604 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
606 {0, 0, 0, "z", OQK_OPD_VARIANT
},
607 {0, 0, 0, "m", OQK_OPD_VARIANT
},
609 /* Qualifiers constraining the value range.
611 Lower bound, higher bound, unused. */
613 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
614 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
615 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
616 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
617 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
618 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
620 /* Qualifiers for miscellaneous purpose.
622 unused, unused and unused. */
627 {0, 0, 0, "retrieving", 0},
630 static inline bfd_boolean
631 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
633 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
637 static inline bfd_boolean
638 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
640 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
645 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
647 return aarch64_opnd_qualifiers
[qualifier
].desc
;
650 /* Given an operand qualifier, return the expected data element size
651 of a qualified operand. */
653 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
655 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
656 return aarch64_opnd_qualifiers
[qualifier
].data0
;
660 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
662 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
663 return aarch64_opnd_qualifiers
[qualifier
].data1
;
667 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
669 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
670 return aarch64_opnd_qualifiers
[qualifier
].data2
;
674 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
676 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
677 return aarch64_opnd_qualifiers
[qualifier
].data0
;
681 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
683 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
684 return aarch64_opnd_qualifiers
[qualifier
].data1
;
#ifdef DEBUG_AARCH64
/* Print trace output in printf style, prefixed with "#### ".  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Print a comma-separated list of qualifier names for a full operand
   sequence of AARCH64_MAX_OPND_NUM entries.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently in OPND followed by the candidate
   sequence QUALIFIER being matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
725 /* TODO improve this, we can have an extra field at the runtime to
726 store the number of operands rather than calculating it every time. */
729 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
732 const enum aarch64_opnd
*opnds
= opcode
->operands
;
733 while (opnds
[i
++] != AARCH64_OPND_NIL
)
736 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
740 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
741 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
743 N.B. on the entry, it is very likely that only some operands in *INST
744 have had their qualifiers been established.
746 If STOP_AT is not -1, the function will only try to match
747 the qualifier sequence for operands before and including the operand
748 of index STOP_AT; and on success *RET will only be filled with the first
749 (STOP_AT+1) qualifiers.
751 A couple examples of the matching algorithm:
759 Apart from serving the main encoding routine, this can also be called
760 during or after the operand decoding. */
763 aarch64_find_best_match (const aarch64_inst
*inst
,
764 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
765 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
769 const aarch64_opnd_qualifier_t
*qualifiers
;
771 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
774 DEBUG_TRACE ("SUCCEED: no operand");
778 if (stop_at
< 0 || stop_at
>= num_opnds
)
779 stop_at
= num_opnds
- 1;
781 /* For each pattern. */
782 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
785 qualifiers
= *qualifiers_list
;
787 /* Start as positive. */
790 DEBUG_TRACE ("%d", i
);
793 dump_match_qualifiers (inst
->operands
, qualifiers
);
796 /* Most opcodes has much fewer patterns in the list.
797 First NIL qualifier indicates the end in the list. */
798 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
800 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
806 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
808 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
810 /* Either the operand does not have qualifier, or the qualifier
811 for the operand needs to be deduced from the qualifier
813 In the latter case, any constraint checking related with
814 the obtained qualifier should be done later in
815 operand_general_constraint_met_p. */
818 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
820 /* Unless the target qualifier can also qualify the operand
821 (which has already had a non-nil qualifier), non-equal
822 qualifiers are generally un-matched. */
823 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
832 continue; /* Equal qualifiers are certainly matched. */
835 /* Qualifiers established. */
842 /* Fill the result in *RET. */
844 qualifiers
= *qualifiers_list
;
846 DEBUG_TRACE ("complete qualifiers using list %d", i
);
849 dump_qualifier_sequence (qualifiers
);
852 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
853 ret
[j
] = *qualifiers
;
854 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
855 ret
[j
] = AARCH64_OPND_QLF_NIL
;
857 DEBUG_TRACE ("SUCCESS");
861 DEBUG_TRACE ("FAIL");
865 /* Operand qualifier matching and resolving.
867 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
868 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
870 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
874 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
877 aarch64_opnd_qualifier_seq_t qualifiers
;
879 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
882 DEBUG_TRACE ("matching FAIL");
886 if (inst
->opcode
->flags
& F_STRICT
)
888 /* Require an exact qualifier match, even for NIL qualifiers. */
889 nops
= aarch64_num_of_operands (inst
->opcode
);
890 for (i
= 0; i
< nops
; ++i
)
891 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
895 /* Update the qualifiers. */
896 if (update_p
== TRUE
)
897 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
899 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
901 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
902 "update %s with %s for operand %d",
903 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
904 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
905 inst
->operands
[i
].qualifier
= qualifiers
[i
];
908 DEBUG_TRACE ("matching SUCCESS");
912 /* Return TRUE if VALUE is a wide constant that can be moved into a general
915 IS32 indicates whether value is a 32-bit immediate or not.
916 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
917 amount will be returned in *SHIFT_AMOUNT. */
920 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
924 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
928 /* Allow all zeros or all ones in top 32-bits, so that
929 32-bit constant expressions like ~0x80000000 are
931 uint64_t ext
= value
;
932 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
933 /* Immediate out of range. */
935 value
&= (int64_t) 0xffffffff;
938 /* first, try movz then movn */
940 if ((value
& ((int64_t) 0xffff << 0)) == value
)
942 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
944 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
946 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
951 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
955 if (shift_amount
!= NULL
)
956 *shift_amount
= amount
;
958 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
963 /* Build the accepted values for immediate logical SIMD instructions.
965 The standard encodings of the immediate value are:
966 N imms immr SIMD size R S
967 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
968 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
969 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
970 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
971 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
972 0 11110s 00000r 2 UInt(r) UInt(s)
973 where all-ones value of S is reserved.
975 Let's call E the SIMD size.
977 The immediate value is: S+1 bits '1' rotated to the right by R.
979 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
980 (remember S != E - 1). */
982 #define TOTAL_IMM_NB 5334
987 aarch64_insn encoding
;
990 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
993 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
995 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
996 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
998 if (imm1
->imm
< imm2
->imm
)
1000 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1020 build_immediate_table (void)
1022 uint32_t log_e
, e
, s
, r
, s_mask
;
1028 for (log_e
= 1; log_e
<= 6; log_e
++)
1030 /* Get element size. */
1035 mask
= 0xffffffffffffffffull
;
1041 mask
= (1ull << e
) - 1;
1043 1 ((1 << 4) - 1) << 2 = 111100
1044 2 ((1 << 3) - 1) << 3 = 111000
1045 3 ((1 << 2) - 1) << 4 = 110000
1046 4 ((1 << 1) - 1) << 5 = 100000
1047 5 ((1 << 0) - 1) << 6 = 000000 */
1048 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1050 for (s
= 0; s
< e
- 1; s
++)
1051 for (r
= 0; r
< e
; r
++)
1053 /* s+1 consecutive bits to 1 (s < 63) */
1054 imm
= (1ull << (s
+ 1)) - 1;
1055 /* rotate right by r */
1057 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1058 /* replicate the constant depending on SIMD size */
1061 case 1: imm
= (imm
<< 2) | imm
;
1062 case 2: imm
= (imm
<< 4) | imm
;
1063 case 3: imm
= (imm
<< 8) | imm
;
1064 case 4: imm
= (imm
<< 16) | imm
;
1065 case 5: imm
= (imm
<< 32) | imm
;
1069 simd_immediates
[nb_imms
].imm
= imm
;
1070 simd_immediates
[nb_imms
].encoding
=
1071 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1075 assert (nb_imms
== TOTAL_IMM_NB
);
1076 qsort(simd_immediates
, nb_imms
,
1077 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1080 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1081 be accepted by logical (immediate) instructions
1082 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1084 ESIZE is the number of bytes in the decoded immediate value.
1085 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1086 VALUE will be returned in *ENCODING. */
1089 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1091 simd_imm_encoding imm_enc
;
1092 const simd_imm_encoding
*imm_encoding
;
1093 static bfd_boolean initialized
= FALSE
;
1097 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), is32: %d", value
,
1100 if (initialized
== FALSE
)
1102 build_immediate_table ();
1106 /* Allow all zeros or all ones in top bits, so that
1107 constant expressions like ~1 are permitted. */
1108 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1109 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1112 /* Replicate to a full 64-bit value. */
1114 for (i
= esize
* 8; i
< 64; i
*= 2)
1115 value
|= (value
<< i
);
1117 imm_enc
.imm
= value
;
1118 imm_encoding
= (const simd_imm_encoding
*)
1119 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1120 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1121 if (imm_encoding
== NULL
)
1123 DEBUG_TRACE ("exit with FALSE");
1126 if (encoding
!= NULL
)
1127 *encoding
= imm_encoding
->encoding
;
1128 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	/* Byte i is all-ones: set bit i of the result.  */
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither 0x00 nor 0xff cannot be shrunk.  */
	return -1;
    }
  return ret;
}
1154 /* Utility inline functions for operand_general_constraint_met_p. */
1157 set_error (aarch64_operand_error
*mismatch_detail
,
1158 enum aarch64_operand_error_kind kind
, int idx
,
1161 if (mismatch_detail
== NULL
)
1163 mismatch_detail
->kind
= kind
;
1164 mismatch_detail
->index
= idx
;
1165 mismatch_detail
->error
= error
;
1169 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1172 if (mismatch_detail
== NULL
)
1174 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1178 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1179 int idx
, int lower_bound
, int upper_bound
,
1182 if (mismatch_detail
== NULL
)
1184 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1185 mismatch_detail
->data
[0] = lower_bound
;
1186 mismatch_detail
->data
[1] = upper_bound
;
1190 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1191 int idx
, int lower_bound
, int upper_bound
)
1193 if (mismatch_detail
== NULL
)
1195 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1196 _("immediate value"));
1200 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1201 int idx
, int lower_bound
, int upper_bound
)
1203 if (mismatch_detail
== NULL
)
1205 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1206 _("immediate offset"));
1210 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1211 int idx
, int lower_bound
, int upper_bound
)
1213 if (mismatch_detail
== NULL
)
1215 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1216 _("register number"));
1220 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1221 int idx
, int lower_bound
, int upper_bound
)
1223 if (mismatch_detail
== NULL
)
1225 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1226 _("register element index"));
1230 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1231 int idx
, int lower_bound
, int upper_bound
)
1233 if (mismatch_detail
== NULL
)
1235 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1240 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1243 if (mismatch_detail
== NULL
)
1245 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1246 mismatch_detail
->data
[0] = alignment
;
1250 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1253 if (mismatch_detail
== NULL
)
1255 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1256 mismatch_detail
->data
[0] = expected_num
;
1260 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1263 if (mismatch_detail
== NULL
)
1265 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1268 /* General constraint checking based on operand code.
1270 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1271 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1273 This function has to be called after the qualifiers for all operands
1276 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1277 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1278 of error message during the disassembling where error message is not
1279 wanted. We avoid the dynamic construction of strings of error messages
1280 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1281 use a combination of error code, static string and some integer data to
1282 represent an error. */
1285 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1286 enum aarch64_opnd type
,
1287 const aarch64_opcode
*opcode
,
1288 aarch64_operand_error
*mismatch_detail
)
1293 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1294 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1296 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1298 switch (aarch64_operands
[type
].op_class
)
1300 case AARCH64_OPND_CLASS_INT_REG
:
1301 /* Check pair reg constraints for cas* instructions. */
1302 if (type
== AARCH64_OPND_PAIRREG
)
1304 assert (idx
== 1 || idx
== 3);
1305 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1307 set_syntax_error (mismatch_detail
, idx
- 1,
1308 _("reg pair must start from even reg"));
1311 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1313 set_syntax_error (mismatch_detail
, idx
,
1314 _("reg pair must be contiguous"));
1320 /* <Xt> may be optional in some IC and TLBI instructions. */
1321 if (type
== AARCH64_OPND_Rt_SYS
)
1323 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1324 == AARCH64_OPND_CLASS_SYSTEM
));
1325 if (opnds
[1].present
1326 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1328 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1331 if (!opnds
[1].present
1332 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1334 set_other_error (mismatch_detail
, idx
, _("missing register"));
1340 case AARCH64_OPND_QLF_WSP
:
1341 case AARCH64_OPND_QLF_SP
:
1342 if (!aarch64_stack_pointer_p (opnd
))
1344 set_other_error (mismatch_detail
, idx
,
1345 _("stack pointer register expected"));
1354 case AARCH64_OPND_CLASS_SVE_REG
:
1357 case AARCH64_OPND_SVE_Zn_INDEX
:
1358 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1359 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1361 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1367 case AARCH64_OPND_SVE_ZnxN
:
1368 case AARCH64_OPND_SVE_ZtxN
:
1369 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1371 set_other_error (mismatch_detail
, idx
,
1372 _("invalid register list"));
1382 case AARCH64_OPND_CLASS_PRED_REG
:
1383 if (opnd
->reg
.regno
>= 8
1384 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1386 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1391 case AARCH64_OPND_CLASS_COND
:
1392 if (type
== AARCH64_OPND_COND1
1393 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1395 /* Not allow AL or NV. */
1396 set_syntax_error (mismatch_detail
, idx
, NULL
);
1400 case AARCH64_OPND_CLASS_ADDRESS
:
1401 /* Check writeback. */
1402 switch (opcode
->iclass
)
1406 case ldstnapair_offs
:
1409 if (opnd
->addr
.writeback
== 1)
1411 set_syntax_error (mismatch_detail
, idx
,
1412 _("unexpected address writeback"));
1417 case ldstpair_indexed
:
1420 if (opnd
->addr
.writeback
== 0)
1422 set_syntax_error (mismatch_detail
, idx
,
1423 _("address writeback expected"));
1428 assert (opnd
->addr
.writeback
== 0);
1433 case AARCH64_OPND_ADDR_SIMM7
:
1434 /* Scaled signed 7 bits immediate offset. */
1435 /* Get the size of the data element that is accessed, which may be
1436 different from that of the source register size,
1437 e.g. in strb/ldrb. */
1438 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1439 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1441 set_offset_out_of_range_error (mismatch_detail
, idx
,
1442 -64 * size
, 63 * size
);
1445 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1447 set_unaligned_error (mismatch_detail
, idx
, size
);
1451 case AARCH64_OPND_ADDR_SIMM9
:
1452 /* Unscaled signed 9 bits immediate offset. */
1453 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1455 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1460 case AARCH64_OPND_ADDR_SIMM9_2
:
1461 /* Unscaled signed 9 bits immediate offset, which has to be negative
1463 size
= aarch64_get_qualifier_esize (qualifier
);
1464 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1465 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1466 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1468 set_other_error (mismatch_detail
, idx
,
1469 _("negative or unaligned offset expected"));
1472 case AARCH64_OPND_SIMD_ADDR_POST
:
1473 /* AdvSIMD load/store multiple structures, post-index. */
1475 if (opnd
->addr
.offset
.is_reg
)
1477 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1481 set_other_error (mismatch_detail
, idx
,
1482 _("invalid register offset"));
1488 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1489 unsigned num_bytes
; /* total number of bytes transferred. */
1490 /* The opcode dependent area stores the number of elements in
1491 each structure to be loaded/stored. */
1492 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1493 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1494 /* Special handling of loading single structure to all lane. */
1495 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1496 * aarch64_get_qualifier_esize (prev
->qualifier
);
1498 num_bytes
= prev
->reglist
.num_regs
1499 * aarch64_get_qualifier_esize (prev
->qualifier
)
1500 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1501 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1503 set_other_error (mismatch_detail
, idx
,
1504 _("invalid post-increment amount"));
1510 case AARCH64_OPND_ADDR_REGOFF
:
1511 /* Get the size of the data element that is accessed, which may be
1512 different from that of the source register size,
1513 e.g. in strb/ldrb. */
1514 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1515 /* It is either no shift or shift by the binary logarithm of SIZE. */
1516 if (opnd
->shifter
.amount
!= 0
1517 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1519 set_other_error (mismatch_detail
, idx
,
1520 _("invalid shift amount"));
1523 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1525 switch (opnd
->shifter
.kind
)
1527 case AARCH64_MOD_UXTW
:
1528 case AARCH64_MOD_LSL
:
1529 case AARCH64_MOD_SXTW
:
1530 case AARCH64_MOD_SXTX
: break;
1532 set_other_error (mismatch_detail
, idx
,
1533 _("invalid extend/shift operator"));
1538 case AARCH64_OPND_ADDR_UIMM12
:
1539 imm
= opnd
->addr
.offset
.imm
;
1540 /* Get the size of the data element that is accessed, which may be
1541 different from that of the source register size,
1542 e.g. in strb/ldrb. */
1543 size
= aarch64_get_qualifier_esize (qualifier
);
1544 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1546 set_offset_out_of_range_error (mismatch_detail
, idx
,
1550 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1552 set_unaligned_error (mismatch_detail
, idx
, size
);
1557 case AARCH64_OPND_ADDR_PCREL14
:
1558 case AARCH64_OPND_ADDR_PCREL19
:
1559 case AARCH64_OPND_ADDR_PCREL21
:
1560 case AARCH64_OPND_ADDR_PCREL26
:
1561 imm
= opnd
->imm
.value
;
1562 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1564 /* The offset value in a PC-relative branch instruction is alway
1565 4-byte aligned and is encoded without the lowest 2 bits. */
1566 if (!value_aligned_p (imm
, 4))
1568 set_unaligned_error (mismatch_detail
, idx
, 4);
1571 /* Right shift by 2 so that we can carry out the following check
1575 size
= get_operand_fields_width (get_operand_from_code (type
));
1576 if (!value_fit_signed_field_p (imm
, size
))
1578 set_other_error (mismatch_detail
, idx
,
1579 _("immediate out of range"));
1589 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1590 if (type
== AARCH64_OPND_LEt
)
1592 /* Get the upper bound for the element index. */
1593 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1594 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
1596 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1600 /* The opcode dependent area stores the number of elements in
1601 each structure to be loaded/stored. */
1602 num
= get_opcode_dependent_value (opcode
);
1605 case AARCH64_OPND_LVt
:
1606 assert (num
>= 1 && num
<= 4);
1607 /* Unless LD1/ST1, the number of registers should be equal to that
1608 of the structure elements. */
1609 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1611 set_reg_list_error (mismatch_detail
, idx
, num
);
1615 case AARCH64_OPND_LVt_AL
:
1616 case AARCH64_OPND_LEt
:
1617 assert (num
>= 1 && num
<= 4);
1618 /* The number of registers should be equal to that of the structure
1620 if (opnd
->reglist
.num_regs
!= num
)
1622 set_reg_list_error (mismatch_detail
, idx
, num
);
1631 case AARCH64_OPND_CLASS_IMMEDIATE
:
1632 /* Constraint check on immediate operand. */
1633 imm
= opnd
->imm
.value
;
1634 /* E.g. imm_0_31 constrains value to be 0..31. */
1635 if (qualifier_value_in_range_constraint_p (qualifier
)
1636 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1637 get_upper_bound (qualifier
)))
1639 set_imm_out_of_range_error (mismatch_detail
, idx
,
1640 get_lower_bound (qualifier
),
1641 get_upper_bound (qualifier
));
1647 case AARCH64_OPND_AIMM
:
1648 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1650 set_other_error (mismatch_detail
, idx
,
1651 _("invalid shift operator"));
1654 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1656 set_other_error (mismatch_detail
, idx
,
1657 _("shift amount expected to be 0 or 12"));
1660 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1662 set_other_error (mismatch_detail
, idx
,
1663 _("immediate out of range"));
1668 case AARCH64_OPND_HALF
:
1669 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1670 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1672 set_other_error (mismatch_detail
, idx
,
1673 _("invalid shift operator"));
1676 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1677 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
1679 set_other_error (mismatch_detail
, idx
,
1680 _("shift amount should be a multiple of 16"));
1683 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
1685 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
1689 if (opnd
->imm
.value
< 0)
1691 set_other_error (mismatch_detail
, idx
,
1692 _("negative immediate value not allowed"));
1695 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
1697 set_other_error (mismatch_detail
, idx
,
1698 _("immediate out of range"));
1703 case AARCH64_OPND_IMM_MOV
:
1705 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1706 imm
= opnd
->imm
.value
;
1710 case OP_MOV_IMM_WIDEN
:
1712 /* Fall through... */
1713 case OP_MOV_IMM_WIDE
:
1714 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
1716 set_other_error (mismatch_detail
, idx
,
1717 _("immediate out of range"));
1721 case OP_MOV_IMM_LOG
:
1722 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
1724 set_other_error (mismatch_detail
, idx
,
1725 _("immediate out of range"));
1736 case AARCH64_OPND_NZCV
:
1737 case AARCH64_OPND_CCMP_IMM
:
1738 case AARCH64_OPND_EXCEPTION
:
1739 case AARCH64_OPND_UIMM4
:
1740 case AARCH64_OPND_UIMM7
:
1741 case AARCH64_OPND_UIMM3_OP1
:
1742 case AARCH64_OPND_UIMM3_OP2
:
1743 size
= get_operand_fields_width (get_operand_from_code (type
));
1745 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
1747 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1753 case AARCH64_OPND_WIDTH
:
1754 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
1755 && opnds
[0].type
== AARCH64_OPND_Rd
);
1756 size
= get_upper_bound (qualifier
);
1757 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
1758 /* lsb+width <= reg.size */
1760 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
1761 size
- opnds
[idx
-1].imm
.value
);
1766 case AARCH64_OPND_LIMM
:
1768 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1769 uint64_t uimm
= opnd
->imm
.value
;
1770 if (opcode
->op
== OP_BIC
)
1772 if (aarch64_logical_immediate_p (uimm
, esize
, NULL
) == FALSE
)
1774 set_other_error (mismatch_detail
, idx
,
1775 _("immediate out of range"));
1781 case AARCH64_OPND_IMM0
:
1782 case AARCH64_OPND_FPIMM0
:
1783 if (opnd
->imm
.value
!= 0)
1785 set_other_error (mismatch_detail
, idx
,
1786 _("immediate zero expected"));
1791 case AARCH64_OPND_SHLL_IMM
:
1793 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
1794 if (opnd
->imm
.value
!= size
)
1796 set_other_error (mismatch_detail
, idx
,
1797 _("invalid shift amount"));
1802 case AARCH64_OPND_IMM_VLSL
:
1803 size
= aarch64_get_qualifier_esize (qualifier
);
1804 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
1806 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1812 case AARCH64_OPND_IMM_VLSR
:
1813 size
= aarch64_get_qualifier_esize (qualifier
);
1814 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
1816 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
1821 case AARCH64_OPND_SIMD_IMM
:
1822 case AARCH64_OPND_SIMD_IMM_SFT
:
1823 /* Qualifier check. */
1826 case AARCH64_OPND_QLF_LSL
:
1827 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1829 set_other_error (mismatch_detail
, idx
,
1830 _("invalid shift operator"));
1834 case AARCH64_OPND_QLF_MSL
:
1835 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
1837 set_other_error (mismatch_detail
, idx
,
1838 _("invalid shift operator"));
1842 case AARCH64_OPND_QLF_NIL
:
1843 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1845 set_other_error (mismatch_detail
, idx
,
1846 _("shift is not permitted"));
1854 /* Is the immediate valid? */
1856 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
1858 /* uimm8 or simm8 */
1859 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
1861 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
1865 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
1868 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1869 ffffffffgggggggghhhhhhhh'. */
1870 set_other_error (mismatch_detail
, idx
,
1871 _("invalid value for immediate"));
1874 /* Is the shift amount valid? */
1875 switch (opnd
->shifter
.kind
)
1877 case AARCH64_MOD_LSL
:
1878 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1879 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
1881 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
1885 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
1887 set_unaligned_error (mismatch_detail
, idx
, 8);
1891 case AARCH64_MOD_MSL
:
1892 /* Only 8 and 16 are valid shift amount. */
1893 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
1895 set_other_error (mismatch_detail
, idx
,
1896 _("shift amount expected to be 0 or 16"));
1901 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1903 set_other_error (mismatch_detail
, idx
,
1904 _("invalid shift operator"));
1911 case AARCH64_OPND_FPIMM
:
1912 case AARCH64_OPND_SIMD_FPIMM
:
1913 if (opnd
->imm
.is_fp
== 0)
1915 set_other_error (mismatch_detail
, idx
,
1916 _("floating-point immediate expected"));
1919 /* The value is expected to be an 8-bit floating-point constant with
1920 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1921 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1923 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
1925 set_other_error (mismatch_detail
, idx
,
1926 _("immediate out of range"));
1929 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1931 set_other_error (mismatch_detail
, idx
,
1932 _("invalid shift operator"));
1942 case AARCH64_OPND_CLASS_CP_REG
:
1943 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1944 valid range: C0 - C15. */
1945 if (opnd
->reg
.regno
> 15)
1947 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
1952 case AARCH64_OPND_CLASS_SYSTEM
:
1955 case AARCH64_OPND_PSTATEFIELD
:
1956 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
1959 The immediate must be #0 or #1. */
1960 if ((opnd
->pstatefield
== 0x03 /* UAO. */
1961 || opnd
->pstatefield
== 0x04) /* PAN. */
1962 && opnds
[1].imm
.value
> 1)
1964 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
1967 /* MSR SPSel, #uimm4
1968 Uses uimm4 as a control value to select the stack pointer: if
1969 bit 0 is set it selects the current exception level's stack
1970 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1971 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1972 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
1974 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
1983 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
1984 /* Get the upper bound for the element index. */
1985 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1986 /* Index out-of-range. */
1987 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
1989 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1992 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1993 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
1994 number is encoded in "size:M:Rm":
2000 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
2001 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
2003 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2008 case AARCH64_OPND_CLASS_MODIFIED_REG
:
2009 assert (idx
== 1 || idx
== 2);
2012 case AARCH64_OPND_Rm_EXT
:
2013 if (aarch64_extend_operator_p (opnd
->shifter
.kind
) == FALSE
2014 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2016 set_other_error (mismatch_detail
, idx
,
2017 _("extend operator expected"));
2020 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2021 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2022 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2024 if (!aarch64_stack_pointer_p (opnds
+ 0)
2025 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
2027 if (!opnd
->shifter
.operator_present
)
2029 set_other_error (mismatch_detail
, idx
,
2030 _("missing extend operator"));
2033 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2035 set_other_error (mismatch_detail
, idx
,
2036 _("'LSL' operator not allowed"));
2040 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
2041 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2042 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
2044 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
2047 /* In the 64-bit form, the final register operand is written as Wm
2048 for all but the (possibly omitted) UXTX/LSL and SXTX
2050 N.B. GAS allows X register to be used with any operator as a
2051 programming convenience. */
2052 if (qualifier
== AARCH64_OPND_QLF_X
2053 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
2054 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
2055 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
2057 set_other_error (mismatch_detail
, idx
, _("W register expected"));
2062 case AARCH64_OPND_Rm_SFT
:
2063 /* ROR is not available to the shifted register operand in
2064 arithmetic instructions. */
2065 if (aarch64_shift_operator_p (opnd
->shifter
.kind
) == FALSE
)
2067 set_other_error (mismatch_detail
, idx
,
2068 _("shift operator expected"));
2071 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
2072 && opcode
->iclass
!= log_shift
)
2074 set_other_error (mismatch_detail
, idx
,
2075 _("'ROR' operator not allowed"));
2078 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2079 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2081 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2098 /* Main entrypoint for the operand constraint checking.
2100 Return 1 if operands of *INST meet the constraint applied by the operand
2101 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2102 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2103 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2104 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2105 error kind when it is notified that an instruction does not pass the check).
2107 Un-determined operand qualifiers may get established during the process. */
2110 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2111 aarch64_operand_error
*mismatch_detail
)
2115 DEBUG_TRACE ("enter");
2117 /* Check for cases where a source register needs to be the same as the
2118 destination register. Do this before matching qualifiers since if
2119 an instruction has both invalid tying and invalid qualifiers,
2120 the error about qualifiers would suggest several alternative
2121 instructions that also have invalid tying. */
2122 i
= inst
->opcode
->tied_operand
;
2123 if (i
> 0 && (inst
->operands
[0].reg
.regno
!= inst
->operands
[i
].reg
.regno
))
2125 if (mismatch_detail
)
2127 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
2128 mismatch_detail
->index
= i
;
2129 mismatch_detail
->error
= NULL
;
2134 /* Match operands' qualifier.
2135 *INST has already had qualifier establish for some, if not all, of
2136 its operands; we need to find out whether these established
2137 qualifiers match one of the qualifier sequence in
2138 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2139 with the corresponding qualifier in such a sequence.
2140 Only basic operand constraint checking is done here; the more thorough
2141 constraint checking will carried out by operand_general_constraint_met_p,
2142 which has be to called after this in order to get all of the operands'
2143 qualifiers established. */
2144 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2146 DEBUG_TRACE ("FAIL on operand qualifier matching");
2147 if (mismatch_detail
)
2149 /* Return an error type to indicate that it is the qualifier
2150 matching failure; we don't care about which operand as there
2151 are enough information in the opcode table to reproduce it. */
2152 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2153 mismatch_detail
->index
= -1;
2154 mismatch_detail
->error
= NULL
;
2159 /* Match operands' constraint. */
2160 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2162 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2163 if (type
== AARCH64_OPND_NIL
)
2165 if (inst
->operands
[i
].skip
)
2167 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2170 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2171 inst
->opcode
, mismatch_detail
) == 0)
2173 DEBUG_TRACE ("FAIL on operand %d", i
);
2178 DEBUG_TRACE ("PASS");
2183 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2184 Also updates the TYPE of each INST->OPERANDS with the corresponding
2185 value of OPCODE->OPERANDS.
2187 Note that some operand qualifiers may need to be manually cleared by
2188 the caller before it further calls the aarch64_opcode_encode; by
2189 doing this, it helps the qualifier matching facilities work
2192 const aarch64_opcode
*
2193 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2196 const aarch64_opcode
*old
= inst
->opcode
;
2198 inst
->opcode
= opcode
;
2200 /* Update the operand types. */
2201 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2203 inst
->operands
[i
].type
= opcode
->operands
[i
];
2204 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2208 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2214 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2217 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2218 if (operands
[i
] == operand
)
2220 else if (operands
[i
] == AARCH64_OPND_NIL
)
2225 /* R0...R30, followed by FOR31. */
2226 #define BANK(R, FOR31) \
2227 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2228 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2229 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2230 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2231 /* [0][0] 32-bit integer regs with sp Wn
2232 [0][1] 64-bit integer regs with sp Xn sf=1
2233 [1][0] 32-bit integer regs with #0 Wn
2234 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2235 static const char *int_reg
[2][2][32] = {
2236 #define R32(X) "w" #X
2237 #define R64(X) "x" #X
2238 { BANK (R32
, "wsp"), BANK (R64
, "sp") },
2239 { BANK (R32
, "wzr"), BANK (R64
, "xzr") }
2245 /* Return the integer register name.
2246 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2248 static inline const char *
2249 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2251 const int has_zr
= sp_reg_p
? 0 : 1;
2252 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2253 return int_reg
[has_zr
][is_64
][regno
];
2256 /* Like get_int_reg_name, but IS_64 is always 1. */
2258 static inline const char *
2259 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2261 const int has_zr
= sp_reg_p
? 0 : 1;
2262 return int_reg
[has_zr
][1][regno
];
2265 /* Get the name of the integer offset register in OPND, using the shift type
2266 to decide whether it's a word or doubleword. */
2268 static inline const char *
2269 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
2271 switch (opnd
->shifter
.kind
)
2273 case AARCH64_MOD_UXTW
:
2274 case AARCH64_MOD_SXTW
:
2275 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
2277 case AARCH64_MOD_LSL
:
2278 case AARCH64_MOD_SXTX
:
2279 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
2286 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2306 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2307 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2308 (depending on the type of the instruction). IMM8 will be expanded to a
2309 single-precision floating-point value (SIZE == 4) or a double-precision
2310 floating-point value (SIZE == 8). A half-precision floating-point value
2311 (SIZE == 2) is expanded to a single-precision floating-point value. The
2312 expanded value is returned. */
/* Expand the 8-bit floating-point literal IMM8 (sign : 3-bit exponent :
   4-bit normalized fraction, i.e. "a:b:c:d:e:f:g:h") to the bit pattern
   of the value it encodes.  SIZE selects the target format: 8 gives a
   double-precision pattern, while 4 and 2 both give a single-precision
   pattern (half-precision operands are displayed via single).  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>: sign.  */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0>.  */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>: exponent MSB (pre-inversion).  */
  /* Replicate(imm8<6>, 4): four copies of the exponent MSB.  */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2) | (imm8_6 << 1) | imm8_6;

  if (size == 8)
    {
      /* Build the top word of the double pattern, then shift into place;
	 the low 32 bits of a valid FP literal are always zero.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7>  */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)  */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4)  */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>  */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      imm = 0;
    }

  return imm;
}
2350 /* Produce the string representation of the register list operand *OPND
2351 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2352 the register name that comes before the register number, such as "v". */
2354 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
2357 const int num_regs
= opnd
->reglist
.num_regs
;
2358 const int first_reg
= opnd
->reglist
.first_regno
;
2359 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2360 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2361 char tb
[8]; /* Temporary buffer. */
2363 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2364 assert (num_regs
>= 1 && num_regs
<= 4);
2366 /* Prepare the index if any. */
2367 if (opnd
->reglist
.has_index
)
2368 snprintf (tb
, 8, "[%" PRIi64
"]", opnd
->reglist
.index
);
2372 /* The hyphenated form is preferred for disassembly if there are
2373 more than two registers in the list, and the register numbers
2374 are monotonically increasing in increments of one. */
2375 if (num_regs
> 2 && last_reg
> first_reg
)
2376 snprintf (buf
, size
, "{%s%d.%s-%s%d.%s}%s", prefix
, first_reg
, qlf_name
,
2377 prefix
, last_reg
, qlf_name
, tb
);
2380 const int reg0
= first_reg
;
2381 const int reg1
= (first_reg
+ 1) & 0x1f;
2382 const int reg2
= (first_reg
+ 2) & 0x1f;
2383 const int reg3
= (first_reg
+ 3) & 0x1f;
2388 snprintf (buf
, size
, "{%s%d.%s}%s", prefix
, reg0
, qlf_name
, tb
);
2391 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s}%s", prefix
, reg0
, qlf_name
,
2392 prefix
, reg1
, qlf_name
, tb
);
2395 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2396 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2397 prefix
, reg2
, qlf_name
, tb
);
2400 snprintf (buf
, size
, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2401 prefix
, reg0
, qlf_name
, prefix
, reg1
, qlf_name
,
2402 prefix
, reg2
, qlf_name
, prefix
, reg3
, qlf_name
, tb
);
2408 /* Print the register+immediate address in OPND to BUF, which has SIZE
2409 characters. BASE is the name of the base register. */
2412 print_immediate_offset_address (char *buf
, size_t size
,
2413 const aarch64_opnd_info
*opnd
,
2416 if (opnd
->addr
.writeback
)
2418 if (opnd
->addr
.preind
)
2419 snprintf (buf
, size
, "[%s,#%d]!", base
, opnd
->addr
.offset
.imm
);
2421 snprintf (buf
, size
, "[%s],#%d", base
, opnd
->addr
.offset
.imm
);
2425 if (opnd
->addr
.offset
.imm
)
2426 snprintf (buf
, size
, "[%s,#%d]", base
, opnd
->addr
.offset
.imm
);
2428 snprintf (buf
, size
, "[%s]", base
);
2432 /* Produce the string representation of the register offset address operand
2433 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2434 the names of the base and offset registers. */
2436 print_register_offset_address (char *buf
, size_t size
,
2437 const aarch64_opnd_info
*opnd
,
2438 const char *base
, const char *offset
)
2440 char tb
[16]; /* Temporary buffer. */
2441 bfd_boolean print_extend_p
= TRUE
;
2442 bfd_boolean print_amount_p
= TRUE
;
2443 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
2445 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
2446 || !opnd
->shifter
.amount_present
))
2448 /* Not print the shift/extend amount when the amount is zero and
2449 when it is not the special case of 8-bit load/store instruction. */
2450 print_amount_p
= FALSE
;
2451 /* Likewise, no need to print the shift operator LSL in such a
2453 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2454 print_extend_p
= FALSE
;
2457 /* Prepare for the extend/shift. */
2461 snprintf (tb
, sizeof (tb
), ",%s #%d", shift_name
, opnd
->shifter
.amount
);
2463 snprintf (tb
, sizeof (tb
), ",%s", shift_name
);
2468 snprintf (buf
, size
, "[%s,%s%s]", base
, offset
, tb
);
2471 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2472 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2473 PC, PCREL_P and ADDRESS are used to pass in and return information about
2474 the PC-relative address calculation, where the PC value is passed in
2475 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2476 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2477 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2479 The function serves both the disassembler and the assembler diagnostics
2480 issuer, which is the reason why it lives in this file. */
2483 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
2484 const aarch64_opcode
*opcode
,
2485 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
2489 const char *name
= NULL
;
2490 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
2491 enum aarch64_modifier_kind kind
;
2500 case AARCH64_OPND_Rd
:
2501 case AARCH64_OPND_Rn
:
2502 case AARCH64_OPND_Rm
:
2503 case AARCH64_OPND_Rt
:
2504 case AARCH64_OPND_Rt2
:
2505 case AARCH64_OPND_Rs
:
2506 case AARCH64_OPND_Ra
:
2507 case AARCH64_OPND_Rt_SYS
:
2508 case AARCH64_OPND_PAIRREG
:
2509 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2510 the <ic_op>, therefore we use opnd->present to override the
2511 generic optional-ness information. */
2512 if (opnd
->type
== AARCH64_OPND_Rt_SYS
&& !opnd
->present
)
2514 /* Omit the operand, e.g. RET. */
2515 if (optional_operand_p (opcode
, idx
)
2516 && opnd
->reg
.regno
== get_optional_operand_default_value (opcode
))
2518 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2519 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2520 snprintf (buf
, size
, "%s",
2521 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2524 case AARCH64_OPND_Rd_SP
:
2525 case AARCH64_OPND_Rn_SP
:
2526 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2527 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
2528 || opnd
->qualifier
== AARCH64_OPND_QLF_X
2529 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
2530 snprintf (buf
, size
, "%s",
2531 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
2534 case AARCH64_OPND_Rm_EXT
:
2535 kind
= opnd
->shifter
.kind
;
2536 assert (idx
== 1 || idx
== 2);
2537 if ((aarch64_stack_pointer_p (opnds
)
2538 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
2539 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
2540 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
2541 && kind
== AARCH64_MOD_UXTW
)
2542 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
2543 && kind
== AARCH64_MOD_UXTX
)))
2545 /* 'LSL' is the preferred form in this case. */
2546 kind
= AARCH64_MOD_LSL
;
2547 if (opnd
->shifter
.amount
== 0)
2549 /* Shifter omitted. */
2550 snprintf (buf
, size
, "%s",
2551 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2555 if (opnd
->shifter
.amount
)
2556 snprintf (buf
, size
, "%s, %s #%d",
2557 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2558 aarch64_operand_modifiers
[kind
].name
,
2559 opnd
->shifter
.amount
);
2561 snprintf (buf
, size
, "%s, %s",
2562 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2563 aarch64_operand_modifiers
[kind
].name
);
2566 case AARCH64_OPND_Rm_SFT
:
2567 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2568 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2569 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2570 snprintf (buf
, size
, "%s",
2571 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2573 snprintf (buf
, size
, "%s, %s #%d",
2574 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2575 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2576 opnd
->shifter
.amount
);
2579 case AARCH64_OPND_Fd
:
2580 case AARCH64_OPND_Fn
:
2581 case AARCH64_OPND_Fm
:
2582 case AARCH64_OPND_Fa
:
2583 case AARCH64_OPND_Ft
:
2584 case AARCH64_OPND_Ft2
:
2585 case AARCH64_OPND_Sd
:
2586 case AARCH64_OPND_Sn
:
2587 case AARCH64_OPND_Sm
:
2588 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
2592 case AARCH64_OPND_Vd
:
2593 case AARCH64_OPND_Vn
:
2594 case AARCH64_OPND_Vm
:
2595 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
2596 aarch64_get_qualifier_name (opnd
->qualifier
));
2599 case AARCH64_OPND_Ed
:
2600 case AARCH64_OPND_En
:
2601 case AARCH64_OPND_Em
:
2602 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
2603 aarch64_get_qualifier_name (opnd
->qualifier
),
2604 opnd
->reglane
.index
);
2607 case AARCH64_OPND_VdD1
:
2608 case AARCH64_OPND_VnD1
:
2609 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
2612 case AARCH64_OPND_LVn
:
2613 case AARCH64_OPND_LVt
:
2614 case AARCH64_OPND_LVt_AL
:
2615 case AARCH64_OPND_LEt
:
2616 print_register_list (buf
, size
, opnd
, "v");
2619 case AARCH64_OPND_SVE_Pd
:
2620 case AARCH64_OPND_SVE_Pg3
:
2621 case AARCH64_OPND_SVE_Pg4_5
:
2622 case AARCH64_OPND_SVE_Pg4_10
:
2623 case AARCH64_OPND_SVE_Pg4_16
:
2624 case AARCH64_OPND_SVE_Pm
:
2625 case AARCH64_OPND_SVE_Pn
:
2626 case AARCH64_OPND_SVE_Pt
:
2627 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
2628 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
2629 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
2630 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
2631 snprintf (buf
, size
, "p%d/%s", opnd
->reg
.regno
,
2632 aarch64_get_qualifier_name (opnd
->qualifier
));
2634 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
2635 aarch64_get_qualifier_name (opnd
->qualifier
));
2638 case AARCH64_OPND_SVE_Za_5
:
2639 case AARCH64_OPND_SVE_Za_16
:
2640 case AARCH64_OPND_SVE_Zd
:
2641 case AARCH64_OPND_SVE_Zm_5
:
2642 case AARCH64_OPND_SVE_Zm_16
:
2643 case AARCH64_OPND_SVE_Zn
:
2644 case AARCH64_OPND_SVE_Zt
:
2645 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
2646 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
2648 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
2649 aarch64_get_qualifier_name (opnd
->qualifier
));
2652 case AARCH64_OPND_SVE_ZnxN
:
2653 case AARCH64_OPND_SVE_ZtxN
:
2654 print_register_list (buf
, size
, opnd
, "z");
2657 case AARCH64_OPND_SVE_Zn_INDEX
:
2658 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
2659 aarch64_get_qualifier_name (opnd
->qualifier
),
2660 opnd
->reglane
.index
);
2663 case AARCH64_OPND_Cn
:
2664 case AARCH64_OPND_Cm
:
2665 snprintf (buf
, size
, "C%d", opnd
->reg
.regno
);
2668 case AARCH64_OPND_IDX
:
2669 case AARCH64_OPND_IMM
:
2670 case AARCH64_OPND_WIDTH
:
2671 case AARCH64_OPND_UIMM3_OP1
:
2672 case AARCH64_OPND_UIMM3_OP2
:
2673 case AARCH64_OPND_BIT_NUM
:
2674 case AARCH64_OPND_IMM_VLSL
:
2675 case AARCH64_OPND_IMM_VLSR
:
2676 case AARCH64_OPND_SHLL_IMM
:
2677 case AARCH64_OPND_IMM0
:
2678 case AARCH64_OPND_IMMR
:
2679 case AARCH64_OPND_IMMS
:
2680 case AARCH64_OPND_FBITS
:
2681 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
2684 case AARCH64_OPND_IMM_MOV
:
2685 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
2687 case 4: /* e.g. MOV Wd, #<imm32>. */
2689 int imm32
= opnd
->imm
.value
;
2690 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
2693 case 8: /* e.g. MOV Xd, #<imm64>. */
2694 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
2695 opnd
->imm
.value
, opnd
->imm
.value
);
2697 default: assert (0);
2701 case AARCH64_OPND_FPIMM0
:
2702 snprintf (buf
, size
, "#0.0");
2705 case AARCH64_OPND_LIMM
:
2706 case AARCH64_OPND_AIMM
:
2707 case AARCH64_OPND_HALF
:
2708 if (opnd
->shifter
.amount
)
2709 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%d", opnd
->imm
.value
,
2710 opnd
->shifter
.amount
);
2712 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
2715 case AARCH64_OPND_SIMD_IMM
:
2716 case AARCH64_OPND_SIMD_IMM_SFT
:
2717 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2718 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
2719 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
2721 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%d", opnd
->imm
.value
,
2722 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2723 opnd
->shifter
.amount
);
2726 case AARCH64_OPND_FPIMM
:
2727 case AARCH64_OPND_SIMD_FPIMM
:
2728 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
2730 case 2: /* e.g. FMOV <Hd>, #<imm>. */
2733 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
2734 snprintf (buf
, size
, "#%.18e", c
.f
);
2737 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2740 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
2741 snprintf (buf
, size
, "#%.18e", c
.f
);
2744 case 8: /* e.g. FMOV <Dd>, #<imm>. */
2747 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
2748 snprintf (buf
, size
, "#%.18e", c
.d
);
2751 default: assert (0);
2755 case AARCH64_OPND_CCMP_IMM
:
2756 case AARCH64_OPND_NZCV
:
2757 case AARCH64_OPND_EXCEPTION
:
2758 case AARCH64_OPND_UIMM4
:
2759 case AARCH64_OPND_UIMM7
:
2760 if (optional_operand_p (opcode
, idx
) == TRUE
2761 && (opnd
->imm
.value
==
2762 (int64_t) get_optional_operand_default_value (opcode
)))
2763 /* Omit the operand, e.g. DCPS1. */
2765 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
2768 case AARCH64_OPND_COND
:
2769 case AARCH64_OPND_COND1
:
2770 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
2773 case AARCH64_OPND_ADDR_ADRP
:
2774 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
2780 /* This is not necessary during the disassembling, as print_address_func
2781 in the disassemble_info will take care of the printing. But some
2782 other callers may be still interested in getting the string in *STR,
2783 so here we do snprintf regardless. */
2784 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
2787 case AARCH64_OPND_ADDR_PCREL14
:
2788 case AARCH64_OPND_ADDR_PCREL19
:
2789 case AARCH64_OPND_ADDR_PCREL21
:
2790 case AARCH64_OPND_ADDR_PCREL26
:
2791 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
2796 /* This is not necessary during the disassembling, as print_address_func
2797 in the disassemble_info will take care of the printing. But some
2798 other callers may be still interested in getting the string in *STR,
2799 so here we do snprintf regardless. */
2800 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
2803 case AARCH64_OPND_ADDR_SIMPLE
:
2804 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
2805 case AARCH64_OPND_SIMD_ADDR_POST
:
2806 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2807 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
2809 if (opnd
->addr
.offset
.is_reg
)
2810 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
2812 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
2815 snprintf (buf
, size
, "[%s]", name
);
2818 case AARCH64_OPND_ADDR_REGOFF
:
2819 print_register_offset_address
2820 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
2821 get_offset_int_reg_name (opnd
));
2824 case AARCH64_OPND_ADDR_SIMM7
:
2825 case AARCH64_OPND_ADDR_SIMM9
:
2826 case AARCH64_OPND_ADDR_SIMM9_2
:
2827 print_immediate_offset_address
2828 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
2831 case AARCH64_OPND_ADDR_UIMM12
:
2832 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2833 if (opnd
->addr
.offset
.imm
)
2834 snprintf (buf
, size
, "[%s,#%d]", name
, opnd
->addr
.offset
.imm
);
2836 snprintf (buf
, size
, "[%s]", name
);
2839 case AARCH64_OPND_SYSREG
:
2840 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
2841 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
2842 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
]))
2844 if (aarch64_sys_regs
[i
].name
)
2845 snprintf (buf
, size
, "%s", aarch64_sys_regs
[i
].name
);
2848 /* Implementation defined system register. */
2849 unsigned int value
= opnd
->sysreg
;
2850 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
2851 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
2856 case AARCH64_OPND_PSTATEFIELD
:
2857 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
2858 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
2860 assert (aarch64_pstatefields
[i
].name
);
2861 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
2864 case AARCH64_OPND_SYSREG_AT
:
2865 case AARCH64_OPND_SYSREG_DC
:
2866 case AARCH64_OPND_SYSREG_IC
:
2867 case AARCH64_OPND_SYSREG_TLBI
:
2868 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
2871 case AARCH64_OPND_BARRIER
:
2872 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
2875 case AARCH64_OPND_BARRIER_ISB
:
2876 /* Operand can be omitted, e.g. in DCPS1. */
2877 if (! optional_operand_p (opcode
, idx
)
2878 || (opnd
->barrier
->value
2879 != get_optional_operand_default_value (opcode
)))
2880 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
2883 case AARCH64_OPND_PRFOP
:
2884 if (opnd
->prfop
->name
!= NULL
)
2885 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
2887 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
2890 case AARCH64_OPND_BARRIER_PSB
:
2891 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
2899 #define CPENC(op0,op1,crn,crm,op2) \
2900 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2901 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
2902 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2903 /* for 3.9.10 System Instructions */
2904 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
2926 #define F_DEPRECATED 0x1 /* Deprecated system register. */
2931 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
2936 #define F_HASXT 0x4 /* System instruction register <Xt>
2940 /* TODO there are two more issues need to be resolved
2941 1. handle read-only and write-only system registers
2942 2. handle cpu-implementation-defined system registers. */
2943 const aarch64_sys_reg aarch64_sys_regs
[] =
2945 { "spsr_el1", CPEN_(0,C0
,0), 0 }, /* = spsr_svc */
2946 { "spsr_el12", CPEN_ (5, C0
, 0), F_ARCHEXT
},
2947 { "elr_el1", CPEN_(0,C0
,1), 0 },
2948 { "elr_el12", CPEN_ (5, C0
, 1), F_ARCHEXT
},
2949 { "sp_el0", CPEN_(0,C1
,0), 0 },
2950 { "spsel", CPEN_(0,C2
,0), 0 },
2951 { "daif", CPEN_(3,C2
,1), 0 },
2952 { "currentel", CPEN_(0,C2
,2), 0 }, /* RO */
2953 { "pan", CPEN_(0,C2
,3), F_ARCHEXT
},
2954 { "uao", CPEN_ (0, C2
, 4), F_ARCHEXT
},
2955 { "nzcv", CPEN_(3,C2
,0), 0 },
2956 { "fpcr", CPEN_(3,C4
,0), 0 },
2957 { "fpsr", CPEN_(3,C4
,1), 0 },
2958 { "dspsr_el0", CPEN_(3,C5
,0), 0 },
2959 { "dlr_el0", CPEN_(3,C5
,1), 0 },
2960 { "spsr_el2", CPEN_(4,C0
,0), 0 }, /* = spsr_hyp */
2961 { "elr_el2", CPEN_(4,C0
,1), 0 },
2962 { "sp_el1", CPEN_(4,C1
,0), 0 },
2963 { "spsr_irq", CPEN_(4,C3
,0), 0 },
2964 { "spsr_abt", CPEN_(4,C3
,1), 0 },
2965 { "spsr_und", CPEN_(4,C3
,2), 0 },
2966 { "spsr_fiq", CPEN_(4,C3
,3), 0 },
2967 { "spsr_el3", CPEN_(6,C0
,0), 0 },
2968 { "elr_el3", CPEN_(6,C0
,1), 0 },
2969 { "sp_el2", CPEN_(6,C1
,0), 0 },
2970 { "spsr_svc", CPEN_(0,C0
,0), F_DEPRECATED
}, /* = spsr_el1 */
2971 { "spsr_hyp", CPEN_(4,C0
,0), F_DEPRECATED
}, /* = spsr_el2 */
2972 { "midr_el1", CPENC(3,0,C0
,C0
,0), 0 }, /* RO */
2973 { "ctr_el0", CPENC(3,3,C0
,C0
,1), 0 }, /* RO */
2974 { "mpidr_el1", CPENC(3,0,C0
,C0
,5), 0 }, /* RO */
2975 { "revidr_el1", CPENC(3,0,C0
,C0
,6), 0 }, /* RO */
2976 { "aidr_el1", CPENC(3,1,C0
,C0
,7), 0 }, /* RO */
2977 { "dczid_el0", CPENC(3,3,C0
,C0
,7), 0 }, /* RO */
2978 { "id_dfr0_el1", CPENC(3,0,C0
,C1
,2), 0 }, /* RO */
2979 { "id_pfr0_el1", CPENC(3,0,C0
,C1
,0), 0 }, /* RO */
2980 { "id_pfr1_el1", CPENC(3,0,C0
,C1
,1), 0 }, /* RO */
2981 { "id_afr0_el1", CPENC(3,0,C0
,C1
,3), 0 }, /* RO */
2982 { "id_mmfr0_el1", CPENC(3,0,C0
,C1
,4), 0 }, /* RO */
2983 { "id_mmfr1_el1", CPENC(3,0,C0
,C1
,5), 0 }, /* RO */
2984 { "id_mmfr2_el1", CPENC(3,0,C0
,C1
,6), 0 }, /* RO */
2985 { "id_mmfr3_el1", CPENC(3,0,C0
,C1
,7), 0 }, /* RO */
2986 { "id_mmfr4_el1", CPENC(3,0,C0
,C2
,6), 0 }, /* RO */
2987 { "id_isar0_el1", CPENC(3,0,C0
,C2
,0), 0 }, /* RO */
2988 { "id_isar1_el1", CPENC(3,0,C0
,C2
,1), 0 }, /* RO */
2989 { "id_isar2_el1", CPENC(3,0,C0
,C2
,2), 0 }, /* RO */
2990 { "id_isar3_el1", CPENC(3,0,C0
,C2
,3), 0 }, /* RO */
2991 { "id_isar4_el1", CPENC(3,0,C0
,C2
,4), 0 }, /* RO */
2992 { "id_isar5_el1", CPENC(3,0,C0
,C2
,5), 0 }, /* RO */
2993 { "mvfr0_el1", CPENC(3,0,C0
,C3
,0), 0 }, /* RO */
2994 { "mvfr1_el1", CPENC(3,0,C0
,C3
,1), 0 }, /* RO */
2995 { "mvfr2_el1", CPENC(3,0,C0
,C3
,2), 0 }, /* RO */
2996 { "ccsidr_el1", CPENC(3,1,C0
,C0
,0), 0 }, /* RO */
2997 { "id_aa64pfr0_el1", CPENC(3,0,C0
,C4
,0), 0 }, /* RO */
2998 { "id_aa64pfr1_el1", CPENC(3,0,C0
,C4
,1), 0 }, /* RO */
2999 { "id_aa64dfr0_el1", CPENC(3,0,C0
,C5
,0), 0 }, /* RO */
3000 { "id_aa64dfr1_el1", CPENC(3,0,C0
,C5
,1), 0 }, /* RO */
3001 { "id_aa64isar0_el1", CPENC(3,0,C0
,C6
,0), 0 }, /* RO */
3002 { "id_aa64isar1_el1", CPENC(3,0,C0
,C6
,1), 0 }, /* RO */
3003 { "id_aa64mmfr0_el1", CPENC(3,0,C0
,C7
,0), 0 }, /* RO */
3004 { "id_aa64mmfr1_el1", CPENC(3,0,C0
,C7
,1), 0 }, /* RO */
3005 { "id_aa64mmfr2_el1", CPENC (3, 0, C0
, C7
, 2), F_ARCHEXT
}, /* RO */
3006 { "id_aa64afr0_el1", CPENC(3,0,C0
,C5
,4), 0 }, /* RO */
3007 { "id_aa64afr1_el1", CPENC(3,0,C0
,C5
,5), 0 }, /* RO */
3008 { "clidr_el1", CPENC(3,1,C0
,C0
,1), 0 }, /* RO */
3009 { "csselr_el1", CPENC(3,2,C0
,C0
,0), 0 }, /* RO */
3010 { "vpidr_el2", CPENC(3,4,C0
,C0
,0), 0 },
3011 { "vmpidr_el2", CPENC(3,4,C0
,C0
,5), 0 },
3012 { "sctlr_el1", CPENC(3,0,C1
,C0
,0), 0 },
3013 { "sctlr_el2", CPENC(3,4,C1
,C0
,0), 0 },
3014 { "sctlr_el3", CPENC(3,6,C1
,C0
,0), 0 },
3015 { "sctlr_el12", CPENC (3, 5, C1
, C0
, 0), F_ARCHEXT
},
3016 { "actlr_el1", CPENC(3,0,C1
,C0
,1), 0 },
3017 { "actlr_el2", CPENC(3,4,C1
,C0
,1), 0 },
3018 { "actlr_el3", CPENC(3,6,C1
,C0
,1), 0 },
3019 { "cpacr_el1", CPENC(3,0,C1
,C0
,2), 0 },
3020 { "cpacr_el12", CPENC (3, 5, C1
, C0
, 2), F_ARCHEXT
},
3021 { "cptr_el2", CPENC(3,4,C1
,C1
,2), 0 },
3022 { "cptr_el3", CPENC(3,6,C1
,C1
,2), 0 },
3023 { "scr_el3", CPENC(3,6,C1
,C1
,0), 0 },
3024 { "hcr_el2", CPENC(3,4,C1
,C1
,0), 0 },
3025 { "mdcr_el2", CPENC(3,4,C1
,C1
,1), 0 },
3026 { "mdcr_el3", CPENC(3,6,C1
,C3
,1), 0 },
3027 { "hstr_el2", CPENC(3,4,C1
,C1
,3), 0 },
3028 { "hacr_el2", CPENC(3,4,C1
,C1
,7), 0 },
3029 { "ttbr0_el1", CPENC(3,0,C2
,C0
,0), 0 },
3030 { "ttbr1_el1", CPENC(3,0,C2
,C0
,1), 0 },
3031 { "ttbr0_el2", CPENC(3,4,C2
,C0
,0), 0 },
3032 { "ttbr1_el2", CPENC (3, 4, C2
, C0
, 1), F_ARCHEXT
},
3033 { "ttbr0_el3", CPENC(3,6,C2
,C0
,0), 0 },
3034 { "ttbr0_el12", CPENC (3, 5, C2
, C0
, 0), F_ARCHEXT
},
3035 { "ttbr1_el12", CPENC (3, 5, C2
, C0
, 1), F_ARCHEXT
},
3036 { "vttbr_el2", CPENC(3,4,C2
,C1
,0), 0 },
3037 { "tcr_el1", CPENC(3,0,C2
,C0
,2), 0 },
3038 { "tcr_el2", CPENC(3,4,C2
,C0
,2), 0 },
3039 { "tcr_el3", CPENC(3,6,C2
,C0
,2), 0 },
3040 { "tcr_el12", CPENC (3, 5, C2
, C0
, 2), F_ARCHEXT
},
3041 { "vtcr_el2", CPENC(3,4,C2
,C1
,2), 0 },
3042 { "afsr0_el1", CPENC(3,0,C5
,C1
,0), 0 },
3043 { "afsr1_el1", CPENC(3,0,C5
,C1
,1), 0 },
3044 { "afsr0_el2", CPENC(3,4,C5
,C1
,0), 0 },
3045 { "afsr1_el2", CPENC(3,4,C5
,C1
,1), 0 },
3046 { "afsr0_el3", CPENC(3,6,C5
,C1
,0), 0 },
3047 { "afsr0_el12", CPENC (3, 5, C5
, C1
, 0), F_ARCHEXT
},
3048 { "afsr1_el3", CPENC(3,6,C5
,C1
,1), 0 },
3049 { "afsr1_el12", CPENC (3, 5, C5
, C1
, 1), F_ARCHEXT
},
3050 { "esr_el1", CPENC(3,0,C5
,C2
,0), 0 },
3051 { "esr_el2", CPENC(3,4,C5
,C2
,0), 0 },
3052 { "esr_el3", CPENC(3,6,C5
,C2
,0), 0 },
3053 { "esr_el12", CPENC (3, 5, C5
, C2
, 0), F_ARCHEXT
},
3054 { "vsesr_el2", CPENC (3, 4, C5
, C2
, 3), F_ARCHEXT
}, /* RO */
3055 { "fpexc32_el2", CPENC(3,4,C5
,C3
,0), 0 },
3056 { "erridr_el1", CPENC (3, 0, C5
, C3
, 0), F_ARCHEXT
}, /* RO */
3057 { "errselr_el1", CPENC (3, 0, C5
, C3
, 1), F_ARCHEXT
},
3058 { "erxfr_el1", CPENC (3, 0, C5
, C4
, 0), F_ARCHEXT
}, /* RO */
3059 { "erxctlr_el1", CPENC (3, 0, C5
, C4
, 1), F_ARCHEXT
},
3060 { "erxstatus_el1", CPENC (3, 0, C5
, C4
, 2), F_ARCHEXT
},
3061 { "erxaddr_el1", CPENC (3, 0, C5
, C4
, 3), F_ARCHEXT
},
3062 { "erxmisc0_el1", CPENC (3, 0, C5
, C5
, 0), F_ARCHEXT
},
3063 { "erxmisc1_el1", CPENC (3, 0, C5
, C5
, 1), F_ARCHEXT
},
3064 { "far_el1", CPENC(3,0,C6
,C0
,0), 0 },
3065 { "far_el2", CPENC(3,4,C6
,C0
,0), 0 },
3066 { "far_el3", CPENC(3,6,C6
,C0
,0), 0 },
3067 { "far_el12", CPENC (3, 5, C6
, C0
, 0), F_ARCHEXT
},
3068 { "hpfar_el2", CPENC(3,4,C6
,C0
,4), 0 },
3069 { "par_el1", CPENC(3,0,C7
,C4
,0), 0 },
3070 { "mair_el1", CPENC(3,0,C10
,C2
,0), 0 },
3071 { "mair_el2", CPENC(3,4,C10
,C2
,0), 0 },
3072 { "mair_el3", CPENC(3,6,C10
,C2
,0), 0 },
3073 { "mair_el12", CPENC (3, 5, C10
, C2
, 0), F_ARCHEXT
},
3074 { "amair_el1", CPENC(3,0,C10
,C3
,0), 0 },
3075 { "amair_el2", CPENC(3,4,C10
,C3
,0), 0 },
3076 { "amair_el3", CPENC(3,6,C10
,C3
,0), 0 },
3077 { "amair_el12", CPENC (3, 5, C10
, C3
, 0), F_ARCHEXT
},
3078 { "vbar_el1", CPENC(3,0,C12
,C0
,0), 0 },
3079 { "vbar_el2", CPENC(3,4,C12
,C0
,0), 0 },
3080 { "vbar_el3", CPENC(3,6,C12
,C0
,0), 0 },
3081 { "vbar_el12", CPENC (3, 5, C12
, C0
, 0), F_ARCHEXT
},
3082 { "rvbar_el1", CPENC(3,0,C12
,C0
,1), 0 }, /* RO */
3083 { "rvbar_el2", CPENC(3,4,C12
,C0
,1), 0 }, /* RO */
3084 { "rvbar_el3", CPENC(3,6,C12
,C0
,1), 0 }, /* RO */
3085 { "rmr_el1", CPENC(3,0,C12
,C0
,2), 0 },
3086 { "rmr_el2", CPENC(3,4,C12
,C0
,2), 0 },
3087 { "rmr_el3", CPENC(3,6,C12
,C0
,2), 0 },
3088 { "isr_el1", CPENC(3,0,C12
,C1
,0), 0 }, /* RO */
3089 { "disr_el1", CPENC (3, 0, C12
, C1
, 1), F_ARCHEXT
},
3090 { "vdisr_el2", CPENC (3, 4, C12
, C1
, 1), F_ARCHEXT
},
3091 { "contextidr_el1", CPENC(3,0,C13
,C0
,1), 0 },
3092 { "contextidr_el2", CPENC (3, 4, C13
, C0
, 1), F_ARCHEXT
},
3093 { "contextidr_el12", CPENC (3, 5, C13
, C0
, 1), F_ARCHEXT
},
3094 { "tpidr_el0", CPENC(3,3,C13
,C0
,2), 0 },
3095 { "tpidrro_el0", CPENC(3,3,C13
,C0
,3), 0 }, /* RO */
3096 { "tpidr_el1", CPENC(3,0,C13
,C0
,4), 0 },
3097 { "tpidr_el2", CPENC(3,4,C13
,C0
,2), 0 },
3098 { "tpidr_el3", CPENC(3,6,C13
,C0
,2), 0 },
3099 { "teecr32_el1", CPENC(2,2,C0
, C0
,0), 0 }, /* See section 3.9.7.1 */
3100 { "cntfrq_el0", CPENC(3,3,C14
,C0
,0), 0 }, /* RO */
3101 { "cntpct_el0", CPENC(3,3,C14
,C0
,1), 0 }, /* RO */
3102 { "cntvct_el0", CPENC(3,3,C14
,C0
,2), 0 }, /* RO */
3103 { "cntvoff_el2", CPENC(3,4,C14
,C0
,3), 0 },
3104 { "cntkctl_el1", CPENC(3,0,C14
,C1
,0), 0 },
3105 { "cntkctl_el12", CPENC (3, 5, C14
, C1
, 0), F_ARCHEXT
},
3106 { "cnthctl_el2", CPENC(3,4,C14
,C1
,0), 0 },
3107 { "cntp_tval_el0", CPENC(3,3,C14
,C2
,0), 0 },
3108 { "cntp_tval_el02", CPENC (3, 5, C14
, C2
, 0), F_ARCHEXT
},
3109 { "cntp_ctl_el0", CPENC(3,3,C14
,C2
,1), 0 },
3110 { "cntp_ctl_el02", CPENC (3, 5, C14
, C2
, 1), F_ARCHEXT
},
3111 { "cntp_cval_el0", CPENC(3,3,C14
,C2
,2), 0 },
3112 { "cntp_cval_el02", CPENC (3, 5, C14
, C2
, 2), F_ARCHEXT
},
3113 { "cntv_tval_el0", CPENC(3,3,C14
,C3
,0), 0 },
3114 { "cntv_tval_el02", CPENC (3, 5, C14
, C3
, 0), F_ARCHEXT
},
3115 { "cntv_ctl_el0", CPENC(3,3,C14
,C3
,1), 0 },
3116 { "cntv_ctl_el02", CPENC (3, 5, C14
, C3
, 1), F_ARCHEXT
},
3117 { "cntv_cval_el0", CPENC(3,3,C14
,C3
,2), 0 },
3118 { "cntv_cval_el02", CPENC (3, 5, C14
, C3
, 2), F_ARCHEXT
},
3119 { "cnthp_tval_el2", CPENC(3,4,C14
,C2
,0), 0 },
3120 { "cnthp_ctl_el2", CPENC(3,4,C14
,C2
,1), 0 },
3121 { "cnthp_cval_el2", CPENC(3,4,C14
,C2
,2), 0 },
3122 { "cntps_tval_el1", CPENC(3,7,C14
,C2
,0), 0 },
3123 { "cntps_ctl_el1", CPENC(3,7,C14
,C2
,1), 0 },
3124 { "cntps_cval_el1", CPENC(3,7,C14
,C2
,2), 0 },
3125 { "cnthv_tval_el2", CPENC (3, 4, C14
, C3
, 0), F_ARCHEXT
},
3126 { "cnthv_ctl_el2", CPENC (3, 4, C14
, C3
, 1), F_ARCHEXT
},
3127 { "cnthv_cval_el2", CPENC (3, 4, C14
, C3
, 2), F_ARCHEXT
},
3128 { "dacr32_el2", CPENC(3,4,C3
,C0
,0), 0 },
3129 { "ifsr32_el2", CPENC(3,4,C5
,C0
,1), 0 },
3130 { "teehbr32_el1", CPENC(2,2,C1
,C0
,0), 0 },
3131 { "sder32_el3", CPENC(3,6,C1
,C1
,1), 0 },
3132 { "mdscr_el1", CPENC(2,0,C0
, C2
, 2), 0 },
3133 { "mdccsr_el0", CPENC(2,3,C0
, C1
, 0), 0 }, /* r */
3134 { "mdccint_el1", CPENC(2,0,C0
, C2
, 0), 0 },
3135 { "dbgdtr_el0", CPENC(2,3,C0
, C4
, 0), 0 },
3136 { "dbgdtrrx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* r */
3137 { "dbgdtrtx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* w */
3138 { "osdtrrx_el1", CPENC(2,0,C0
, C0
, 2), 0 }, /* r */
3139 { "osdtrtx_el1", CPENC(2,0,C0
, C3
, 2), 0 }, /* w */
3140 { "oseccr_el1", CPENC(2,0,C0
, C6
, 2), 0 },
3141 { "dbgvcr32_el2", CPENC(2,4,C0
, C7
, 0), 0 },
3142 { "dbgbvr0_el1", CPENC(2,0,C0
, C0
, 4), 0 },
3143 { "dbgbvr1_el1", CPENC(2,0,C0
, C1
, 4), 0 },
3144 { "dbgbvr2_el1", CPENC(2,0,C0
, C2
, 4), 0 },
3145 { "dbgbvr3_el1", CPENC(2,0,C0
, C3
, 4), 0 },
3146 { "dbgbvr4_el1", CPENC(2,0,C0
, C4
, 4), 0 },
3147 { "dbgbvr5_el1", CPENC(2,0,C0
, C5
, 4), 0 },
3148 { "dbgbvr6_el1", CPENC(2,0,C0
, C6
, 4), 0 },
3149 { "dbgbvr7_el1", CPENC(2,0,C0
, C7
, 4), 0 },
3150 { "dbgbvr8_el1", CPENC(2,0,C0
, C8
, 4), 0 },
3151 { "dbgbvr9_el1", CPENC(2,0,C0
, C9
, 4), 0 },
3152 { "dbgbvr10_el1", CPENC(2,0,C0
, C10
,4), 0 },
3153 { "dbgbvr11_el1", CPENC(2,0,C0
, C11
,4), 0 },
3154 { "dbgbvr12_el1", CPENC(2,0,C0
, C12
,4), 0 },
3155 { "dbgbvr13_el1", CPENC(2,0,C0
, C13
,4), 0 },
3156 { "dbgbvr14_el1", CPENC(2,0,C0
, C14
,4), 0 },
3157 { "dbgbvr15_el1", CPENC(2,0,C0
, C15
,4), 0 },
3158 { "dbgbcr0_el1", CPENC(2,0,C0
, C0
, 5), 0 },
3159 { "dbgbcr1_el1", CPENC(2,0,C0
, C1
, 5), 0 },
3160 { "dbgbcr2_el1", CPENC(2,0,C0
, C2
, 5), 0 },
3161 { "dbgbcr3_el1", CPENC(2,0,C0
, C3
, 5), 0 },
3162 { "dbgbcr4_el1", CPENC(2,0,C0
, C4
, 5), 0 },
3163 { "dbgbcr5_el1", CPENC(2,0,C0
, C5
, 5), 0 },
3164 { "dbgbcr6_el1", CPENC(2,0,C0
, C6
, 5), 0 },
3165 { "dbgbcr7_el1", CPENC(2,0,C0
, C7
, 5), 0 },
3166 { "dbgbcr8_el1", CPENC(2,0,C0
, C8
, 5), 0 },
3167 { "dbgbcr9_el1", CPENC(2,0,C0
, C9
, 5), 0 },
3168 { "dbgbcr10_el1", CPENC(2,0,C0
, C10
,5), 0 },
3169 { "dbgbcr11_el1", CPENC(2,0,C0
, C11
,5), 0 },
3170 { "dbgbcr12_el1", CPENC(2,0,C0
, C12
,5), 0 },
3171 { "dbgbcr13_el1", CPENC(2,0,C0
, C13
,5), 0 },
3172 { "dbgbcr14_el1", CPENC(2,0,C0
, C14
,5), 0 },
3173 { "dbgbcr15_el1", CPENC(2,0,C0
, C15
,5), 0 },
3174 { "dbgwvr0_el1", CPENC(2,0,C0
, C0
, 6), 0 },
3175 { "dbgwvr1_el1", CPENC(2,0,C0
, C1
, 6), 0 },
3176 { "dbgwvr2_el1", CPENC(2,0,C0
, C2
, 6), 0 },
3177 { "dbgwvr3_el1", CPENC(2,0,C0
, C3
, 6), 0 },
3178 { "dbgwvr4_el1", CPENC(2,0,C0
, C4
, 6), 0 },
3179 { "dbgwvr5_el1", CPENC(2,0,C0
, C5
, 6), 0 },
3180 { "dbgwvr6_el1", CPENC(2,0,C0
, C6
, 6), 0 },
3181 { "dbgwvr7_el1", CPENC(2,0,C0
, C7
, 6), 0 },
3182 { "dbgwvr8_el1", CPENC(2,0,C0
, C8
, 6), 0 },
3183 { "dbgwvr9_el1", CPENC(2,0,C0
, C9
, 6), 0 },
3184 { "dbgwvr10_el1", CPENC(2,0,C0
, C10
,6), 0 },
3185 { "dbgwvr11_el1", CPENC(2,0,C0
, C11
,6), 0 },
3186 { "dbgwvr12_el1", CPENC(2,0,C0
, C12
,6), 0 },
3187 { "dbgwvr13_el1", CPENC(2,0,C0
, C13
,6), 0 },
3188 { "dbgwvr14_el1", CPENC(2,0,C0
, C14
,6), 0 },
3189 { "dbgwvr15_el1", CPENC(2,0,C0
, C15
,6), 0 },
3190 { "dbgwcr0_el1", CPENC(2,0,C0
, C0
, 7), 0 },
3191 { "dbgwcr1_el1", CPENC(2,0,C0
, C1
, 7), 0 },
3192 { "dbgwcr2_el1", CPENC(2,0,C0
, C2
, 7), 0 },
3193 { "dbgwcr3_el1", CPENC(2,0,C0
, C3
, 7), 0 },
3194 { "dbgwcr4_el1", CPENC(2,0,C0
, C4
, 7), 0 },
3195 { "dbgwcr5_el1", CPENC(2,0,C0
, C5
, 7), 0 },
3196 { "dbgwcr6_el1", CPENC(2,0,C0
, C6
, 7), 0 },
3197 { "dbgwcr7_el1", CPENC(2,0,C0
, C7
, 7), 0 },
3198 { "dbgwcr8_el1", CPENC(2,0,C0
, C8
, 7), 0 },
3199 { "dbgwcr9_el1", CPENC(2,0,C0
, C9
, 7), 0 },
3200 { "dbgwcr10_el1", CPENC(2,0,C0
, C10
,7), 0 },
3201 { "dbgwcr11_el1", CPENC(2,0,C0
, C11
,7), 0 },
3202 { "dbgwcr12_el1", CPENC(2,0,C0
, C12
,7), 0 },
3203 { "dbgwcr13_el1", CPENC(2,0,C0
, C13
,7), 0 },
3204 { "dbgwcr14_el1", CPENC(2,0,C0
, C14
,7), 0 },
3205 { "dbgwcr15_el1", CPENC(2,0,C0
, C15
,7), 0 },
3206 { "mdrar_el1", CPENC(2,0,C1
, C0
, 0), 0 }, /* r */
3207 { "oslar_el1", CPENC(2,0,C1
, C0
, 4), 0 }, /* w */
3208 { "oslsr_el1", CPENC(2,0,C1
, C1
, 4), 0 }, /* r */
3209 { "osdlr_el1", CPENC(2,0,C1
, C3
, 4), 0 },
3210 { "dbgprcr_el1", CPENC(2,0,C1
, C4
, 4), 0 },
3211 { "dbgclaimset_el1", CPENC(2,0,C7
, C8
, 6), 0 },
3212 { "dbgclaimclr_el1", CPENC(2,0,C7
, C9
, 6), 0 },
3213 { "dbgauthstatus_el1", CPENC(2,0,C7
, C14
,6), 0 }, /* r */
3214 { "pmblimitr_el1", CPENC (3, 0, C9
, C10
, 0), F_ARCHEXT
}, /* rw */
3215 { "pmbptr_el1", CPENC (3, 0, C9
, C10
, 1), F_ARCHEXT
}, /* rw */
3216 { "pmbsr_el1", CPENC (3, 0, C9
, C10
, 3), F_ARCHEXT
}, /* rw */
3217 { "pmbidr_el1", CPENC (3, 0, C9
, C10
, 7), F_ARCHEXT
}, /* ro */
3218 { "pmscr_el1", CPENC (3, 0, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3219 { "pmsicr_el1", CPENC (3, 0, C9
, C9
, 2), F_ARCHEXT
}, /* rw */
3220 { "pmsirr_el1", CPENC (3, 0, C9
, C9
, 3), F_ARCHEXT
}, /* rw */
3221 { "pmsfcr_el1", CPENC (3, 0, C9
, C9
, 4), F_ARCHEXT
}, /* rw */
3222 { "pmsevfr_el1", CPENC (3, 0, C9
, C9
, 5), F_ARCHEXT
}, /* rw */
3223 { "pmslatfr_el1", CPENC (3, 0, C9
, C9
, 6), F_ARCHEXT
}, /* rw */
3224 { "pmsidr_el1", CPENC (3, 0, C9
, C9
, 7), F_ARCHEXT
}, /* ro */
3225 { "pmscr_el2", CPENC (3, 4, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3226 { "pmscr_el12", CPENC (3, 5, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3227 { "pmcr_el0", CPENC(3,3,C9
,C12
, 0), 0 },
3228 { "pmcntenset_el0", CPENC(3,3,C9
,C12
, 1), 0 },
3229 { "pmcntenclr_el0", CPENC(3,3,C9
,C12
, 2), 0 },
3230 { "pmovsclr_el0", CPENC(3,3,C9
,C12
, 3), 0 },
3231 { "pmswinc_el0", CPENC(3,3,C9
,C12
, 4), 0 }, /* w */
3232 { "pmselr_el0", CPENC(3,3,C9
,C12
, 5), 0 },
3233 { "pmceid0_el0", CPENC(3,3,C9
,C12
, 6), 0 }, /* r */
3234 { "pmceid1_el0", CPENC(3,3,C9
,C12
, 7), 0 }, /* r */
3235 { "pmccntr_el0", CPENC(3,3,C9
,C13
, 0), 0 },
3236 { "pmxevtyper_el0", CPENC(3,3,C9
,C13
, 1), 0 },
3237 { "pmxevcntr_el0", CPENC(3,3,C9
,C13
, 2), 0 },
3238 { "pmuserenr_el0", CPENC(3,3,C9
,C14
, 0), 0 },
3239 { "pmintenset_el1", CPENC(3,0,C9
,C14
, 1), 0 },
3240 { "pmintenclr_el1", CPENC(3,0,C9
,C14
, 2), 0 },
3241 { "pmovsset_el0", CPENC(3,3,C9
,C14
, 3), 0 },
3242 { "pmevcntr0_el0", CPENC(3,3,C14
,C8
, 0), 0 },
3243 { "pmevcntr1_el0", CPENC(3,3,C14
,C8
, 1), 0 },
3244 { "pmevcntr2_el0", CPENC(3,3,C14
,C8
, 2), 0 },
3245 { "pmevcntr3_el0", CPENC(3,3,C14
,C8
, 3), 0 },
3246 { "pmevcntr4_el0", CPENC(3,3,C14
,C8
, 4), 0 },
3247 { "pmevcntr5_el0", CPENC(3,3,C14
,C8
, 5), 0 },
3248 { "pmevcntr6_el0", CPENC(3,3,C14
,C8
, 6), 0 },
3249 { "pmevcntr7_el0", CPENC(3,3,C14
,C8
, 7), 0 },
3250 { "pmevcntr8_el0", CPENC(3,3,C14
,C9
, 0), 0 },
3251 { "pmevcntr9_el0", CPENC(3,3,C14
,C9
, 1), 0 },
3252 { "pmevcntr10_el0", CPENC(3,3,C14
,C9
, 2), 0 },
3253 { "pmevcntr11_el0", CPENC(3,3,C14
,C9
, 3), 0 },
3254 { "pmevcntr12_el0", CPENC(3,3,C14
,C9
, 4), 0 },
3255 { "pmevcntr13_el0", CPENC(3,3,C14
,C9
, 5), 0 },
3256 { "pmevcntr14_el0", CPENC(3,3,C14
,C9
, 6), 0 },
3257 { "pmevcntr15_el0", CPENC(3,3,C14
,C9
, 7), 0 },
3258 { "pmevcntr16_el0", CPENC(3,3,C14
,C10
,0), 0 },
3259 { "pmevcntr17_el0", CPENC(3,3,C14
,C10
,1), 0 },
3260 { "pmevcntr18_el0", CPENC(3,3,C14
,C10
,2), 0 },
3261 { "pmevcntr19_el0", CPENC(3,3,C14
,C10
,3), 0 },
3262 { "pmevcntr20_el0", CPENC(3,3,C14
,C10
,4), 0 },
3263 { "pmevcntr21_el0", CPENC(3,3,C14
,C10
,5), 0 },
3264 { "pmevcntr22_el0", CPENC(3,3,C14
,C10
,6), 0 },
3265 { "pmevcntr23_el0", CPENC(3,3,C14
,C10
,7), 0 },
3266 { "pmevcntr24_el0", CPENC(3,3,C14
,C11
,0), 0 },
3267 { "pmevcntr25_el0", CPENC(3,3,C14
,C11
,1), 0 },
3268 { "pmevcntr26_el0", CPENC(3,3,C14
,C11
,2), 0 },
3269 { "pmevcntr27_el0", CPENC(3,3,C14
,C11
,3), 0 },
3270 { "pmevcntr28_el0", CPENC(3,3,C14
,C11
,4), 0 },
3271 { "pmevcntr29_el0", CPENC(3,3,C14
,C11
,5), 0 },
3272 { "pmevcntr30_el0", CPENC(3,3,C14
,C11
,6), 0 },
3273 { "pmevtyper0_el0", CPENC(3,3,C14
,C12
,0), 0 },
3274 { "pmevtyper1_el0", CPENC(3,3,C14
,C12
,1), 0 },
3275 { "pmevtyper2_el0", CPENC(3,3,C14
,C12
,2), 0 },
3276 { "pmevtyper3_el0", CPENC(3,3,C14
,C12
,3), 0 },
3277 { "pmevtyper4_el0", CPENC(3,3,C14
,C12
,4), 0 },
3278 { "pmevtyper5_el0", CPENC(3,3,C14
,C12
,5), 0 },
3279 { "pmevtyper6_el0", CPENC(3,3,C14
,C12
,6), 0 },
3280 { "pmevtyper7_el0", CPENC(3,3,C14
,C12
,7), 0 },
3281 { "pmevtyper8_el0", CPENC(3,3,C14
,C13
,0), 0 },
3282 { "pmevtyper9_el0", CPENC(3,3,C14
,C13
,1), 0 },
3283 { "pmevtyper10_el0", CPENC(3,3,C14
,C13
,2), 0 },
3284 { "pmevtyper11_el0", CPENC(3,3,C14
,C13
,3), 0 },
3285 { "pmevtyper12_el0", CPENC(3,3,C14
,C13
,4), 0 },
3286 { "pmevtyper13_el0", CPENC(3,3,C14
,C13
,5), 0 },
3287 { "pmevtyper14_el0", CPENC(3,3,C14
,C13
,6), 0 },
3288 { "pmevtyper15_el0", CPENC(3,3,C14
,C13
,7), 0 },
3289 { "pmevtyper16_el0", CPENC(3,3,C14
,C14
,0), 0 },
3290 { "pmevtyper17_el0", CPENC(3,3,C14
,C14
,1), 0 },
3291 { "pmevtyper18_el0", CPENC(3,3,C14
,C14
,2), 0 },
3292 { "pmevtyper19_el0", CPENC(3,3,C14
,C14
,3), 0 },
3293 { "pmevtyper20_el0", CPENC(3,3,C14
,C14
,4), 0 },
3294 { "pmevtyper21_el0", CPENC(3,3,C14
,C14
,5), 0 },
3295 { "pmevtyper22_el0", CPENC(3,3,C14
,C14
,6), 0 },
3296 { "pmevtyper23_el0", CPENC(3,3,C14
,C14
,7), 0 },
3297 { "pmevtyper24_el0", CPENC(3,3,C14
,C15
,0), 0 },
3298 { "pmevtyper25_el0", CPENC(3,3,C14
,C15
,1), 0 },
3299 { "pmevtyper26_el0", CPENC(3,3,C14
,C15
,2), 0 },
3300 { "pmevtyper27_el0", CPENC(3,3,C14
,C15
,3), 0 },
3301 { "pmevtyper28_el0", CPENC(3,3,C14
,C15
,4), 0 },
3302 { "pmevtyper29_el0", CPENC(3,3,C14
,C15
,5), 0 },
3303 { "pmevtyper30_el0", CPENC(3,3,C14
,C15
,6), 0 },
3304 { "pmccfiltr_el0", CPENC(3,3,C14
,C15
,7), 0 },
3305 { 0, CPENC(0,0,0,0,0), 0 },
3309 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
3311 return (reg
->flags
& F_DEPRECATED
) != 0;
3315 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
3316 const aarch64_sys_reg
*reg
)
3318 if (!(reg
->flags
& F_ARCHEXT
))
3321 /* PAN. Values are from aarch64_sys_regs. */
3322 if (reg
->value
== CPEN_(0,C2
,3)
3323 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3326 /* Virtualization host extensions: system registers. */
3327 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
3328 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
3329 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
3330 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
3331 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
3332 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3335 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3336 if ((reg
->value
== CPEN_ (5, C0
, 0)
3337 || reg
->value
== CPEN_ (5, C0
, 1)
3338 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
3339 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
3340 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
3341 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
3342 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
3343 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
3344 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
3345 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
3346 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
3347 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
3348 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
3349 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
3350 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
3351 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
3352 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3355 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3356 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
3357 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
3358 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
3359 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
3360 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
3361 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
3362 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3365 /* ARMv8.2 features. */
3367 /* ID_AA64MMFR2_EL1. */
3368 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
3369 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3373 if (reg
->value
== CPEN_ (0, C2
, 4)
3374 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3377 /* RAS extension. */
3379 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
3380 ERXMISC0_EL1 AND ERXMISC1_EL1. */
3381 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
3382 || reg
->value
== CPENC (3, 0, C5
, C3
, 1)
3383 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
3384 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
3385 || reg
->value
== CPENC (3, 0, C5
, C4
, 0)
3386 || reg
->value
== CPENC (3, 0, C5
, C4
, 1)
3387 || reg
->value
== CPENC (3, 0, C5
, C4
, 2)
3388 || reg
->value
== CPENC (3, 0, C5
, C4
, 3)
3389 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
3390 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
3391 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3394 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3395 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
3396 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
3397 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
3398 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3401 /* Statistical Profiling extension. */
3402 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
3403 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
3404 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
3405 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
3406 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
3407 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
3408 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
3409 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
3410 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
3411 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
3412 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
3413 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
3414 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
3415 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
3421 const aarch64_sys_reg aarch64_pstatefields
[] =
3423 { "spsel", 0x05, 0 },
3424 { "daifset", 0x1e, 0 },
3425 { "daifclr", 0x1f, 0 },
3426 { "pan", 0x04, F_ARCHEXT
},
3427 { "uao", 0x03, F_ARCHEXT
},
3428 { 0, CPENC(0,0,0,0,0), 0 },
3432 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
3433 const aarch64_sys_reg
*reg
)
3435 if (!(reg
->flags
& F_ARCHEXT
))
3438 /* PAN. Values are from aarch64_pstatefields. */
3439 if (reg
->value
== 0x04
3440 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3443 /* UAO. Values are from aarch64_pstatefields. */
3444 if (reg
->value
== 0x03
3445 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3451 const aarch64_sys_ins_reg aarch64_sys_regs_ic
[] =
3453 { "ialluis", CPENS(0,C7
,C1
,0), 0 },
3454 { "iallu", CPENS(0,C7
,C5
,0), 0 },
3455 { "ivau", CPENS (3, C7
, C5
, 1), F_HASXT
},
3456 { 0, CPENS(0,0,0,0), 0 }
3459 const aarch64_sys_ins_reg aarch64_sys_regs_dc
[] =
3461 { "zva", CPENS (3, C7
, C4
, 1), F_HASXT
},
3462 { "ivac", CPENS (0, C7
, C6
, 1), F_HASXT
},
3463 { "isw", CPENS (0, C7
, C6
, 2), F_HASXT
},
3464 { "cvac", CPENS (3, C7
, C10
, 1), F_HASXT
},
3465 { "csw", CPENS (0, C7
, C10
, 2), F_HASXT
},
3466 { "cvau", CPENS (3, C7
, C11
, 1), F_HASXT
},
3467 { "cvap", CPENS (3, C7
, C12
, 1), F_HASXT
| F_ARCHEXT
},
3468 { "civac", CPENS (3, C7
, C14
, 1), F_HASXT
},
3469 { "cisw", CPENS (0, C7
, C14
, 2), F_HASXT
},
3470 { 0, CPENS(0,0,0,0), 0 }
3473 const aarch64_sys_ins_reg aarch64_sys_regs_at
[] =
3475 { "s1e1r", CPENS (0, C7
, C8
, 0), F_HASXT
},
3476 { "s1e1w", CPENS (0, C7
, C8
, 1), F_HASXT
},
3477 { "s1e0r", CPENS (0, C7
, C8
, 2), F_HASXT
},
3478 { "s1e0w", CPENS (0, C7
, C8
, 3), F_HASXT
},
3479 { "s12e1r", CPENS (4, C7
, C8
, 4), F_HASXT
},
3480 { "s12e1w", CPENS (4, C7
, C8
, 5), F_HASXT
},
3481 { "s12e0r", CPENS (4, C7
, C8
, 6), F_HASXT
},
3482 { "s12e0w", CPENS (4, C7
, C8
, 7), F_HASXT
},
3483 { "s1e2r", CPENS (4, C7
, C8
, 0), F_HASXT
},
3484 { "s1e2w", CPENS (4, C7
, C8
, 1), F_HASXT
},
3485 { "s1e3r", CPENS (6, C7
, C8
, 0), F_HASXT
},
3486 { "s1e3w", CPENS (6, C7
, C8
, 1), F_HASXT
},
3487 { "s1e1rp", CPENS (0, C7
, C9
, 0), F_HASXT
| F_ARCHEXT
},
3488 { "s1e1wp", CPENS (0, C7
, C9
, 1), F_HASXT
| F_ARCHEXT
},
3489 { 0, CPENS(0,0,0,0), 0 }
3492 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi
[] =
3494 { "vmalle1", CPENS(0,C8
,C7
,0), 0 },
3495 { "vae1", CPENS (0, C8
, C7
, 1), F_HASXT
},
3496 { "aside1", CPENS (0, C8
, C7
, 2), F_HASXT
},
3497 { "vaae1", CPENS (0, C8
, C7
, 3), F_HASXT
},
3498 { "vmalle1is", CPENS(0,C8
,C3
,0), 0 },
3499 { "vae1is", CPENS (0, C8
, C3
, 1), F_HASXT
},
3500 { "aside1is", CPENS (0, C8
, C3
, 2), F_HASXT
},
3501 { "vaae1is", CPENS (0, C8
, C3
, 3), F_HASXT
},
3502 { "ipas2e1is", CPENS (4, C8
, C0
, 1), F_HASXT
},
3503 { "ipas2le1is",CPENS (4, C8
, C0
, 5), F_HASXT
},
3504 { "ipas2e1", CPENS (4, C8
, C4
, 1), F_HASXT
},
3505 { "ipas2le1", CPENS (4, C8
, C4
, 5), F_HASXT
},
3506 { "vae2", CPENS (4, C8
, C7
, 1), F_HASXT
},
3507 { "vae2is", CPENS (4, C8
, C3
, 1), F_HASXT
},
3508 { "vmalls12e1",CPENS(4,C8
,C7
,6), 0 },
3509 { "vmalls12e1is",CPENS(4,C8
,C3
,6), 0 },
3510 { "vae3", CPENS (6, C8
, C7
, 1), F_HASXT
},
3511 { "vae3is", CPENS (6, C8
, C3
, 1), F_HASXT
},
3512 { "alle2", CPENS(4,C8
,C7
,0), 0 },
3513 { "alle2is", CPENS(4,C8
,C3
,0), 0 },
3514 { "alle1", CPENS(4,C8
,C7
,4), 0 },
3515 { "alle1is", CPENS(4,C8
,C3
,4), 0 },
3516 { "alle3", CPENS(6,C8
,C7
,0), 0 },
3517 { "alle3is", CPENS(6,C8
,C3
,0), 0 },
3518 { "vale1is", CPENS (0, C8
, C3
, 5), F_HASXT
},
3519 { "vale2is", CPENS (4, C8
, C3
, 5), F_HASXT
},
3520 { "vale3is", CPENS (6, C8
, C3
, 5), F_HASXT
},
3521 { "vaale1is", CPENS (0, C8
, C3
, 7), F_HASXT
},
3522 { "vale1", CPENS (0, C8
, C7
, 5), F_HASXT
},
3523 { "vale2", CPENS (4, C8
, C7
, 5), F_HASXT
},
3524 { "vale3", CPENS (6, C8
, C7
, 5), F_HASXT
},
3525 { "vaale1", CPENS (0, C8
, C7
, 7), F_HASXT
},
3526 { 0, CPENS(0,0,0,0), 0 }
3530 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
3532 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
3536 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
3537 const aarch64_sys_ins_reg
*reg
)
3539 if (!(reg
->flags
& F_ARCHEXT
))
3542 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3543 if (reg
->value
== CPENS (3, C7
, C12
, 1)
3544 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3547 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3548 if ((reg
->value
== CPENS (0, C7
, C9
, 0)
3549 || reg
->value
== CPENS (0, C7
, C9
, 1))
3550 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
/* Extract bit BT of INSN (0 or 1).  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the bit-field INSN[HI:LO], inclusive on both ends.  The mask
   is built with an unsigned literal so that a 31-bit-wide field does not
   left-shift into the sign bit (undefined behavior for signed int).  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1u << (((HI) - (LO)) + 1)) - 1))
3577 verify_ldpsw (const struct aarch64_opcode
* opcode ATTRIBUTE_UNUSED
,
3578 const aarch64_insn insn
)
3580 int t
= BITS (insn
, 4, 0);
3581 int n
= BITS (insn
, 9, 5);
3582 int t2
= BITS (insn
, 14, 10);
3586 /* Write back enabled. */
3587 if ((t
== n
|| t2
== n
) && n
!= 31)
/* Include the opcode description table as well as the operand description
   table.  */
3603 #define VERIFIER(x) verify_##x
3604 #include "aarch64-tbl.h"