1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
31 #include "aarch64-opc.h"
34 int debug_dump
= FALSE
;
35 #endif /* DEBUG_AARCH64 */
37 /* Helper functions to determine which operand to be used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
43 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
44 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
51 return ((qualifier
>= AARCH64_OPND_QLF_S_B
52 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
62 DP_VECTOR_ACROSS_LANES
,
/* For each data pattern, the index of the operand that carries the
   significant size:Q information.  Indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
82 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers
[0] == qualifiers
[1]
87 && vector_qualifier_p (qualifiers
[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers
[0])
89 == aarch64_get_qualifier_esize (qualifiers
[1]))
90 && (aarch64_get_qualifier_esize (qualifiers
[0])
91 == aarch64_get_qualifier_esize (qualifiers
[2])))
92 return DP_VECTOR_3SAME
;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
96 if (vector_qualifier_p (qualifiers
[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers
[0])
99 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
100 return DP_VECTOR_LONG
;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers
[0] == qualifiers
[1]
103 && vector_qualifier_p (qualifiers
[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers
[0])
106 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers
[0])
108 == aarch64_get_qualifier_esize (qualifiers
[1])))
109 return DP_VECTOR_WIDE
;
111 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers
[1]) == TRUE
115 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
116 return DP_VECTOR_ACROSS_LANES
;
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time when we need to select an operand. We can
126 either buffer the caculated the result or statically generate the data,
127 however, it is not obvious that the optimization will bring significant
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
134 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
137 const aarch64_field fields
[] =
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
196 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
197 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
198 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
199 { 31, 1 }, /* b5: in the test bit and branch instructions. */
200 { 19, 5 }, /* b40: in the test bit and branch instructions. */
201 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
204 enum aarch64_operand_class
205 aarch64_get_operand_class (enum aarch64_opnd type
)
207 return aarch64_operands
[type
].op_class
;
211 aarch64_get_operand_name (enum aarch64_opnd type
)
213 return aarch64_operands
[type
].name
;
216 /* Get operand description string.
217 This is usually for the diagnosis purpose. */
219 aarch64_get_operand_desc (enum aarch64_opnd type
)
221 return aarch64_operands
[type
].desc
;
224 /* Table of all conditional affixes. */
225 const aarch64_cond aarch64_conds
[16] =
230 {{"cc", "lo", "ul"}, 0x3},
246 get_cond_from_value (aarch64_insn value
)
249 return &aarch64_conds
[(unsigned int) value
];
253 get_inverted_cond (const aarch64_cond
*cond
)
255 return &aarch64_conds
[cond
->value
^ 0x1];
258 /* Table describing the operand extension/shifting operators; indexed by
259 enum aarch64_modifier_kind.
261 The value column provides the most common values for encoding modifiers,
262 which enables table-driven encoding/decoding for the modifiers. */
263 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
282 enum aarch64_modifier_kind
283 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
285 return desc
- aarch64_operand_modifiers
;
289 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
291 return aarch64_operand_modifiers
[kind
].value
;
294 enum aarch64_modifier_kind
295 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
296 bfd_boolean extend_p
)
298 if (extend_p
== TRUE
)
299 return AARCH64_MOD_UXTB
+ value
;
301 return AARCH64_MOD_LSL
- value
;
305 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
307 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
311 static inline bfd_boolean
312 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
314 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
318 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
338 /* Table describing the operands supported by the aliases of the HINT
341 The name column is the operand that is accepted for the alias. The value
342 column is the hint number of the alias. The list of operands is terminated
343 by NULL in the name column. */
345 const struct aarch64_name_value_pair aarch64_hint_options
[] =
347 { "csync", 0x11 }, /* PSB CSYNC. */
351 /* op -> op: load = 0 instruction = 1 store = 2
353 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
354 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
355 const struct aarch64_name_value_pair aarch64_prfops
[32] =
357 { "pldl1keep", B(0, 1, 0) },
358 { "pldl1strm", B(0, 1, 1) },
359 { "pldl2keep", B(0, 2, 0) },
360 { "pldl2strm", B(0, 2, 1) },
361 { "pldl3keep", B(0, 3, 0) },
362 { "pldl3strm", B(0, 3, 1) },
365 { "plil1keep", B(1, 1, 0) },
366 { "plil1strm", B(1, 1, 1) },
367 { "plil2keep", B(1, 2, 0) },
368 { "plil2strm", B(1, 2, 1) },
369 { "plil3keep", B(1, 3, 0) },
370 { "plil3strm", B(1, 3, 1) },
373 { "pstl1keep", B(2, 1, 0) },
374 { "pstl1strm", B(2, 1, 1) },
375 { "pstl2keep", B(2, 2, 0) },
376 { "pstl2strm", B(2, 2, 1) },
377 { "pstl3keep", B(2, 3, 0) },
378 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 iff LOW <= VALUE <= HIGH (inclusive bounds).  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return 1 iff VALUE is a multiple of ALIGN; ALIGN must be a power of
   two for the mask trick below to be valid.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return ((value & (align - 1)) == 0) ? 1 : 0;
}
/* A signed value fits in a field.  Return 1 iff VALUE is representable
   as a WIDTH-bit two's-complement number; WIDTH >= 64 always fails
   (shifting by >= the type width would be undefined behaviour).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}
/* An unsigned value fits in a field.  Return 1 iff 0 <= VALUE < 2^WIDTH;
   WIDTH >= 64 always fails (shift would be undefined behaviour).  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
434 /* Return 1 if OPERAND is SP or WSP. */
436 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
438 return ((aarch64_get_operand_class (operand
->type
)
439 == AARCH64_OPND_CLASS_INT_REG
)
440 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
441 && operand
->reg
.regno
== 31);
444 /* Return 1 if OPERAND is XZR or WZP. */
446 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
448 return ((aarch64_get_operand_class (operand
->type
)
449 == AARCH64_OPND_CLASS_INT_REG
)
450 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
451 && operand
->reg
.regno
== 31);
454 /* Return true if the operand *OPERAND that has the operand code
455 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
456 qualified by the qualifier TARGET. */
459 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
460 aarch64_opnd_qualifier_t target
)
462 switch (operand
->qualifier
)
464 case AARCH64_OPND_QLF_W
:
465 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
468 case AARCH64_OPND_QLF_X
:
469 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
472 case AARCH64_OPND_QLF_WSP
:
473 if (target
== AARCH64_OPND_QLF_W
474 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
477 case AARCH64_OPND_QLF_SP
:
478 if (target
== AARCH64_OPND_QLF_X
479 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
489 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
490 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
492 Return NIL if more than one expected qualifiers are found. */
494 aarch64_opnd_qualifier_t
495 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
497 const aarch64_opnd_qualifier_t known_qlf
,
504 When the known qualifier is NIL, we have to assume that there is only
505 one qualifier sequence in the *QSEQ_LIST and return the corresponding
506 qualifier directly. One scenario is that for instruction
507 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
508 which has only one possible valid qualifier sequence
510 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
511 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
513 Because the qualifier NIL has dual roles in the qualifier sequence:
514 it can mean no qualifier for the operand, or the qualifer sequence is
515 not in use (when all qualifiers in the sequence are NILs), we have to
516 handle this special case here. */
517 if (known_qlf
== AARCH64_OPND_NIL
)
519 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
520 return qseq_list
[0][idx
];
523 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
525 if (qseq_list
[i
][known_idx
] == known_qlf
)
528 /* More than one sequences are found to have KNOWN_QLF at
530 return AARCH64_OPND_NIL
;
535 return qseq_list
[saved_i
][idx
];
/* Classification of an operand qualifier; selects how the three data
   fields of operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};

/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers [] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
610 static inline bfd_boolean
611 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
613 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
617 static inline bfd_boolean
618 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
620 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
625 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
627 return aarch64_opnd_qualifiers
[qualifier
].desc
;
630 /* Given an operand qualifier, return the expected data element size
631 of a qualified operand. */
633 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
635 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
636 return aarch64_opnd_qualifiers
[qualifier
].data0
;
640 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
642 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
643 return aarch64_opnd_qualifiers
[qualifier
].data1
;
647 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
649 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
650 return aarch64_opnd_qualifiers
[qualifier
].data2
;
654 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
656 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
657 return aarch64_opnd_qualifiers
[qualifier
].data0
;
661 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
663 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
664 return aarch64_opnd_qualifiers
[qualifier
].data1
;
/* Debug-build helper: print a printf-style message STR to stdout,
   prefixed with "#### " and followed by a newline.
   NOTE(review): body reconstructed from the visible signature and its
   callers (dump_* below) — confirm against upstream before relying on
   the exact output format.  */
static void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}
680 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
684 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
685 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
690 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
691 const aarch64_opnd_qualifier_t
*qualifier
)
694 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
696 aarch64_verbose ("dump_match_qualifiers:");
697 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
698 curr
[i
] = opnd
[i
].qualifier
;
699 dump_qualifier_sequence (curr
);
700 aarch64_verbose ("against");
701 dump_qualifier_sequence (qualifier
);
703 #endif /* DEBUG_AARCH64 */
705 /* TODO improve this, we can have an extra field at the runtime to
706 store the number of operands rather than calculating it every time. */
709 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
712 const enum aarch64_opnd
*opnds
= opcode
->operands
;
713 while (opnds
[i
++] != AARCH64_OPND_NIL
)
716 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
720 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
721 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
723 N.B. on the entry, it is very likely that only some operands in *INST
724 have had their qualifiers been established.
726 If STOP_AT is not -1, the function will only try to match
727 the qualifier sequence for operands before and including the operand
728 of index STOP_AT; and on success *RET will only be filled with the first
729 (STOP_AT+1) qualifiers.
731 A couple examples of the matching algorithm:
739 Apart from serving the main encoding routine, this can also be called
740 during or after the operand decoding. */
743 aarch64_find_best_match (const aarch64_inst
*inst
,
744 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
745 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
749 const aarch64_opnd_qualifier_t
*qualifiers
;
751 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
754 DEBUG_TRACE ("SUCCEED: no operand");
758 if (stop_at
< 0 || stop_at
>= num_opnds
)
759 stop_at
= num_opnds
- 1;
761 /* For each pattern. */
762 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
765 qualifiers
= *qualifiers_list
;
767 /* Start as positive. */
770 DEBUG_TRACE ("%d", i
);
773 dump_match_qualifiers (inst
->operands
, qualifiers
);
776 /* Most opcodes has much fewer patterns in the list.
777 First NIL qualifier indicates the end in the list. */
778 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
780 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
786 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
788 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
790 /* Either the operand does not have qualifier, or the qualifier
791 for the operand needs to be deduced from the qualifier
793 In the latter case, any constraint checking related with
794 the obtained qualifier should be done later in
795 operand_general_constraint_met_p. */
798 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
800 /* Unless the target qualifier can also qualify the operand
801 (which has already had a non-nil qualifier), non-equal
802 qualifiers are generally un-matched. */
803 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
812 continue; /* Equal qualifiers are certainly matched. */
815 /* Qualifiers established. */
822 /* Fill the result in *RET. */
824 qualifiers
= *qualifiers_list
;
826 DEBUG_TRACE ("complete qualifiers using list %d", i
);
829 dump_qualifier_sequence (qualifiers
);
832 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
833 ret
[j
] = *qualifiers
;
834 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
835 ret
[j
] = AARCH64_OPND_QLF_NIL
;
837 DEBUG_TRACE ("SUCCESS");
841 DEBUG_TRACE ("FAIL");
845 /* Operand qualifier matching and resolving.
847 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
848 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
850 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
854 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
857 aarch64_opnd_qualifier_seq_t qualifiers
;
859 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
862 DEBUG_TRACE ("matching FAIL");
866 /* Update the qualifiers. */
867 if (update_p
== TRUE
)
868 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
870 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
872 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
873 "update %s with %s for operand %d",
874 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
875 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
876 inst
->operands
[i
].qualifier
= qualifiers
[i
];
879 DEBUG_TRACE ("matching SUCCESS");
883 /* Return TRUE if VALUE is a wide constant that can be moved into a general
886 IS32 indicates whether value is a 32-bit immediate or not.
887 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
888 amount will be returned in *SHIFT_AMOUNT. */
891 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
895 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
899 /* Allow all zeros or all ones in top 32-bits, so that
900 32-bit constant expressions like ~0x80000000 are
902 uint64_t ext
= value
;
903 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
904 /* Immediate out of range. */
906 value
&= (int64_t) 0xffffffff;
909 /* first, try movz then movn */
911 if ((value
& ((int64_t) 0xffff << 0)) == value
)
913 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
915 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
917 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
922 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
926 if (shift_amount
!= NULL
)
927 *shift_amount
= amount
;
929 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
934 /* Build the accepted values for immediate logical SIMD instructions.
936 The standard encodings of the immediate value are:
937 N imms immr SIMD size R S
938 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
939 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
940 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
941 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
942 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
943 0 11110s 00000r 2 UInt(r) UInt(s)
944 where all-ones value of S is reserved.
946 Let's call E the SIMD size.
948 The immediate value is: S+1 bits '1' rotated to the right by R.
950 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
951 (remember S != E - 1). */
953 #define TOTAL_IMM_NB 5334
958 aarch64_insn encoding
;
961 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
964 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
966 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
967 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
969 if (imm1
->imm
< imm2
->imm
)
971 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R             S
   1         ssssss     rrrrrr      64        rrrrrr        ssssss
   0         0sssss     0rrrrr      32        rrrrr         sssss
   0         10ssss     00rrrr      16        rrrr          ssss
   0         110sss     000rrr      8         rrr           sss
   0         1110ss     0000rr      4         rr            ss
   0         11110s     00000r      2         r             s  */
static inline uint32_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
991 build_immediate_table (void)
993 uint32_t log_e
, e
, s
, r
, s_mask
;
999 for (log_e
= 1; log_e
<= 6; log_e
++)
1001 /* Get element size. */
1006 mask
= 0xffffffffffffffffull
;
1012 mask
= (1ull << e
) - 1;
1014 1 ((1 << 4) - 1) << 2 = 111100
1015 2 ((1 << 3) - 1) << 3 = 111000
1016 3 ((1 << 2) - 1) << 4 = 110000
1017 4 ((1 << 1) - 1) << 5 = 100000
1018 5 ((1 << 0) - 1) << 6 = 000000 */
1019 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1021 for (s
= 0; s
< e
- 1; s
++)
1022 for (r
= 0; r
< e
; r
++)
1024 /* s+1 consecutive bits to 1 (s < 63) */
1025 imm
= (1ull << (s
+ 1)) - 1;
1026 /* rotate right by r */
1028 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1029 /* replicate the constant depending on SIMD size */
1032 case 1: imm
= (imm
<< 2) | imm
;
1033 case 2: imm
= (imm
<< 4) | imm
;
1034 case 3: imm
= (imm
<< 8) | imm
;
1035 case 4: imm
= (imm
<< 16) | imm
;
1036 case 5: imm
= (imm
<< 32) | imm
;
1040 simd_immediates
[nb_imms
].imm
= imm
;
1041 simd_immediates
[nb_imms
].encoding
=
1042 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1046 assert (nb_imms
== TOTAL_IMM_NB
);
1047 qsort(simd_immediates
, nb_imms
,
1048 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1051 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1052 be accepted by logical (immediate) instructions
1053 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1055 IS32 indicates whether or not VALUE is a 32-bit immediate.
1056 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1057 VALUE will be returned in *ENCODING. */
1060 aarch64_logical_immediate_p (uint64_t value
, int is32
, aarch64_insn
*encoding
)
1062 simd_imm_encoding imm_enc
;
1063 const simd_imm_encoding
*imm_encoding
;
1064 static bfd_boolean initialized
= FALSE
;
1066 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), is32: %d", value
,
1069 if (initialized
== FALSE
)
1071 build_immediate_table ();
1077 /* Allow all zeros or all ones in top 32-bits, so that
1078 constant expressions like ~1 are permitted. */
1079 if (value
>> 32 != 0 && value
>> 32 != 0xffffffff)
1082 /* Replicate the 32 lower bits to the 32 upper bits. */
1083 value
&= 0xffffffff;
1084 value
|= value
<< 32;
1087 imm_enc
.imm
= value
;
1088 imm_encoding
= (const simd_imm_encoding
*)
1089 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1090 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1091 if (imm_encoding
== NULL
)
1093 DEBUG_TRACE ("exit with FALSE");
1096 if (encoding
!= NULL
)
1097 *encoding
= imm_encoding
->encoding
;
1098 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	/* Byte i is all-ones: bit i of the result is 1.  */
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither 0x00 nor 0xff is not shrinkable.  */
	return -1;
    }

  return ret;
}
1124 /* Utility inline functions for operand_general_constraint_met_p. */
1127 set_error (aarch64_operand_error
*mismatch_detail
,
1128 enum aarch64_operand_error_kind kind
, int idx
,
1131 if (mismatch_detail
== NULL
)
1133 mismatch_detail
->kind
= kind
;
1134 mismatch_detail
->index
= idx
;
1135 mismatch_detail
->error
= error
;
1139 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1142 if (mismatch_detail
== NULL
)
1144 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1148 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1149 int idx
, int lower_bound
, int upper_bound
,
1152 if (mismatch_detail
== NULL
)
1154 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1155 mismatch_detail
->data
[0] = lower_bound
;
1156 mismatch_detail
->data
[1] = upper_bound
;
1160 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1161 int idx
, int lower_bound
, int upper_bound
)
1163 if (mismatch_detail
== NULL
)
1165 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1166 _("immediate value"));
1170 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1171 int idx
, int lower_bound
, int upper_bound
)
1173 if (mismatch_detail
== NULL
)
1175 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1176 _("immediate offset"));
1180 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1181 int idx
, int lower_bound
, int upper_bound
)
1183 if (mismatch_detail
== NULL
)
1185 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1186 _("register number"));
1190 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1191 int idx
, int lower_bound
, int upper_bound
)
1193 if (mismatch_detail
== NULL
)
1195 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1196 _("register element index"));
1200 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1201 int idx
, int lower_bound
, int upper_bound
)
1203 if (mismatch_detail
== NULL
)
1205 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1210 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1213 if (mismatch_detail
== NULL
)
1215 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1216 mismatch_detail
->data
[0] = alignment
;
1220 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1223 if (mismatch_detail
== NULL
)
1225 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1226 mismatch_detail
->data
[0] = expected_num
;
1230 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1233 if (mismatch_detail
== NULL
)
1235 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1238 /* General constraint checking based on operand code.
1240 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1241 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1243 This function has to be called after the qualifiers for all operands
1246 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1247 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1248 of error message during the disassembling where error message is not
1249 wanted. We avoid the dynamic construction of strings of error messages
1250 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1251 use a combination of error code, static string and some integer data to
1252 represent an error. */
1255 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1256 enum aarch64_opnd type
,
1257 const aarch64_opcode
*opcode
,
1258 aarch64_operand_error
*mismatch_detail
)
1263 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1264 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1266 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1268 switch (aarch64_operands
[type
].op_class
)
1270 case AARCH64_OPND_CLASS_INT_REG
:
1271 /* Check pair reg constraints for cas* instructions. */
1272 if (type
== AARCH64_OPND_PAIRREG
)
1274 assert (idx
== 1 || idx
== 3);
1275 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1277 set_syntax_error (mismatch_detail
, idx
- 1,
1278 _("reg pair must start from even reg"));
1281 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1283 set_syntax_error (mismatch_detail
, idx
,
1284 _("reg pair must be contiguous"));
1290 /* <Xt> may be optional in some IC and TLBI instructions. */
1291 if (type
== AARCH64_OPND_Rt_SYS
)
1293 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1294 == AARCH64_OPND_CLASS_SYSTEM
));
1295 if (opnds
[1].present
1296 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1298 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1301 if (!opnds
[1].present
1302 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1304 set_other_error (mismatch_detail
, idx
, _("missing register"));
1310 case AARCH64_OPND_QLF_WSP
:
1311 case AARCH64_OPND_QLF_SP
:
1312 if (!aarch64_stack_pointer_p (opnd
))
1314 set_other_error (mismatch_detail
, idx
,
1315 _("stack pointer register expected"));
1324 case AARCH64_OPND_CLASS_COND
:
1325 if (type
== AARCH64_OPND_COND1
1326 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1328 /* Not allow AL or NV. */
1329 set_syntax_error (mismatch_detail
, idx
, NULL
);
1333 case AARCH64_OPND_CLASS_ADDRESS
:
1334 /* Check writeback. */
1335 switch (opcode
->iclass
)
1339 case ldstnapair_offs
:
1342 if (opnd
->addr
.writeback
== 1)
1344 set_syntax_error (mismatch_detail
, idx
,
1345 _("unexpected address writeback"));
1350 case ldstpair_indexed
:
1353 if (opnd
->addr
.writeback
== 0)
1355 set_syntax_error (mismatch_detail
, idx
,
1356 _("address writeback expected"));
1361 assert (opnd
->addr
.writeback
== 0);
1366 case AARCH64_OPND_ADDR_SIMM7
:
1367 /* Scaled signed 7 bits immediate offset. */
1368 /* Get the size of the data element that is accessed, which may be
1369 different from that of the source register size,
1370 e.g. in strb/ldrb. */
1371 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1372 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1374 set_offset_out_of_range_error (mismatch_detail
, idx
,
1375 -64 * size
, 63 * size
);
1378 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1380 set_unaligned_error (mismatch_detail
, idx
, size
);
1384 case AARCH64_OPND_ADDR_SIMM9
:
1385 /* Unscaled signed 9 bits immediate offset. */
1386 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1388 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1393 case AARCH64_OPND_ADDR_SIMM9_2
:
1394 /* Unscaled signed 9 bits immediate offset, which has to be negative
1396 size
= aarch64_get_qualifier_esize (qualifier
);
1397 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1398 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1399 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1401 set_other_error (mismatch_detail
, idx
,
1402 _("negative or unaligned offset expected"));
1405 case AARCH64_OPND_SIMD_ADDR_POST
:
1406 /* AdvSIMD load/store multiple structures, post-index. */
1408 if (opnd
->addr
.offset
.is_reg
)
1410 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1414 set_other_error (mismatch_detail
, idx
,
1415 _("invalid register offset"));
1421 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1422 unsigned num_bytes
; /* total number of bytes transferred. */
1423 /* The opcode dependent area stores the number of elements in
1424 each structure to be loaded/stored. */
1425 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1426 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1427 /* Special handling of loading single structure to all lane. */
1428 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1429 * aarch64_get_qualifier_esize (prev
->qualifier
);
1431 num_bytes
= prev
->reglist
.num_regs
1432 * aarch64_get_qualifier_esize (prev
->qualifier
)
1433 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1434 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1436 set_other_error (mismatch_detail
, idx
,
1437 _("invalid post-increment amount"));
1443 case AARCH64_OPND_ADDR_REGOFF
:
1444 /* Get the size of the data element that is accessed, which may be
1445 different from that of the source register size,
1446 e.g. in strb/ldrb. */
1447 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1448 /* It is either no shift or shift by the binary logarithm of SIZE. */
1449 if (opnd
->shifter
.amount
!= 0
1450 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1452 set_other_error (mismatch_detail
, idx
,
1453 _("invalid shift amount"));
1456 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1458 switch (opnd
->shifter
.kind
)
1460 case AARCH64_MOD_UXTW
:
1461 case AARCH64_MOD_LSL
:
1462 case AARCH64_MOD_SXTW
:
1463 case AARCH64_MOD_SXTX
: break;
1465 set_other_error (mismatch_detail
, idx
,
1466 _("invalid extend/shift operator"));
1471 case AARCH64_OPND_ADDR_UIMM12
:
1472 imm
= opnd
->addr
.offset
.imm
;
1473 /* Get the size of the data element that is accessed, which may be
1474 different from that of the source register size,
1475 e.g. in strb/ldrb. */
1476 size
= aarch64_get_qualifier_esize (qualifier
);
1477 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1479 set_offset_out_of_range_error (mismatch_detail
, idx
,
1483 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1485 set_unaligned_error (mismatch_detail
, idx
, size
);
1490 case AARCH64_OPND_ADDR_PCREL14
:
1491 case AARCH64_OPND_ADDR_PCREL19
:
1492 case AARCH64_OPND_ADDR_PCREL21
:
1493 case AARCH64_OPND_ADDR_PCREL26
:
1494 imm
= opnd
->imm
.value
;
1495 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1497 /* The offset value in a PC-relative branch instruction is alway
1498 4-byte aligned and is encoded without the lowest 2 bits. */
1499 if (!value_aligned_p (imm
, 4))
1501 set_unaligned_error (mismatch_detail
, idx
, 4);
1504 /* Right shift by 2 so that we can carry out the following check
1508 size
= get_operand_fields_width (get_operand_from_code (type
));
1509 if (!value_fit_signed_field_p (imm
, size
))
1511 set_other_error (mismatch_detail
, idx
,
1512 _("immediate out of range"));
1522 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1523 /* The opcode dependent area stores the number of elements in
1524 each structure to be loaded/stored. */
1525 num
= get_opcode_dependent_value (opcode
);
1528 case AARCH64_OPND_LVt
:
1529 assert (num
>= 1 && num
<= 4);
1530 /* Unless LD1/ST1, the number of registers should be equal to that
1531 of the structure elements. */
1532 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1534 set_reg_list_error (mismatch_detail
, idx
, num
);
1538 case AARCH64_OPND_LVt_AL
:
1539 case AARCH64_OPND_LEt
:
1540 assert (num
>= 1 && num
<= 4);
1541 /* The number of registers should be equal to that of the structure
1543 if (opnd
->reglist
.num_regs
!= num
)
1545 set_reg_list_error (mismatch_detail
, idx
, num
);
1554 case AARCH64_OPND_CLASS_IMMEDIATE
:
1555 /* Constraint check on immediate operand. */
1556 imm
= opnd
->imm
.value
;
1557 /* E.g. imm_0_31 constrains value to be 0..31. */
1558 if (qualifier_value_in_range_constraint_p (qualifier
)
1559 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1560 get_upper_bound (qualifier
)))
1562 set_imm_out_of_range_error (mismatch_detail
, idx
,
1563 get_lower_bound (qualifier
),
1564 get_upper_bound (qualifier
));
1570 case AARCH64_OPND_AIMM
:
1571 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1573 set_other_error (mismatch_detail
, idx
,
1574 _("invalid shift operator"));
1577 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1579 set_other_error (mismatch_detail
, idx
,
1580 _("shift amount expected to be 0 or 12"));
1583 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1585 set_other_error (mismatch_detail
, idx
,
1586 _("immediate out of range"));
1591 case AARCH64_OPND_HALF
:
1592 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1593 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1595 set_other_error (mismatch_detail
, idx
,
1596 _("invalid shift operator"));
1599 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1600 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
1602 set_other_error (mismatch_detail
, idx
,
1603 _("shift amount should be a multiple of 16"));
1606 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
1608 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
1612 if (opnd
->imm
.value
< 0)
1614 set_other_error (mismatch_detail
, idx
,
1615 _("negative immediate value not allowed"));
1618 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
1620 set_other_error (mismatch_detail
, idx
,
1621 _("immediate out of range"));
1626 case AARCH64_OPND_IMM_MOV
:
1628 int is32
= aarch64_get_qualifier_esize (opnds
[0].qualifier
) == 4;
1629 imm
= opnd
->imm
.value
;
1633 case OP_MOV_IMM_WIDEN
:
1635 /* Fall through... */
1636 case OP_MOV_IMM_WIDE
:
1637 if (!aarch64_wide_constant_p (imm
, is32
, NULL
))
1639 set_other_error (mismatch_detail
, idx
,
1640 _("immediate out of range"));
1644 case OP_MOV_IMM_LOG
:
1645 if (!aarch64_logical_immediate_p (imm
, is32
, NULL
))
1647 set_other_error (mismatch_detail
, idx
,
1648 _("immediate out of range"));
1659 case AARCH64_OPND_NZCV
:
1660 case AARCH64_OPND_CCMP_IMM
:
1661 case AARCH64_OPND_EXCEPTION
:
1662 case AARCH64_OPND_UIMM4
:
1663 case AARCH64_OPND_UIMM7
:
1664 case AARCH64_OPND_UIMM3_OP1
:
1665 case AARCH64_OPND_UIMM3_OP2
:
1666 size
= get_operand_fields_width (get_operand_from_code (type
));
1668 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
1670 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1676 case AARCH64_OPND_WIDTH
:
1677 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
1678 && opnds
[0].type
== AARCH64_OPND_Rd
);
1679 size
= get_upper_bound (qualifier
);
1680 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
1681 /* lsb+width <= reg.size */
1683 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
1684 size
- opnds
[idx
-1].imm
.value
);
1689 case AARCH64_OPND_LIMM
:
1691 int is32
= opnds
[0].qualifier
== AARCH64_OPND_QLF_W
;
1692 uint64_t uimm
= opnd
->imm
.value
;
1693 if (opcode
->op
== OP_BIC
)
1695 if (aarch64_logical_immediate_p (uimm
, is32
, NULL
) == FALSE
)
1697 set_other_error (mismatch_detail
, idx
,
1698 _("immediate out of range"));
1704 case AARCH64_OPND_IMM0
:
1705 case AARCH64_OPND_FPIMM0
:
1706 if (opnd
->imm
.value
!= 0)
1708 set_other_error (mismatch_detail
, idx
,
1709 _("immediate zero expected"));
1714 case AARCH64_OPND_SHLL_IMM
:
1716 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
1717 if (opnd
->imm
.value
!= size
)
1719 set_other_error (mismatch_detail
, idx
,
1720 _("invalid shift amount"));
1725 case AARCH64_OPND_IMM_VLSL
:
1726 size
= aarch64_get_qualifier_esize (qualifier
);
1727 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
1729 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1735 case AARCH64_OPND_IMM_VLSR
:
1736 size
= aarch64_get_qualifier_esize (qualifier
);
1737 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
1739 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
1744 case AARCH64_OPND_SIMD_IMM
:
1745 case AARCH64_OPND_SIMD_IMM_SFT
:
1746 /* Qualifier check. */
1749 case AARCH64_OPND_QLF_LSL
:
1750 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1752 set_other_error (mismatch_detail
, idx
,
1753 _("invalid shift operator"));
1757 case AARCH64_OPND_QLF_MSL
:
1758 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
1760 set_other_error (mismatch_detail
, idx
,
1761 _("invalid shift operator"));
1765 case AARCH64_OPND_QLF_NIL
:
1766 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1768 set_other_error (mismatch_detail
, idx
,
1769 _("shift is not permitted"));
1777 /* Is the immediate valid? */
1779 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
1781 /* uimm8 or simm8 */
1782 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
1784 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
1788 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
1791 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1792 ffffffffgggggggghhhhhhhh'. */
1793 set_other_error (mismatch_detail
, idx
,
1794 _("invalid value for immediate"));
1797 /* Is the shift amount valid? */
1798 switch (opnd
->shifter
.kind
)
1800 case AARCH64_MOD_LSL
:
1801 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1802 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
1804 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
1808 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
1810 set_unaligned_error (mismatch_detail
, idx
, 8);
1814 case AARCH64_MOD_MSL
:
1815 /* Only 8 and 16 are valid shift amount. */
1816 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
1818 set_other_error (mismatch_detail
, idx
,
1819 _("shift amount expected to be 0 or 16"));
1824 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1826 set_other_error (mismatch_detail
, idx
,
1827 _("invalid shift operator"));
1834 case AARCH64_OPND_FPIMM
:
1835 case AARCH64_OPND_SIMD_FPIMM
:
1836 if (opnd
->imm
.is_fp
== 0)
1838 set_other_error (mismatch_detail
, idx
,
1839 _("floating-point immediate expected"));
1842 /* The value is expected to be an 8-bit floating-point constant with
1843 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1844 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1846 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
1848 set_other_error (mismatch_detail
, idx
,
1849 _("immediate out of range"));
1852 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1854 set_other_error (mismatch_detail
, idx
,
1855 _("invalid shift operator"));
1865 case AARCH64_OPND_CLASS_CP_REG
:
1866 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1867 valid range: C0 - C15. */
1868 if (opnd
->reg
.regno
> 15)
1870 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
1875 case AARCH64_OPND_CLASS_SYSTEM
:
1878 case AARCH64_OPND_PSTATEFIELD
:
1879 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
1881 The immediate must be #0 or #1. */
1882 if (opnd
->pstatefield
== 0x04 /* PAN. */
1883 && opnds
[1].imm
.value
> 1)
1885 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
1888 /* MSR SPSel, #uimm4
1889 Uses uimm4 as a control value to select the stack pointer: if
1890 bit 0 is set it selects the current exception level's stack
1891 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1892 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1893 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
1895 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
1904 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
1905 /* Get the upper bound for the element index. */
1906 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1907 /* Index out-of-range. */
1908 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
1910 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1913 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1914 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
1915 number is encoded in "size:M:Rm":
1921 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
1922 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
1924 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
1929 case AARCH64_OPND_CLASS_MODIFIED_REG
:
1930 assert (idx
== 1 || idx
== 2);
1933 case AARCH64_OPND_Rm_EXT
:
1934 if (aarch64_extend_operator_p (opnd
->shifter
.kind
) == FALSE
1935 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1937 set_other_error (mismatch_detail
, idx
,
1938 _("extend operator expected"));
1941 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1942 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1943 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1945 if (!aarch64_stack_pointer_p (opnds
+ 0)
1946 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
1948 if (!opnd
->shifter
.operator_present
)
1950 set_other_error (mismatch_detail
, idx
,
1951 _("missing extend operator"));
1954 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
1956 set_other_error (mismatch_detail
, idx
,
1957 _("'LSL' operator not allowed"));
1961 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
1962 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
1963 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
1965 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
1968 /* In the 64-bit form, the final register operand is written as Wm
1969 for all but the (possibly omitted) UXTX/LSL and SXTX
1971 N.B. GAS allows X register to be used with any operator as a
1972 programming convenience. */
1973 if (qualifier
== AARCH64_OPND_QLF_X
1974 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
1975 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
1976 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
1978 set_other_error (mismatch_detail
, idx
, _("W register expected"));
1983 case AARCH64_OPND_Rm_SFT
:
1984 /* ROR is not available to the shifted register operand in
1985 arithmetic instructions. */
1986 if (aarch64_shift_operator_p (opnd
->shifter
.kind
) == FALSE
)
1988 set_other_error (mismatch_detail
, idx
,
1989 _("shift operator expected"));
1992 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
1993 && opcode
->iclass
!= log_shift
)
1995 set_other_error (mismatch_detail
, idx
,
1996 _("'ROR' operator not allowed"));
1999 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
2000 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2002 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2019 /* Main entrypoint for the operand constraint checking.
2021 Return 1 if operands of *INST meet the constraint applied by the operand
2022 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2023 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2024 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2025 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2026 error kind when it is notified that an instruction does not pass the check).
2028 Un-determined operand qualifiers may get established during the process. */
2031 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2032 aarch64_operand_error
*mismatch_detail
)
2036 DEBUG_TRACE ("enter");
2038 /* Match operands' qualifier.
2039 *INST has already had qualifier establish for some, if not all, of
2040 its operands; we need to find out whether these established
2041 qualifiers match one of the qualifier sequence in
2042 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2043 with the corresponding qualifier in such a sequence.
2044 Only basic operand constraint checking is done here; the more thorough
2045 constraint checking will carried out by operand_general_constraint_met_p,
2046 which has be to called after this in order to get all of the operands'
2047 qualifiers established. */
2048 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2050 DEBUG_TRACE ("FAIL on operand qualifier matching");
2051 if (mismatch_detail
)
2053 /* Return an error type to indicate that it is the qualifier
2054 matching failure; we don't care about which operand as there
2055 are enough information in the opcode table to reproduce it. */
2056 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2057 mismatch_detail
->index
= -1;
2058 mismatch_detail
->error
= NULL
;
2063 /* Match operands' constraint. */
2064 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2066 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2067 if (type
== AARCH64_OPND_NIL
)
2069 if (inst
->operands
[i
].skip
)
2071 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2074 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2075 inst
->opcode
, mismatch_detail
) == 0)
2077 DEBUG_TRACE ("FAIL on operand %d", i
);
2082 DEBUG_TRACE ("PASS");
2087 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2088 Also updates the TYPE of each INST->OPERANDS with the corresponding
2089 value of OPCODE->OPERANDS.
2091 Note that some operand qualifiers may need to be manually cleared by
2092 the caller before it further calls the aarch64_opcode_encode; by
2093 doing this, it helps the qualifier matching facilities work
2096 const aarch64_opcode
*
2097 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2100 const aarch64_opcode
*old
= inst
->opcode
;
2102 inst
->opcode
= opcode
;
2104 /* Update the operand types. */
2105 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2107 inst
->operands
[i
].type
= opcode
->operands
[i
];
2108 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2112 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2118 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2121 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2122 if (operands
[i
] == operand
)
2124 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* Integer register name table, indexed as:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */

/* Register name prefixes; restored here as the table below relies on them.  */
#define R32 "w"
#define R64 "x"
static const char *int_reg[2][2][32] = {
  { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
      R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
    { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
      R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
  { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
      R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
    { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
      R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
};
2156 /* Return the integer register name.
2157 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2159 static inline const char *
2160 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2162 const int has_zr
= sp_reg_p
? 0 : 1;
2163 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2164 return int_reg
[has_zr
][is_64
][regno
];
2167 /* Like get_int_reg_name, but IS_64 is always 1. */
2169 static inline const char *
2170 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2172 const int has_zr
= sp_reg_p
? 0 : 1;
2173 return int_reg
[has_zr
][1][regno
];
2176 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7>              */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2240 /* Produce the string representation of the register list operand *OPND
2241 in the buffer pointed by BUF of size SIZE. */
2243 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
)
2245 const int num_regs
= opnd
->reglist
.num_regs
;
2246 const int first_reg
= opnd
->reglist
.first_regno
;
2247 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2248 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2249 char tb
[8]; /* Temporary buffer. */
2251 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2252 assert (num_regs
>= 1 && num_regs
<= 4);
2254 /* Prepare the index if any. */
2255 if (opnd
->reglist
.has_index
)
2256 snprintf (tb
, 8, "[%d]", opnd
->reglist
.index
);
2260 /* The hyphenated form is preferred for disassembly if there are
2261 more than two registers in the list, and the register numbers
2262 are monotonically increasing in increments of one. */
2263 if (num_regs
> 2 && last_reg
> first_reg
)
2264 snprintf (buf
, size
, "{v%d.%s-v%d.%s}%s", first_reg
, qlf_name
,
2265 last_reg
, qlf_name
, tb
);
2268 const int reg0
= first_reg
;
2269 const int reg1
= (first_reg
+ 1) & 0x1f;
2270 const int reg2
= (first_reg
+ 2) & 0x1f;
2271 const int reg3
= (first_reg
+ 3) & 0x1f;
2276 snprintf (buf
, size
, "{v%d.%s}%s", reg0
, qlf_name
, tb
);
2279 snprintf (buf
, size
, "{v%d.%s, v%d.%s}%s", reg0
, qlf_name
,
2280 reg1
, qlf_name
, tb
);
2283 snprintf (buf
, size
, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0
, qlf_name
,
2284 reg1
, qlf_name
, reg2
, qlf_name
, tb
);
2287 snprintf (buf
, size
, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2288 reg0
, qlf_name
, reg1
, qlf_name
, reg2
, qlf_name
,
2289 reg3
, qlf_name
, tb
);
2295 /* Produce the string representation of the register offset address operand
2296 *OPND in the buffer pointed by BUF of size SIZE. */
2298 print_register_offset_address (char *buf
, size_t size
,
2299 const aarch64_opnd_info
*opnd
)
2301 const size_t tblen
= 16;
2302 char tb
[tblen
]; /* Temporary buffer. */
2303 bfd_boolean lsl_p
= FALSE
; /* Is LSL shift operator? */
2304 bfd_boolean wm_p
= FALSE
; /* Should Rm be Wm? */
2305 bfd_boolean print_extend_p
= TRUE
;
2306 bfd_boolean print_amount_p
= TRUE
;
2307 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
2309 switch (opnd
->shifter
.kind
)
2311 case AARCH64_MOD_UXTW
: wm_p
= TRUE
; break;
2312 case AARCH64_MOD_LSL
: lsl_p
= TRUE
; break;
2313 case AARCH64_MOD_SXTW
: wm_p
= TRUE
; break;
2314 case AARCH64_MOD_SXTX
: break;
2315 default: assert (0);
2318 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
2319 || !opnd
->shifter
.amount_present
))
2321 /* Not print the shift/extend amount when the amount is zero and
2322 when it is not the special case of 8-bit load/store instruction. */
2323 print_amount_p
= FALSE
;
2324 /* Likewise, no need to print the shift operator LSL in such a
2327 print_extend_p
= FALSE
;
2330 /* Prepare for the extend/shift. */
2334 snprintf (tb
, tblen
, ",%s #%d", shift_name
, opnd
->shifter
.amount
);
2336 snprintf (tb
, tblen
, ",%s", shift_name
);
2341 snprintf (buf
, size
, "[%s,%s%s]",
2342 get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
2343 get_int_reg_name (opnd
->addr
.offset
.regno
,
2344 wm_p
? AARCH64_OPND_QLF_W
: AARCH64_OPND_QLF_X
,
2349 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2350 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2351 PC, PCREL_P and ADDRESS are used to pass in and return information about
2352 the PC-relative address calculation, where the PC value is passed in
2353 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2354 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2355 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2357 The function serves both the disassembler and the assembler diagnostics
2358 issuer, which is the reason why it lives in this file. */
2361 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
2362 const aarch64_opcode
*opcode
,
2363 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
2367 const char *name
= NULL
;
2368 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
2369 enum aarch64_modifier_kind kind
;
2378 case AARCH64_OPND_Rd
:
2379 case AARCH64_OPND_Rn
:
2380 case AARCH64_OPND_Rm
:
2381 case AARCH64_OPND_Rt
:
2382 case AARCH64_OPND_Rt2
:
2383 case AARCH64_OPND_Rs
:
2384 case AARCH64_OPND_Ra
:
2385 case AARCH64_OPND_Rt_SYS
:
2386 case AARCH64_OPND_PAIRREG
:
2387 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2388 the <ic_op>, therefore we we use opnd->present to override the
2389 generic optional-ness information. */
2390 if (opnd
->type
== AARCH64_OPND_Rt_SYS
&& !opnd
->present
)
2392 /* Omit the operand, e.g. RET. */
2393 if (optional_operand_p (opcode
, idx
)
2394 && opnd
->reg
.regno
== get_optional_operand_default_value (opcode
))
2396 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2397 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2398 snprintf (buf
, size
, "%s",
2399 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2402 case AARCH64_OPND_Rd_SP
:
2403 case AARCH64_OPND_Rn_SP
:
2404 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2405 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
2406 || opnd
->qualifier
== AARCH64_OPND_QLF_X
2407 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
2408 snprintf (buf
, size
, "%s",
2409 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
2412 case AARCH64_OPND_Rm_EXT
:
2413 kind
= opnd
->shifter
.kind
;
2414 assert (idx
== 1 || idx
== 2);
2415 if ((aarch64_stack_pointer_p (opnds
)
2416 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
2417 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
2418 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
2419 && kind
== AARCH64_MOD_UXTW
)
2420 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
2421 && kind
== AARCH64_MOD_UXTX
)))
2423 /* 'LSL' is the preferred form in this case. */
2424 kind
= AARCH64_MOD_LSL
;
2425 if (opnd
->shifter
.amount
== 0)
2427 /* Shifter omitted. */
2428 snprintf (buf
, size
, "%s",
2429 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2433 if (opnd
->shifter
.amount
)
2434 snprintf (buf
, size
, "%s, %s #%d",
2435 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2436 aarch64_operand_modifiers
[kind
].name
,
2437 opnd
->shifter
.amount
);
2439 snprintf (buf
, size
, "%s, %s",
2440 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2441 aarch64_operand_modifiers
[kind
].name
);
2444 case AARCH64_OPND_Rm_SFT
:
2445 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2446 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2447 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2448 snprintf (buf
, size
, "%s",
2449 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2451 snprintf (buf
, size
, "%s, %s #%d",
2452 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2453 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2454 opnd
->shifter
.amount
);
2457 case AARCH64_OPND_Fd
:
2458 case AARCH64_OPND_Fn
:
2459 case AARCH64_OPND_Fm
:
2460 case AARCH64_OPND_Fa
:
2461 case AARCH64_OPND_Ft
:
2462 case AARCH64_OPND_Ft2
:
2463 case AARCH64_OPND_Sd
:
2464 case AARCH64_OPND_Sn
:
2465 case AARCH64_OPND_Sm
:
2466 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
2470 case AARCH64_OPND_Vd
:
2471 case AARCH64_OPND_Vn
:
2472 case AARCH64_OPND_Vm
:
2473 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
2474 aarch64_get_qualifier_name (opnd
->qualifier
));
2477 case AARCH64_OPND_Ed
:
2478 case AARCH64_OPND_En
:
2479 case AARCH64_OPND_Em
:
2480 snprintf (buf
, size
, "v%d.%s[%d]", opnd
->reglane
.regno
,
2481 aarch64_get_qualifier_name (opnd
->qualifier
),
2482 opnd
->reglane
.index
);
2485 case AARCH64_OPND_VdD1
:
2486 case AARCH64_OPND_VnD1
:
2487 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
2490 case AARCH64_OPND_LVn
:
2491 case AARCH64_OPND_LVt
:
2492 case AARCH64_OPND_LVt_AL
:
2493 case AARCH64_OPND_LEt
:
2494 print_register_list (buf
, size
, opnd
);
2497 case AARCH64_OPND_Cn
:
2498 case AARCH64_OPND_Cm
:
2499 snprintf (buf
, size
, "C%d", opnd
->reg
.regno
);
2502 case AARCH64_OPND_IDX
:
2503 case AARCH64_OPND_IMM
:
2504 case AARCH64_OPND_WIDTH
:
2505 case AARCH64_OPND_UIMM3_OP1
:
2506 case AARCH64_OPND_UIMM3_OP2
:
2507 case AARCH64_OPND_BIT_NUM
:
2508 case AARCH64_OPND_IMM_VLSL
:
2509 case AARCH64_OPND_IMM_VLSR
:
2510 case AARCH64_OPND_SHLL_IMM
:
2511 case AARCH64_OPND_IMM0
:
2512 case AARCH64_OPND_IMMR
:
2513 case AARCH64_OPND_IMMS
:
2514 case AARCH64_OPND_FBITS
:
2515 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
2518 case AARCH64_OPND_IMM_MOV
:
2519 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
2521 case 4: /* e.g. MOV Wd, #<imm32>. */
2523 int imm32
= opnd
->imm
.value
;
2524 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
2527 case 8: /* e.g. MOV Xd, #<imm64>. */
2528 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
2529 opnd
->imm
.value
, opnd
->imm
.value
);
2531 default: assert (0);
2535 case AARCH64_OPND_FPIMM0
:
2536 snprintf (buf
, size
, "#0.0");
2539 case AARCH64_OPND_LIMM
:
2540 case AARCH64_OPND_AIMM
:
2541 case AARCH64_OPND_HALF
:
2542 if (opnd
->shifter
.amount
)
2543 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%d", opnd
->imm
.value
,
2544 opnd
->shifter
.amount
);
2546 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
2549 case AARCH64_OPND_SIMD_IMM
:
2550 case AARCH64_OPND_SIMD_IMM_SFT
:
2551 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2552 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
2553 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
2555 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%d", opnd
->imm
.value
,
2556 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2557 opnd
->shifter
.amount
);
2560 case AARCH64_OPND_FPIMM
:
2561 case AARCH64_OPND_SIMD_FPIMM
:
2562 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
2564 case 2: /* e.g. FMOV <Hd>, #<imm>. */
2567 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
2568 snprintf (buf
, size
, "#%.18e", c
.f
);
2571 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2574 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
2575 snprintf (buf
, size
, "#%.18e", c
.f
);
2578 case 8: /* e.g. FMOV <Sd>, #<imm>. */
2581 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
2582 snprintf (buf
, size
, "#%.18e", c
.d
);
2585 default: assert (0);
2589 case AARCH64_OPND_CCMP_IMM
:
2590 case AARCH64_OPND_NZCV
:
2591 case AARCH64_OPND_EXCEPTION
:
2592 case AARCH64_OPND_UIMM4
:
2593 case AARCH64_OPND_UIMM7
:
2594 if (optional_operand_p (opcode
, idx
) == TRUE
2595 && (opnd
->imm
.value
==
2596 (int64_t) get_optional_operand_default_value (opcode
)))
2597 /* Omit the operand, e.g. DCPS1. */
2599 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
2602 case AARCH64_OPND_COND
:
2603 case AARCH64_OPND_COND1
:
2604 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
2607 case AARCH64_OPND_ADDR_ADRP
:
2608 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
2614 /* This is not necessary during the disassembling, as print_address_func
2615 in the disassemble_info will take care of the printing. But some
2616 other callers may be still interested in getting the string in *STR,
2617 so here we do snprintf regardless. */
2618 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
2621 case AARCH64_OPND_ADDR_PCREL14
:
2622 case AARCH64_OPND_ADDR_PCREL19
:
2623 case AARCH64_OPND_ADDR_PCREL21
:
2624 case AARCH64_OPND_ADDR_PCREL26
:
2625 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
2630 /* This is not necessary during the disassembling, as print_address_func
2631 in the disassemble_info will take care of the printing. But some
2632 other callers may be still interested in getting the string in *STR,
2633 so here we do snprintf regardless. */
2634 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
2637 case AARCH64_OPND_ADDR_SIMPLE
:
2638 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
2639 case AARCH64_OPND_SIMD_ADDR_POST
:
2640 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2641 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
2643 if (opnd
->addr
.offset
.is_reg
)
2644 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
2646 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
2649 snprintf (buf
, size
, "[%s]", name
);
2652 case AARCH64_OPND_ADDR_REGOFF
:
2653 print_register_offset_address (buf
, size
, opnd
);
2656 case AARCH64_OPND_ADDR_SIMM7
:
2657 case AARCH64_OPND_ADDR_SIMM9
:
2658 case AARCH64_OPND_ADDR_SIMM9_2
:
2659 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2660 if (opnd
->addr
.writeback
)
2662 if (opnd
->addr
.preind
)
2663 snprintf (buf
, size
, "[%s,#%d]!", name
, opnd
->addr
.offset
.imm
);
2665 snprintf (buf
, size
, "[%s],#%d", name
, opnd
->addr
.offset
.imm
);
2669 if (opnd
->addr
.offset
.imm
)
2670 snprintf (buf
, size
, "[%s,#%d]", name
, opnd
->addr
.offset
.imm
);
2672 snprintf (buf
, size
, "[%s]", name
);
2676 case AARCH64_OPND_ADDR_UIMM12
:
2677 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2678 if (opnd
->addr
.offset
.imm
)
2679 snprintf (buf
, size
, "[%s,#%d]", name
, opnd
->addr
.offset
.imm
);
2681 snprintf (buf
, size
, "[%s]", name
);
2684 case AARCH64_OPND_SYSREG
:
2685 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
2686 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
2687 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
]))
2689 if (aarch64_sys_regs
[i
].name
)
2690 snprintf (buf
, size
, "%s", aarch64_sys_regs
[i
].name
);
2693 /* Implementation defined system register. */
2694 unsigned int value
= opnd
->sysreg
;
2695 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
2696 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
2701 case AARCH64_OPND_PSTATEFIELD
:
2702 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
2703 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
2705 assert (aarch64_pstatefields
[i
].name
);
2706 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
2709 case AARCH64_OPND_SYSREG_AT
:
2710 case AARCH64_OPND_SYSREG_DC
:
2711 case AARCH64_OPND_SYSREG_IC
:
2712 case AARCH64_OPND_SYSREG_TLBI
:
2713 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
2716 case AARCH64_OPND_BARRIER
:
2717 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
2720 case AARCH64_OPND_BARRIER_ISB
:
2721 /* Operand can be omitted, e.g. in DCPS1. */
2722 if (! optional_operand_p (opcode
, idx
)
2723 || (opnd
->barrier
->value
2724 != get_optional_operand_default_value (opcode
)))
2725 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
2728 case AARCH64_OPND_PRFOP
:
2729 if (opnd
->prfop
->name
!= NULL
)
2730 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
2732 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
2735 case AARCH64_OPND_BARRIER_PSB
:
2736 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
2744 #define CPENC(op0,op1,crn,crm,op2) \
2745 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2746 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
2747 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2748 /* for 3.9.10 System Instructions */
2749 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
2771 #define F_DEPRECATED 0x1 /* Deprecated system register. */
2776 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
2781 #define F_HASXT 0x4 /* System instruction register <Xt>
2785 /* TODO there are two more issues need to be resolved
2786 1. handle read-only and write-only system registers
2787 2. handle cpu-implementation-defined system registers. */
2788 const aarch64_sys_reg aarch64_sys_regs
[] =
2790 { "spsr_el1", CPEN_(0,C0
,0), 0 }, /* = spsr_svc */
2791 { "spsr_el12", CPEN_ (5, C0
, 0), F_ARCHEXT
},
2792 { "elr_el1", CPEN_(0,C0
,1), 0 },
2793 { "elr_el12", CPEN_ (5, C0
, 1), F_ARCHEXT
},
2794 { "sp_el0", CPEN_(0,C1
,0), 0 },
2795 { "spsel", CPEN_(0,C2
,0), 0 },
2796 { "daif", CPEN_(3,C2
,1), 0 },
2797 { "currentel", CPEN_(0,C2
,2), 0 }, /* RO */
2798 { "pan", CPEN_(0,C2
,3), F_ARCHEXT
},
2799 { "uao", CPEN_ (0, C2
, 4), F_ARCHEXT
},
2800 { "nzcv", CPEN_(3,C2
,0), 0 },
2801 { "fpcr", CPEN_(3,C4
,0), 0 },
2802 { "fpsr", CPEN_(3,C4
,1), 0 },
2803 { "dspsr_el0", CPEN_(3,C5
,0), 0 },
2804 { "dlr_el0", CPEN_(3,C5
,1), 0 },
2805 { "spsr_el2", CPEN_(4,C0
,0), 0 }, /* = spsr_hyp */
2806 { "elr_el2", CPEN_(4,C0
,1), 0 },
2807 { "sp_el1", CPEN_(4,C1
,0), 0 },
2808 { "spsr_irq", CPEN_(4,C3
,0), 0 },
2809 { "spsr_abt", CPEN_(4,C3
,1), 0 },
2810 { "spsr_und", CPEN_(4,C3
,2), 0 },
2811 { "spsr_fiq", CPEN_(4,C3
,3), 0 },
2812 { "spsr_el3", CPEN_(6,C0
,0), 0 },
2813 { "elr_el3", CPEN_(6,C0
,1), 0 },
2814 { "sp_el2", CPEN_(6,C1
,0), 0 },
2815 { "spsr_svc", CPEN_(0,C0
,0), F_DEPRECATED
}, /* = spsr_el1 */
2816 { "spsr_hyp", CPEN_(4,C0
,0), F_DEPRECATED
}, /* = spsr_el2 */
2817 { "midr_el1", CPENC(3,0,C0
,C0
,0), 0 }, /* RO */
2818 { "ctr_el0", CPENC(3,3,C0
,C0
,1), 0 }, /* RO */
2819 { "mpidr_el1", CPENC(3,0,C0
,C0
,5), 0 }, /* RO */
2820 { "revidr_el1", CPENC(3,0,C0
,C0
,6), 0 }, /* RO */
2821 { "aidr_el1", CPENC(3,1,C0
,C0
,7), 0 }, /* RO */
2822 { "dczid_el0", CPENC(3,3,C0
,C0
,7), 0 }, /* RO */
2823 { "id_dfr0_el1", CPENC(3,0,C0
,C1
,2), 0 }, /* RO */
2824 { "id_pfr0_el1", CPENC(3,0,C0
,C1
,0), 0 }, /* RO */
2825 { "id_pfr1_el1", CPENC(3,0,C0
,C1
,1), 0 }, /* RO */
2826 { "id_afr0_el1", CPENC(3,0,C0
,C1
,3), 0 }, /* RO */
2827 { "id_mmfr0_el1", CPENC(3,0,C0
,C1
,4), 0 }, /* RO */
2828 { "id_mmfr1_el1", CPENC(3,0,C0
,C1
,5), 0 }, /* RO */
2829 { "id_mmfr2_el1", CPENC(3,0,C0
,C1
,6), 0 }, /* RO */
2830 { "id_mmfr3_el1", CPENC(3,0,C0
,C1
,7), 0 }, /* RO */
2831 { "id_mmfr4_el1", CPENC(3,0,C0
,C2
,6), 0 }, /* RO */
2832 { "id_isar0_el1", CPENC(3,0,C0
,C2
,0), 0 }, /* RO */
2833 { "id_isar1_el1", CPENC(3,0,C0
,C2
,1), 0 }, /* RO */
2834 { "id_isar2_el1", CPENC(3,0,C0
,C2
,2), 0 }, /* RO */
2835 { "id_isar3_el1", CPENC(3,0,C0
,C2
,3), 0 }, /* RO */
2836 { "id_isar4_el1", CPENC(3,0,C0
,C2
,4), 0 }, /* RO */
2837 { "id_isar5_el1", CPENC(3,0,C0
,C2
,5), 0 }, /* RO */
2838 { "mvfr0_el1", CPENC(3,0,C0
,C3
,0), 0 }, /* RO */
2839 { "mvfr1_el1", CPENC(3,0,C0
,C3
,1), 0 }, /* RO */
2840 { "mvfr2_el1", CPENC(3,0,C0
,C3
,2), 0 }, /* RO */
2841 { "ccsidr_el1", CPENC(3,1,C0
,C0
,0), 0 }, /* RO */
2842 { "id_aa64pfr0_el1", CPENC(3,0,C0
,C4
,0), 0 }, /* RO */
2843 { "id_aa64pfr1_el1", CPENC(3,0,C0
,C4
,1), 0 }, /* RO */
2844 { "id_aa64dfr0_el1", CPENC(3,0,C0
,C5
,0), 0 }, /* RO */
2845 { "id_aa64dfr1_el1", CPENC(3,0,C0
,C5
,1), 0 }, /* RO */
2846 { "id_aa64isar0_el1", CPENC(3,0,C0
,C6
,0), 0 }, /* RO */
2847 { "id_aa64isar1_el1", CPENC(3,0,C0
,C6
,1), 0 }, /* RO */
2848 { "id_aa64mmfr0_el1", CPENC(3,0,C0
,C7
,0), 0 }, /* RO */
2849 { "id_aa64mmfr1_el1", CPENC(3,0,C0
,C7
,1), 0 }, /* RO */
2850 { "id_aa64mmfr2_el1", CPENC (3, 0, C0
, C7
, 2), F_ARCHEXT
}, /* RO */
2851 { "id_aa64afr0_el1", CPENC(3,0,C0
,C5
,4), 0 }, /* RO */
2852 { "id_aa64afr1_el1", CPENC(3,0,C0
,C5
,5), 0 }, /* RO */
2853 { "clidr_el1", CPENC(3,1,C0
,C0
,1), 0 }, /* RO */
2854 { "csselr_el1", CPENC(3,2,C0
,C0
,0), 0 }, /* RO */
2855 { "vpidr_el2", CPENC(3,4,C0
,C0
,0), 0 },
2856 { "vmpidr_el2", CPENC(3,4,C0
,C0
,5), 0 },
2857 { "sctlr_el1", CPENC(3,0,C1
,C0
,0), 0 },
2858 { "sctlr_el2", CPENC(3,4,C1
,C0
,0), 0 },
2859 { "sctlr_el3", CPENC(3,6,C1
,C0
,0), 0 },
2860 { "sctlr_el12", CPENC (3, 5, C1
, C0
, 0), F_ARCHEXT
},
2861 { "actlr_el1", CPENC(3,0,C1
,C0
,1), 0 },
2862 { "actlr_el2", CPENC(3,4,C1
,C0
,1), 0 },
2863 { "actlr_el3", CPENC(3,6,C1
,C0
,1), 0 },
2864 { "cpacr_el1", CPENC(3,0,C1
,C0
,2), 0 },
2865 { "cpacr_el12", CPENC (3, 5, C1
, C0
, 2), F_ARCHEXT
},
2866 { "cptr_el2", CPENC(3,4,C1
,C1
,2), 0 },
2867 { "cptr_el3", CPENC(3,6,C1
,C1
,2), 0 },
2868 { "scr_el3", CPENC(3,6,C1
,C1
,0), 0 },
2869 { "hcr_el2", CPENC(3,4,C1
,C1
,0), 0 },
2870 { "mdcr_el2", CPENC(3,4,C1
,C1
,1), 0 },
2871 { "mdcr_el3", CPENC(3,6,C1
,C3
,1), 0 },
2872 { "hstr_el2", CPENC(3,4,C1
,C1
,3), 0 },
2873 { "hacr_el2", CPENC(3,4,C1
,C1
,7), 0 },
2874 { "ttbr0_el1", CPENC(3,0,C2
,C0
,0), 0 },
2875 { "ttbr1_el1", CPENC(3,0,C2
,C0
,1), 0 },
2876 { "ttbr0_el2", CPENC(3,4,C2
,C0
,0), 0 },
2877 { "ttbr1_el2", CPENC (3, 4, C2
, C0
, 1), F_ARCHEXT
},
2878 { "ttbr0_el3", CPENC(3,6,C2
,C0
,0), 0 },
2879 { "ttbr0_el12", CPENC (3, 5, C2
, C0
, 0), F_ARCHEXT
},
2880 { "ttbr1_el12", CPENC (3, 5, C2
, C0
, 1), F_ARCHEXT
},
2881 { "vttbr_el2", CPENC(3,4,C2
,C1
,0), 0 },
2882 { "tcr_el1", CPENC(3,0,C2
,C0
,2), 0 },
2883 { "tcr_el2", CPENC(3,4,C2
,C0
,2), 0 },
2884 { "tcr_el3", CPENC(3,6,C2
,C0
,2), 0 },
2885 { "tcr_el12", CPENC (3, 5, C2
, C0
, 2), F_ARCHEXT
},
2886 { "vtcr_el2", CPENC(3,4,C2
,C1
,2), 0 },
2887 { "afsr0_el1", CPENC(3,0,C5
,C1
,0), 0 },
2888 { "afsr1_el1", CPENC(3,0,C5
,C1
,1), 0 },
2889 { "afsr0_el2", CPENC(3,4,C5
,C1
,0), 0 },
2890 { "afsr1_el2", CPENC(3,4,C5
,C1
,1), 0 },
2891 { "afsr0_el3", CPENC(3,6,C5
,C1
,0), 0 },
2892 { "afsr0_el12", CPENC (3, 5, C5
, C1
, 0), F_ARCHEXT
},
2893 { "afsr1_el3", CPENC(3,6,C5
,C1
,1), 0 },
2894 { "afsr1_el12", CPENC (3, 5, C5
, C1
, 1), F_ARCHEXT
},
2895 { "esr_el1", CPENC(3,0,C5
,C2
,0), 0 },
2896 { "esr_el2", CPENC(3,4,C5
,C2
,0), 0 },
2897 { "esr_el3", CPENC(3,6,C5
,C2
,0), 0 },
2898 { "esr_el12", CPENC (3, 5, C5
, C2
, 0), F_ARCHEXT
},
2899 { "vsesr_el2", CPENC (3, 4, C5
, C2
, 3), F_ARCHEXT
}, /* RO */
2900 { "fpexc32_el2", CPENC(3,4,C5
,C3
,0), 0 },
2901 { "erridr_el1", CPENC (3, 0, C5
, C3
, 0), F_ARCHEXT
}, /* RO */
2902 { "errselr_el1", CPENC (3, 0, C5
, C3
, 1), F_ARCHEXT
},
2903 { "erxfr_el1", CPENC (3, 0, C5
, C4
, 0), F_ARCHEXT
}, /* RO */
2904 { "erxctlr_el1", CPENC (3, 0, C5
, C4
, 1), F_ARCHEXT
},
2905 { "erxstatus_el1", CPENC (3, 0, C5
, C4
, 2), F_ARCHEXT
},
2906 { "erxaddr_el1", CPENC (3, 0, C5
, C4
, 3), F_ARCHEXT
},
2907 { "erxmisc0_el1", CPENC (3, 0, C5
, C5
, 0), F_ARCHEXT
},
2908 { "erxmisc1_el1", CPENC (3, 0, C5
, C5
, 1), F_ARCHEXT
},
2909 { "far_el1", CPENC(3,0,C6
,C0
,0), 0 },
2910 { "far_el2", CPENC(3,4,C6
,C0
,0), 0 },
2911 { "far_el3", CPENC(3,6,C6
,C0
,0), 0 },
2912 { "far_el12", CPENC (3, 5, C6
, C0
, 0), F_ARCHEXT
},
2913 { "hpfar_el2", CPENC(3,4,C6
,C0
,4), 0 },
2914 { "par_el1", CPENC(3,0,C7
,C4
,0), 0 },
2915 { "mair_el1", CPENC(3,0,C10
,C2
,0), 0 },
2916 { "mair_el2", CPENC(3,4,C10
,C2
,0), 0 },
2917 { "mair_el3", CPENC(3,6,C10
,C2
,0), 0 },
2918 { "mair_el12", CPENC (3, 5, C10
, C2
, 0), F_ARCHEXT
},
2919 { "amair_el1", CPENC(3,0,C10
,C3
,0), 0 },
2920 { "amair_el2", CPENC(3,4,C10
,C3
,0), 0 },
2921 { "amair_el3", CPENC(3,6,C10
,C3
,0), 0 },
2922 { "amair_el12", CPENC (3, 5, C10
, C3
, 0), F_ARCHEXT
},
2923 { "vbar_el1", CPENC(3,0,C12
,C0
,0), 0 },
2924 { "vbar_el2", CPENC(3,4,C12
,C0
,0), 0 },
2925 { "vbar_el3", CPENC(3,6,C12
,C0
,0), 0 },
2926 { "vbar_el12", CPENC (3, 5, C12
, C0
, 0), F_ARCHEXT
},
2927 { "rvbar_el1", CPENC(3,0,C12
,C0
,1), 0 }, /* RO */
2928 { "rvbar_el2", CPENC(3,4,C12
,C0
,1), 0 }, /* RO */
2929 { "rvbar_el3", CPENC(3,6,C12
,C0
,1), 0 }, /* RO */
2930 { "rmr_el1", CPENC(3,0,C12
,C0
,2), 0 },
2931 { "rmr_el2", CPENC(3,4,C12
,C0
,2), 0 },
2932 { "rmr_el3", CPENC(3,6,C12
,C0
,2), 0 },
2933 { "isr_el1", CPENC(3,0,C12
,C1
,0), 0 }, /* RO */
2934 { "disr_el1", CPENC (3, 0, C12
, C1
, 1), F_ARCHEXT
},
2935 { "vdisr_el2", CPENC (3, 4, C12
, C1
, 1), F_ARCHEXT
},
2936 { "contextidr_el1", CPENC(3,0,C13
,C0
,1), 0 },
2937 { "contextidr_el2", CPENC (3, 4, C13
, C0
, 1), F_ARCHEXT
},
2938 { "contextidr_el12", CPENC (3, 5, C13
, C0
, 1), F_ARCHEXT
},
2939 { "tpidr_el0", CPENC(3,3,C13
,C0
,2), 0 },
2940 { "tpidrro_el0", CPENC(3,3,C13
,C0
,3), 0 }, /* RO */
2941 { "tpidr_el1", CPENC(3,0,C13
,C0
,4), 0 },
2942 { "tpidr_el2", CPENC(3,4,C13
,C0
,2), 0 },
2943 { "tpidr_el3", CPENC(3,6,C13
,C0
,2), 0 },
2944 { "teecr32_el1", CPENC(2,2,C0
, C0
,0), 0 }, /* See section 3.9.7.1 */
2945 { "cntfrq_el0", CPENC(3,3,C14
,C0
,0), 0 }, /* RO */
2946 { "cntpct_el0", CPENC(3,3,C14
,C0
,1), 0 }, /* RO */
2947 { "cntvct_el0", CPENC(3,3,C14
,C0
,2), 0 }, /* RO */
2948 { "cntvoff_el2", CPENC(3,4,C14
,C0
,3), 0 },
2949 { "cntkctl_el1", CPENC(3,0,C14
,C1
,0), 0 },
2950 { "cntkctl_el12", CPENC (3, 5, C14
, C1
, 0), F_ARCHEXT
},
2951 { "cnthctl_el2", CPENC(3,4,C14
,C1
,0), 0 },
2952 { "cntp_tval_el0", CPENC(3,3,C14
,C2
,0), 0 },
2953 { "cntp_tval_el02", CPENC (3, 5, C14
, C2
, 0), F_ARCHEXT
},
2954 { "cntp_ctl_el0", CPENC(3,3,C14
,C2
,1), 0 },
2955 { "cntp_ctl_el02", CPENC (3, 5, C14
, C2
, 1), F_ARCHEXT
},
2956 { "cntp_cval_el0", CPENC(3,3,C14
,C2
,2), 0 },
2957 { "cntp_cval_el02", CPENC (3, 5, C14
, C2
, 2), F_ARCHEXT
},
2958 { "cntv_tval_el0", CPENC(3,3,C14
,C3
,0), 0 },
2959 { "cntv_tval_el02", CPENC (3, 5, C14
, C3
, 0), F_ARCHEXT
},
2960 { "cntv_ctl_el0", CPENC(3,3,C14
,C3
,1), 0 },
2961 { "cntv_ctl_el02", CPENC (3, 5, C14
, C3
, 1), F_ARCHEXT
},
2962 { "cntv_cval_el0", CPENC(3,3,C14
,C3
,2), 0 },
2963 { "cntv_cval_el02", CPENC (3, 5, C14
, C3
, 2), F_ARCHEXT
},
2964 { "cnthp_tval_el2", CPENC(3,4,C14
,C2
,0), 0 },
2965 { "cnthp_ctl_el2", CPENC(3,4,C14
,C2
,1), 0 },
2966 { "cnthp_cval_el2", CPENC(3,4,C14
,C2
,2), 0 },
2967 { "cntps_tval_el1", CPENC(3,7,C14
,C2
,0), 0 },
2968 { "cntps_ctl_el1", CPENC(3,7,C14
,C2
,1), 0 },
2969 { "cntps_cval_el1", CPENC(3,7,C14
,C2
,2), 0 },
2970 { "cnthv_tval_el2", CPENC (3, 4, C14
, C3
, 0), F_ARCHEXT
},
2971 { "cnthv_ctl_el2", CPENC (3, 4, C14
, C3
, 1), F_ARCHEXT
},
2972 { "cnthv_cval_el2", CPENC (3, 4, C14
, C3
, 2), F_ARCHEXT
},
2973 { "dacr32_el2", CPENC(3,4,C3
,C0
,0), 0 },
2974 { "ifsr32_el2", CPENC(3,4,C5
,C0
,1), 0 },
2975 { "teehbr32_el1", CPENC(2,2,C1
,C0
,0), 0 },
2976 { "sder32_el3", CPENC(3,6,C1
,C1
,1), 0 },
2977 { "mdscr_el1", CPENC(2,0,C0
, C2
, 2), 0 },
2978 { "mdccsr_el0", CPENC(2,3,C0
, C1
, 0), 0 }, /* r */
2979 { "mdccint_el1", CPENC(2,0,C0
, C2
, 0), 0 },
2980 { "dbgdtr_el0", CPENC(2,3,C0
, C4
, 0), 0 },
2981 { "dbgdtrrx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* r */
2982 { "dbgdtrtx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* w */
2983 { "osdtrrx_el1", CPENC(2,0,C0
, C0
, 2), 0 }, /* r */
2984 { "osdtrtx_el1", CPENC(2,0,C0
, C3
, 2), 0 }, /* w */
2985 { "oseccr_el1", CPENC(2,0,C0
, C6
, 2), 0 },
2986 { "dbgvcr32_el2", CPENC(2,4,C0
, C7
, 0), 0 },
2987 { "dbgbvr0_el1", CPENC(2,0,C0
, C0
, 4), 0 },
2988 { "dbgbvr1_el1", CPENC(2,0,C0
, C1
, 4), 0 },
2989 { "dbgbvr2_el1", CPENC(2,0,C0
, C2
, 4), 0 },
2990 { "dbgbvr3_el1", CPENC(2,0,C0
, C3
, 4), 0 },
2991 { "dbgbvr4_el1", CPENC(2,0,C0
, C4
, 4), 0 },
2992 { "dbgbvr5_el1", CPENC(2,0,C0
, C5
, 4), 0 },
2993 { "dbgbvr6_el1", CPENC(2,0,C0
, C6
, 4), 0 },
2994 { "dbgbvr7_el1", CPENC(2,0,C0
, C7
, 4), 0 },
2995 { "dbgbvr8_el1", CPENC(2,0,C0
, C8
, 4), 0 },
2996 { "dbgbvr9_el1", CPENC(2,0,C0
, C9
, 4), 0 },
2997 { "dbgbvr10_el1", CPENC(2,0,C0
, C10
,4), 0 },
2998 { "dbgbvr11_el1", CPENC(2,0,C0
, C11
,4), 0 },
2999 { "dbgbvr12_el1", CPENC(2,0,C0
, C12
,4), 0 },
3000 { "dbgbvr13_el1", CPENC(2,0,C0
, C13
,4), 0 },
3001 { "dbgbvr14_el1", CPENC(2,0,C0
, C14
,4), 0 },
3002 { "dbgbvr15_el1", CPENC(2,0,C0
, C15
,4), 0 },
3003 { "dbgbcr0_el1", CPENC(2,0,C0
, C0
, 5), 0 },
3004 { "dbgbcr1_el1", CPENC(2,0,C0
, C1
, 5), 0 },
3005 { "dbgbcr2_el1", CPENC(2,0,C0
, C2
, 5), 0 },
3006 { "dbgbcr3_el1", CPENC(2,0,C0
, C3
, 5), 0 },
3007 { "dbgbcr4_el1", CPENC(2,0,C0
, C4
, 5), 0 },
3008 { "dbgbcr5_el1", CPENC(2,0,C0
, C5
, 5), 0 },
3009 { "dbgbcr6_el1", CPENC(2,0,C0
, C6
, 5), 0 },
3010 { "dbgbcr7_el1", CPENC(2,0,C0
, C7
, 5), 0 },
3011 { "dbgbcr8_el1", CPENC(2,0,C0
, C8
, 5), 0 },
3012 { "dbgbcr9_el1", CPENC(2,0,C0
, C9
, 5), 0 },
3013 { "dbgbcr10_el1", CPENC(2,0,C0
, C10
,5), 0 },
3014 { "dbgbcr11_el1", CPENC(2,0,C0
, C11
,5), 0 },
3015 { "dbgbcr12_el1", CPENC(2,0,C0
, C12
,5), 0 },
3016 { "dbgbcr13_el1", CPENC(2,0,C0
, C13
,5), 0 },
3017 { "dbgbcr14_el1", CPENC(2,0,C0
, C14
,5), 0 },
3018 { "dbgbcr15_el1", CPENC(2,0,C0
, C15
,5), 0 },
3019 { "dbgwvr0_el1", CPENC(2,0,C0
, C0
, 6), 0 },
3020 { "dbgwvr1_el1", CPENC(2,0,C0
, C1
, 6), 0 },
3021 { "dbgwvr2_el1", CPENC(2,0,C0
, C2
, 6), 0 },
3022 { "dbgwvr3_el1", CPENC(2,0,C0
, C3
, 6), 0 },
3023 { "dbgwvr4_el1", CPENC(2,0,C0
, C4
, 6), 0 },
3024 { "dbgwvr5_el1", CPENC(2,0,C0
, C5
, 6), 0 },
3025 { "dbgwvr6_el1", CPENC(2,0,C0
, C6
, 6), 0 },
3026 { "dbgwvr7_el1", CPENC(2,0,C0
, C7
, 6), 0 },
3027 { "dbgwvr8_el1", CPENC(2,0,C0
, C8
, 6), 0 },
3028 { "dbgwvr9_el1", CPENC(2,0,C0
, C9
, 6), 0 },
3029 { "dbgwvr10_el1", CPENC(2,0,C0
, C10
,6), 0 },
3030 { "dbgwvr11_el1", CPENC(2,0,C0
, C11
,6), 0 },
3031 { "dbgwvr12_el1", CPENC(2,0,C0
, C12
,6), 0 },
3032 { "dbgwvr13_el1", CPENC(2,0,C0
, C13
,6), 0 },
3033 { "dbgwvr14_el1", CPENC(2,0,C0
, C14
,6), 0 },
3034 { "dbgwvr15_el1", CPENC(2,0,C0
, C15
,6), 0 },
3035 { "dbgwcr0_el1", CPENC(2,0,C0
, C0
, 7), 0 },
3036 { "dbgwcr1_el1", CPENC(2,0,C0
, C1
, 7), 0 },
3037 { "dbgwcr2_el1", CPENC(2,0,C0
, C2
, 7), 0 },
3038 { "dbgwcr3_el1", CPENC(2,0,C0
, C3
, 7), 0 },
3039 { "dbgwcr4_el1", CPENC(2,0,C0
, C4
, 7), 0 },
3040 { "dbgwcr5_el1", CPENC(2,0,C0
, C5
, 7), 0 },
3041 { "dbgwcr6_el1", CPENC(2,0,C0
, C6
, 7), 0 },
3042 { "dbgwcr7_el1", CPENC(2,0,C0
, C7
, 7), 0 },
3043 { "dbgwcr8_el1", CPENC(2,0,C0
, C8
, 7), 0 },
3044 { "dbgwcr9_el1", CPENC(2,0,C0
, C9
, 7), 0 },
3045 { "dbgwcr10_el1", CPENC(2,0,C0
, C10
,7), 0 },
3046 { "dbgwcr11_el1", CPENC(2,0,C0
, C11
,7), 0 },
3047 { "dbgwcr12_el1", CPENC(2,0,C0
, C12
,7), 0 },
3048 { "dbgwcr13_el1", CPENC(2,0,C0
, C13
,7), 0 },
3049 { "dbgwcr14_el1", CPENC(2,0,C0
, C14
,7), 0 },
3050 { "dbgwcr15_el1", CPENC(2,0,C0
, C15
,7), 0 },
3051 { "mdrar_el1", CPENC(2,0,C1
, C0
, 0), 0 }, /* r */
3052 { "oslar_el1", CPENC(2,0,C1
, C0
, 4), 0 }, /* w */
3053 { "oslsr_el1", CPENC(2,0,C1
, C1
, 4), 0 }, /* r */
3054 { "osdlr_el1", CPENC(2,0,C1
, C3
, 4), 0 },
3055 { "dbgprcr_el1", CPENC(2,0,C1
, C4
, 4), 0 },
3056 { "dbgclaimset_el1", CPENC(2,0,C7
, C8
, 6), 0 },
3057 { "dbgclaimclr_el1", CPENC(2,0,C7
, C9
, 6), 0 },
3058 { "dbgauthstatus_el1", CPENC(2,0,C7
, C14
,6), 0 }, /* r */
3059 { "pmblimitr_el1", CPENC (3, 0, C9
, C10
, 0), F_ARCHEXT
}, /* rw */
3060 { "pmbptr_el1", CPENC (3, 0, C9
, C10
, 1), F_ARCHEXT
}, /* rw */
3061 { "pmbsr_el1", CPENC (3, 0, C9
, C10
, 3), F_ARCHEXT
}, /* rw */
3062 { "pmbidr_el1", CPENC (3, 0, C9
, C10
, 7), F_ARCHEXT
}, /* ro */
3063 { "pmscr_el1", CPENC (3, 0, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3064 { "pmsicr_el1", CPENC (3, 0, C9
, C9
, 2), F_ARCHEXT
}, /* rw */
3065 { "pmsirr_el1", CPENC (3, 0, C9
, C9
, 3), F_ARCHEXT
}, /* rw */
3066 { "pmsfcr_el1", CPENC (3, 0, C9
, C9
, 4), F_ARCHEXT
}, /* rw */
3067 { "pmsevfr_el1", CPENC (3, 0, C9
, C9
, 5), F_ARCHEXT
}, /* rw */
3068 { "pmslatfr_el1", CPENC (3, 0, C9
, C9
, 6), F_ARCHEXT
}, /* rw */
3069 { "pmsidr_el1", CPENC (3, 0, C9
, C9
, 7), F_ARCHEXT
}, /* ro */
3070 { "pmscr_el2", CPENC (3, 4, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3071 { "pmscr_el12", CPENC (3, 5, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3072 { "pmcr_el0", CPENC(3,3,C9
,C12
, 0), 0 },
3073 { "pmcntenset_el0", CPENC(3,3,C9
,C12
, 1), 0 },
3074 { "pmcntenclr_el0", CPENC(3,3,C9
,C12
, 2), 0 },
3075 { "pmovsclr_el0", CPENC(3,3,C9
,C12
, 3), 0 },
3076 { "pmswinc_el0", CPENC(3,3,C9
,C12
, 4), 0 }, /* w */
3077 { "pmselr_el0", CPENC(3,3,C9
,C12
, 5), 0 },
3078 { "pmceid0_el0", CPENC(3,3,C9
,C12
, 6), 0 }, /* r */
3079 { "pmceid1_el0", CPENC(3,3,C9
,C12
, 7), 0 }, /* r */
3080 { "pmccntr_el0", CPENC(3,3,C9
,C13
, 0), 0 },
3081 { "pmxevtyper_el0", CPENC(3,3,C9
,C13
, 1), 0 },
3082 { "pmxevcntr_el0", CPENC(3,3,C9
,C13
, 2), 0 },
3083 { "pmuserenr_el0", CPENC(3,3,C9
,C14
, 0), 0 },
3084 { "pmintenset_el1", CPENC(3,0,C9
,C14
, 1), 0 },
3085 { "pmintenclr_el1", CPENC(3,0,C9
,C14
, 2), 0 },
3086 { "pmovsset_el0", CPENC(3,3,C9
,C14
, 3), 0 },
3087 { "pmevcntr0_el0", CPENC(3,3,C14
,C8
, 0), 0 },
3088 { "pmevcntr1_el0", CPENC(3,3,C14
,C8
, 1), 0 },
3089 { "pmevcntr2_el0", CPENC(3,3,C14
,C8
, 2), 0 },
3090 { "pmevcntr3_el0", CPENC(3,3,C14
,C8
, 3), 0 },
3091 { "pmevcntr4_el0", CPENC(3,3,C14
,C8
, 4), 0 },
3092 { "pmevcntr5_el0", CPENC(3,3,C14
,C8
, 5), 0 },
3093 { "pmevcntr6_el0", CPENC(3,3,C14
,C8
, 6), 0 },
3094 { "pmevcntr7_el0", CPENC(3,3,C14
,C8
, 7), 0 },
3095 { "pmevcntr8_el0", CPENC(3,3,C14
,C9
, 0), 0 },
3096 { "pmevcntr9_el0", CPENC(3,3,C14
,C9
, 1), 0 },
3097 { "pmevcntr10_el0", CPENC(3,3,C14
,C9
, 2), 0 },
3098 { "pmevcntr11_el0", CPENC(3,3,C14
,C9
, 3), 0 },
3099 { "pmevcntr12_el0", CPENC(3,3,C14
,C9
, 4), 0 },
3100 { "pmevcntr13_el0", CPENC(3,3,C14
,C9
, 5), 0 },
3101 { "pmevcntr14_el0", CPENC(3,3,C14
,C9
, 6), 0 },
3102 { "pmevcntr15_el0", CPENC(3,3,C14
,C9
, 7), 0 },
3103 { "pmevcntr16_el0", CPENC(3,3,C14
,C10
,0), 0 },
3104 { "pmevcntr17_el0", CPENC(3,3,C14
,C10
,1), 0 },
3105 { "pmevcntr18_el0", CPENC(3,3,C14
,C10
,2), 0 },
3106 { "pmevcntr19_el0", CPENC(3,3,C14
,C10
,3), 0 },
3107 { "pmevcntr20_el0", CPENC(3,3,C14
,C10
,4), 0 },
3108 { "pmevcntr21_el0", CPENC(3,3,C14
,C10
,5), 0 },
3109 { "pmevcntr22_el0", CPENC(3,3,C14
,C10
,6), 0 },
3110 { "pmevcntr23_el0", CPENC(3,3,C14
,C10
,7), 0 },
3111 { "pmevcntr24_el0", CPENC(3,3,C14
,C11
,0), 0 },
3112 { "pmevcntr25_el0", CPENC(3,3,C14
,C11
,1), 0 },
3113 { "pmevcntr26_el0", CPENC(3,3,C14
,C11
,2), 0 },
3114 { "pmevcntr27_el0", CPENC(3,3,C14
,C11
,3), 0 },
3115 { "pmevcntr28_el0", CPENC(3,3,C14
,C11
,4), 0 },
3116 { "pmevcntr29_el0", CPENC(3,3,C14
,C11
,5), 0 },
3117 { "pmevcntr30_el0", CPENC(3,3,C14
,C11
,6), 0 },
3118 { "pmevtyper0_el0", CPENC(3,3,C14
,C12
,0), 0 },
3119 { "pmevtyper1_el0", CPENC(3,3,C14
,C12
,1), 0 },
3120 { "pmevtyper2_el0", CPENC(3,3,C14
,C12
,2), 0 },
3121 { "pmevtyper3_el0", CPENC(3,3,C14
,C12
,3), 0 },
3122 { "pmevtyper4_el0", CPENC(3,3,C14
,C12
,4), 0 },
3123 { "pmevtyper5_el0", CPENC(3,3,C14
,C12
,5), 0 },
3124 { "pmevtyper6_el0", CPENC(3,3,C14
,C12
,6), 0 },
3125 { "pmevtyper7_el0", CPENC(3,3,C14
,C12
,7), 0 },
3126 { "pmevtyper8_el0", CPENC(3,3,C14
,C13
,0), 0 },
3127 { "pmevtyper9_el0", CPENC(3,3,C14
,C13
,1), 0 },
3128 { "pmevtyper10_el0", CPENC(3,3,C14
,C13
,2), 0 },
3129 { "pmevtyper11_el0", CPENC(3,3,C14
,C13
,3), 0 },
3130 { "pmevtyper12_el0", CPENC(3,3,C14
,C13
,4), 0 },
3131 { "pmevtyper13_el0", CPENC(3,3,C14
,C13
,5), 0 },
3132 { "pmevtyper14_el0", CPENC(3,3,C14
,C13
,6), 0 },
3133 { "pmevtyper15_el0", CPENC(3,3,C14
,C13
,7), 0 },
3134 { "pmevtyper16_el0", CPENC(3,3,C14
,C14
,0), 0 },
3135 { "pmevtyper17_el0", CPENC(3,3,C14
,C14
,1), 0 },
3136 { "pmevtyper18_el0", CPENC(3,3,C14
,C14
,2), 0 },
3137 { "pmevtyper19_el0", CPENC(3,3,C14
,C14
,3), 0 },
3138 { "pmevtyper20_el0", CPENC(3,3,C14
,C14
,4), 0 },
3139 { "pmevtyper21_el0", CPENC(3,3,C14
,C14
,5), 0 },
3140 { "pmevtyper22_el0", CPENC(3,3,C14
,C14
,6), 0 },
3141 { "pmevtyper23_el0", CPENC(3,3,C14
,C14
,7), 0 },
3142 { "pmevtyper24_el0", CPENC(3,3,C14
,C15
,0), 0 },
3143 { "pmevtyper25_el0", CPENC(3,3,C14
,C15
,1), 0 },
3144 { "pmevtyper26_el0", CPENC(3,3,C14
,C15
,2), 0 },
3145 { "pmevtyper27_el0", CPENC(3,3,C14
,C15
,3), 0 },
3146 { "pmevtyper28_el0", CPENC(3,3,C14
,C15
,4), 0 },
3147 { "pmevtyper29_el0", CPENC(3,3,C14
,C15
,5), 0 },
3148 { "pmevtyper30_el0", CPENC(3,3,C14
,C15
,6), 0 },
3149 { "pmccfiltr_el0", CPENC(3,3,C14
,C15
,7), 0 },
3150 { 0, CPENC(0,0,0,0,0), 0 },
3154 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
3156 return (reg
->flags
& F_DEPRECATED
) != 0;
3160 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
3161 const aarch64_sys_reg
*reg
)
3163 if (!(reg
->flags
& F_ARCHEXT
))
3166 /* PAN. Values are from aarch64_sys_regs. */
3167 if (reg
->value
== CPEN_(0,C2
,3)
3168 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3171 /* Virtualization host extensions: system registers. */
3172 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
3173 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
3174 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
3175 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
3176 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
3177 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3180 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3181 if ((reg
->value
== CPEN_ (5, C0
, 0)
3182 || reg
->value
== CPEN_ (5, C0
, 1)
3183 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
3184 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
3185 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
3186 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
3187 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
3188 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
3189 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
3190 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
3191 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
3192 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
3193 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
3194 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
3195 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
3196 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
3197 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3200 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3201 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
3202 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
3203 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
3204 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
3205 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
3206 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
3207 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3210 /* ARMv8.2 features. */
3212 /* ID_AA64MMFR2_EL1. */
3213 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
3214 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3218 if (reg
->value
== CPEN_ (0, C2
, 4)
3219 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3222 /* RAS extension. */
3224 /* ERRIDR_EL1 and ERRSELR_EL1. */
3225 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
3226 || reg
->value
== CPENC (3, 0, C5
, C3
, 1))
3227 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3230 /* ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1, ERXMISC0_EL1 AND
3232 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
3233 || reg
->value
== CPENC (3, 0, C5
, C3
,1)
3234 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
3235 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
3236 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
3237 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
3238 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3241 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3242 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
3243 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
3244 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
3245 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3248 /* Statistical Profiling extension. */
3249 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
3250 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
3251 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
3252 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
3253 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
3254 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
3255 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
3256 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
3257 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
3258 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
3259 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
3260 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
3261 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
3262 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
3268 const aarch64_sys_reg aarch64_pstatefields
[] =
3270 { "spsel", 0x05, 0 },
3271 { "daifset", 0x1e, 0 },
3272 { "daifclr", 0x1f, 0 },
3273 { "pan", 0x04, F_ARCHEXT
},
3274 { "uao", 0x03, F_ARCHEXT
},
3275 { 0, CPENC(0,0,0,0,0), 0 },
3279 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
3280 const aarch64_sys_reg
*reg
)
3282 if (!(reg
->flags
& F_ARCHEXT
))
3285 /* PAN. Values are from aarch64_pstatefields. */
3286 if (reg
->value
== 0x04
3287 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3290 /* UAO. Values are from aarch64_pstatefields. */
3291 if (reg
->value
== 0x03
3292 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3298 const aarch64_sys_ins_reg aarch64_sys_regs_ic
[] =
3300 { "ialluis", CPENS(0,C7
,C1
,0), 0 },
3301 { "iallu", CPENS(0,C7
,C5
,0), 0 },
3302 { "ivau", CPENS (3, C7
, C5
, 1), F_HASXT
},
3303 { 0, CPENS(0,0,0,0), 0 }
3306 const aarch64_sys_ins_reg aarch64_sys_regs_dc
[] =
3308 { "zva", CPENS (3, C7
, C4
, 1), F_HASXT
},
3309 { "ivac", CPENS (0, C7
, C6
, 1), F_HASXT
},
3310 { "isw", CPENS (0, C7
, C6
, 2), F_HASXT
},
3311 { "cvac", CPENS (3, C7
, C10
, 1), F_HASXT
},
3312 { "csw", CPENS (0, C7
, C10
, 2), F_HASXT
},
3313 { "cvau", CPENS (3, C7
, C11
, 1), F_HASXT
},
3314 { "cvap", CPENS (3, C7
, C12
, 1), F_HASXT
| F_ARCHEXT
},
3315 { "civac", CPENS (3, C7
, C14
, 1), F_HASXT
},
3316 { "cisw", CPENS (0, C7
, C14
, 2), F_HASXT
},
3317 { 0, CPENS(0,0,0,0), 0 }
3320 const aarch64_sys_ins_reg aarch64_sys_regs_at
[] =
3322 { "s1e1r", CPENS (0, C7
, C8
, 0), F_HASXT
},
3323 { "s1e1w", CPENS (0, C7
, C8
, 1), F_HASXT
},
3324 { "s1e0r", CPENS (0, C7
, C8
, 2), F_HASXT
},
3325 { "s1e0w", CPENS (0, C7
, C8
, 3), F_HASXT
},
3326 { "s12e1r", CPENS (4, C7
, C8
, 4), F_HASXT
},
3327 { "s12e1w", CPENS (4, C7
, C8
, 5), F_HASXT
},
3328 { "s12e0r", CPENS (4, C7
, C8
, 6), F_HASXT
},
3329 { "s12e0w", CPENS (4, C7
, C8
, 7), F_HASXT
},
3330 { "s1e2r", CPENS (4, C7
, C8
, 0), F_HASXT
},
3331 { "s1e2w", CPENS (4, C7
, C8
, 1), F_HASXT
},
3332 { "s1e3r", CPENS (6, C7
, C8
, 0), F_HASXT
},
3333 { "s1e3w", CPENS (6, C7
, C8
, 1), F_HASXT
},
3334 { "s1e1rp", CPENS (0, C7
, C9
, 0), F_HASXT
| F_ARCHEXT
},
3335 { "s1e1wp", CPENS (0, C7
, C9
, 1), F_HASXT
| F_ARCHEXT
},
3336 { 0, CPENS(0,0,0,0), 0 }
3339 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi
[] =
3341 { "vmalle1", CPENS(0,C8
,C7
,0), 0 },
3342 { "vae1", CPENS (0, C8
, C7
, 1), F_HASXT
},
3343 { "aside1", CPENS (0, C8
, C7
, 2), F_HASXT
},
3344 { "vaae1", CPENS (0, C8
, C7
, 3), F_HASXT
},
3345 { "vmalle1is", CPENS(0,C8
,C3
,0), 0 },
3346 { "vae1is", CPENS (0, C8
, C3
, 1), F_HASXT
},
3347 { "aside1is", CPENS (0, C8
, C3
, 2), F_HASXT
},
3348 { "vaae1is", CPENS (0, C8
, C3
, 3), F_HASXT
},
3349 { "ipas2e1is", CPENS (4, C8
, C0
, 1), F_HASXT
},
3350 { "ipas2le1is",CPENS (4, C8
, C0
, 5), F_HASXT
},
3351 { "ipas2e1", CPENS (4, C8
, C4
, 1), F_HASXT
},
3352 { "ipas2le1", CPENS (4, C8
, C4
, 5), F_HASXT
},
3353 { "vae2", CPENS (4, C8
, C7
, 1), F_HASXT
},
3354 { "vae2is", CPENS (4, C8
, C3
, 1), F_HASXT
},
3355 { "vmalls12e1",CPENS(4,C8
,C7
,6), 0 },
3356 { "vmalls12e1is",CPENS(4,C8
,C3
,6), 0 },
3357 { "vae3", CPENS (6, C8
, C7
, 1), F_HASXT
},
3358 { "vae3is", CPENS (6, C8
, C3
, 1), F_HASXT
},
3359 { "alle2", CPENS(4,C8
,C7
,0), 0 },
3360 { "alle2is", CPENS(4,C8
,C3
,0), 0 },
3361 { "alle1", CPENS(4,C8
,C7
,4), 0 },
3362 { "alle1is", CPENS(4,C8
,C3
,4), 0 },
3363 { "alle3", CPENS(6,C8
,C7
,0), 0 },
3364 { "alle3is", CPENS(6,C8
,C3
,0), 0 },
3365 { "vale1is", CPENS (0, C8
, C3
, 5), F_HASXT
},
3366 { "vale2is", CPENS (4, C8
, C3
, 5), F_HASXT
},
3367 { "vale3is", CPENS (6, C8
, C3
, 5), F_HASXT
},
3368 { "vaale1is", CPENS (0, C8
, C3
, 7), F_HASXT
},
3369 { "vale1", CPENS (0, C8
, C7
, 5), F_HASXT
},
3370 { "vale2", CPENS (4, C8
, C7
, 5), F_HASXT
},
3371 { "vale3", CPENS (6, C8
, C7
, 5), F_HASXT
},
3372 { "vaale1", CPENS (0, C8
, C7
, 7), F_HASXT
},
3373 { 0, CPENS(0,0,0,0), 0 }
3377 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
3379 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
3383 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
3384 const aarch64_sys_ins_reg
*reg
)
3386 if (!(reg
->flags
& F_ARCHEXT
))
3389 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3390 if (reg
->value
== CPENS (3, C7
, C12
, 1)
3391 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3394 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3395 if ((reg
->value
== CPENS (0, C7
, C9
, 0)
3396 || reg
->value
== CPENS (0, C7
, C9
, 1))
3397 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
/* Include the opcode description table as well as the operand description
   table.  */
#include "aarch64-tbl.h"