1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
31 #include "aarch64-opc.h"
34 int debug_dump
= FALSE
;
35 #endif /* DEBUG_AARCH64 */
37 /* Helper functions to determine which operand to be used to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
43 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
44 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
51 return ((qualifier
>= AARCH64_OPND_QLF_S_B
52 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
/* Data patterns of the AdvSIMD operand qualifier sequences; used to pick
   which operand carries the size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* Operand index (within an instruction's operand list) that determines the
   size:Q encoding; indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
82 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers
[0] == qualifiers
[1]
87 && vector_qualifier_p (qualifiers
[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers
[0])
89 == aarch64_get_qualifier_esize (qualifiers
[1]))
90 && (aarch64_get_qualifier_esize (qualifiers
[0])
91 == aarch64_get_qualifier_esize (qualifiers
[2])))
92 return DP_VECTOR_3SAME
;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
96 if (vector_qualifier_p (qualifiers
[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers
[0])
99 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
100 return DP_VECTOR_LONG
;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers
[0] == qualifiers
[1]
103 && vector_qualifier_p (qualifiers
[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers
[0])
106 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers
[0])
108 == aarch64_get_qualifier_esize (qualifiers
[1])))
109 return DP_VECTOR_WIDE
;
111 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers
[1]) == TRUE
115 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
116 return DP_VECTOR_ACROSS_LANES
;
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time when we need to select an operand. We can
126 either buffer the caculated the result or statically generate the data,
127 however, it is not obvious that the optimization will bring significant
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
134 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
137 const aarch64_field fields
[] =
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
196 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
197 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
198 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
199 { 31, 1 }, /* b5: in the test bit and branch instructions. */
200 { 19, 5 }, /* b40: in the test bit and branch instructions. */
201 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
204 enum aarch64_operand_class
205 aarch64_get_operand_class (enum aarch64_opnd type
)
207 return aarch64_operands
[type
].op_class
;
211 aarch64_get_operand_name (enum aarch64_opnd type
)
213 return aarch64_operands
[type
].name
;
216 /* Get operand description string.
217 This is usually for the diagnosis purpose. */
219 aarch64_get_operand_desc (enum aarch64_opnd type
)
221 return aarch64_operands
[type
].desc
;
224 /* Table of all conditional affixes. */
225 const aarch64_cond aarch64_conds
[16] =
230 {{"cc", "lo", "ul"}, 0x3},
246 get_cond_from_value (aarch64_insn value
)
249 return &aarch64_conds
[(unsigned int) value
];
253 get_inverted_cond (const aarch64_cond
*cond
)
255 return &aarch64_conds
[cond
->value
^ 0x1];
258 /* Table describing the operand extension/shifting operators; indexed by
259 enum aarch64_modifier_kind.
261 The value column provides the most common values for encoding modifiers,
262 which enables table-driven encoding/decoding for the modifiers. */
263 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
282 enum aarch64_modifier_kind
283 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
285 return desc
- aarch64_operand_modifiers
;
289 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
291 return aarch64_operand_modifiers
[kind
].value
;
294 enum aarch64_modifier_kind
295 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
296 bfd_boolean extend_p
)
298 if (extend_p
== TRUE
)
299 return AARCH64_MOD_UXTB
+ value
;
301 return AARCH64_MOD_LSL
- value
;
305 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
307 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
311 static inline bfd_boolean
312 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
314 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
318 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
338 /* Table describing the operands supported by the aliases of the HINT
341 The name column is the operand that is accepted for the alias. The value
342 column is the hint number of the alias. The list of operands is terminated
343 by NULL in the name column. */
345 const struct aarch64_name_value_pair aarch64_hint_options
[] =
350 /* op -> op: load = 0 instruction = 1 store = 2
352 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
353 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
354 const struct aarch64_name_value_pair aarch64_prfops
[32] =
356 { "pldl1keep", B(0, 1, 0) },
357 { "pldl1strm", B(0, 1, 1) },
358 { "pldl2keep", B(0, 2, 0) },
359 { "pldl2strm", B(0, 2, 1) },
360 { "pldl3keep", B(0, 3, 0) },
361 { "pldl3strm", B(0, 3, 1) },
364 { "plil1keep", B(1, 1, 0) },
365 { "plil1strm", B(1, 1, 1) },
366 { "plil2keep", B(1, 2, 0) },
367 { "plil2strm", B(1, 2, 1) },
368 { "plil3keep", B(1, 3, 0) },
369 { "plil3strm", B(1, 3, 1) },
372 { "pstl1keep", B(2, 1, 0) },
373 { "pstl1strm", B(2, 1, 1) },
374 { "pstl2keep", B(2, 2, 0) },
375 { "pstl2strm", B(2, 2, 1) },
376 { "pstl3keep", B(2, 3, 0) },
377 { "pstl3strm", B(2, 3, 1) },
/* Utilities on value constraint.  */

/* Return 1 iff LOW <= VALUE <= HIGH.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}

/* Return 1 iff VALUE is a multiple of ALIGN; ALIGN must be a power of
   two for the mask trick below to be valid.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return ((value & (align - 1)) == 0) ? 1 : 0;
}
/* A signed value fits in a field.
   Return 1 iff VALUE is representable as a signed two's-complement integer
   of WIDTH bits.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}

/* An unsigned value fits in a field.
   Return 1 iff VALUE is representable as an unsigned integer of WIDTH
   bits.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t) 1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
433 /* Return 1 if OPERAND is SP or WSP. */
435 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
437 return ((aarch64_get_operand_class (operand
->type
)
438 == AARCH64_OPND_CLASS_INT_REG
)
439 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
440 && operand
->reg
.regno
== 31);
443 /* Return 1 if OPERAND is XZR or WZP. */
445 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
447 return ((aarch64_get_operand_class (operand
->type
)
448 == AARCH64_OPND_CLASS_INT_REG
)
449 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
450 && operand
->reg
.regno
== 31);
453 /* Return true if the operand *OPERAND that has the operand code
454 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
455 qualified by the qualifier TARGET. */
458 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
459 aarch64_opnd_qualifier_t target
)
461 switch (operand
->qualifier
)
463 case AARCH64_OPND_QLF_W
:
464 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
467 case AARCH64_OPND_QLF_X
:
468 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
471 case AARCH64_OPND_QLF_WSP
:
472 if (target
== AARCH64_OPND_QLF_W
473 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
476 case AARCH64_OPND_QLF_SP
:
477 if (target
== AARCH64_OPND_QLF_X
478 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
488 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
489 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
491 Return NIL if more than one expected qualifiers are found. */
493 aarch64_opnd_qualifier_t
494 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
496 const aarch64_opnd_qualifier_t known_qlf
,
503 When the known qualifier is NIL, we have to assume that there is only
504 one qualifier sequence in the *QSEQ_LIST and return the corresponding
505 qualifier directly. One scenario is that for instruction
506 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
507 which has only one possible valid qualifier sequence
509 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
510 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
512 Because the qualifier NIL has dual roles in the qualifier sequence:
513 it can mean no qualifier for the operand, or the qualifer sequence is
514 not in use (when all qualifiers in the sequence are NILs), we have to
515 handle this special case here. */
516 if (known_qlf
== AARCH64_OPND_NIL
)
518 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
519 return qseq_list
[0][idx
];
522 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
524 if (qseq_list
[i
][known_idx
] == known_qlf
)
527 /* More than one sequences are found to have KNOWN_QLF at
529 return AARCH64_OPND_NIL
;
534 return qseq_list
[saved_i
][idx
];
/* The kind of an operand qualifier, which determines how the three data
   fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};

/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.

     NOTE(review): the rows between "imm_1_64" and "retrieving" were lost in
     the damaged source; "lsl"/"msl" are reconstructed to match the QLF_LSL
     and QLF_MSL enumerators — verify against the original table.  */
  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
609 static inline bfd_boolean
610 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
612 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
616 static inline bfd_boolean
617 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
619 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
624 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
626 return aarch64_opnd_qualifiers
[qualifier
].desc
;
629 /* Given an operand qualifier, return the expected data element size
630 of a qualified operand. */
632 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
634 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
635 return aarch64_opnd_qualifiers
[qualifier
].data0
;
639 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
641 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
642 return aarch64_opnd_qualifiers
[qualifier
].data1
;
646 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
648 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
649 return aarch64_opnd_qualifiers
[qualifier
].data2
;
653 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
655 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
656 return aarch64_opnd_qualifiers
[qualifier
].data0
;
660 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
662 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
663 return aarch64_opnd_qualifiers
[qualifier
].data1
;
#ifdef DEBUG_AARCH64
/* Print a "#### "-prefixed printf-style trace line to stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Dump all AARCH64_MAX_OPND_NUM qualifier names of QUALIFIER, comma
   separated, on one trace line.  */
static void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently set on the operands OPND next to the
   candidate sequence QUALIFIER being matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
704 /* TODO improve this, we can have an extra field at the runtime to
705 store the number of operands rather than calculating it every time. */
708 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
711 const enum aarch64_opnd
*opnds
= opcode
->operands
;
712 while (opnds
[i
++] != AARCH64_OPND_NIL
)
715 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
719 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
720 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
722 N.B. on the entry, it is very likely that only some operands in *INST
723 have had their qualifiers been established.
725 If STOP_AT is not -1, the function will only try to match
726 the qualifier sequence for operands before and including the operand
727 of index STOP_AT; and on success *RET will only be filled with the first
728 (STOP_AT+1) qualifiers.
730 A couple examples of the matching algorithm:
738 Apart from serving the main encoding routine, this can also be called
739 during or after the operand decoding. */
742 aarch64_find_best_match (const aarch64_inst
*inst
,
743 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
744 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
748 const aarch64_opnd_qualifier_t
*qualifiers
;
750 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
753 DEBUG_TRACE ("SUCCEED: no operand");
757 if (stop_at
< 0 || stop_at
>= num_opnds
)
758 stop_at
= num_opnds
- 1;
760 /* For each pattern. */
761 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
764 qualifiers
= *qualifiers_list
;
766 /* Start as positive. */
769 DEBUG_TRACE ("%d", i
);
772 dump_match_qualifiers (inst
->operands
, qualifiers
);
775 /* Most opcodes has much fewer patterns in the list.
776 First NIL qualifier indicates the end in the list. */
777 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
779 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
785 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
787 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
789 /* Either the operand does not have qualifier, or the qualifier
790 for the operand needs to be deduced from the qualifier
792 In the latter case, any constraint checking related with
793 the obtained qualifier should be done later in
794 operand_general_constraint_met_p. */
797 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
799 /* Unless the target qualifier can also qualify the operand
800 (which has already had a non-nil qualifier), non-equal
801 qualifiers are generally un-matched. */
802 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
811 continue; /* Equal qualifiers are certainly matched. */
814 /* Qualifiers established. */
821 /* Fill the result in *RET. */
823 qualifiers
= *qualifiers_list
;
825 DEBUG_TRACE ("complete qualifiers using list %d", i
);
828 dump_qualifier_sequence (qualifiers
);
831 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
832 ret
[j
] = *qualifiers
;
833 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
834 ret
[j
] = AARCH64_OPND_QLF_NIL
;
836 DEBUG_TRACE ("SUCCESS");
840 DEBUG_TRACE ("FAIL");
844 /* Operand qualifier matching and resolving.
846 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
847 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
849 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
853 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
856 aarch64_opnd_qualifier_seq_t qualifiers
;
858 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
861 DEBUG_TRACE ("matching FAIL");
865 /* Update the qualifiers. */
866 if (update_p
== TRUE
)
867 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
869 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
871 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
872 "update %s with %s for operand %d",
873 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
874 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
875 inst
->operands
[i
].qualifier
= qualifiers
[i
];
878 DEBUG_TRACE ("matching SUCCESS");
882 /* Return TRUE if VALUE is a wide constant that can be moved into a general
885 IS32 indicates whether value is a 32-bit immediate or not.
886 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
887 amount will be returned in *SHIFT_AMOUNT. */
890 aarch64_wide_constant_p (int64_t value
, int is32
, unsigned int *shift_amount
)
894 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
898 /* Allow all zeros or all ones in top 32-bits, so that
899 32-bit constant expressions like ~0x80000000 are
901 uint64_t ext
= value
;
902 if (ext
>> 32 != 0 && ext
>> 32 != (uint64_t) 0xffffffff)
903 /* Immediate out of range. */
905 value
&= (int64_t) 0xffffffff;
908 /* first, try movz then movn */
910 if ((value
& ((int64_t) 0xffff << 0)) == value
)
912 else if ((value
& ((int64_t) 0xffff << 16)) == value
)
914 else if (!is32
&& (value
& ((int64_t) 0xffff << 32)) == value
)
916 else if (!is32
&& (value
& ((int64_t) 0xffff << 48)) == value
)
921 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
925 if (shift_amount
!= NULL
)
926 *shift_amount
= amount
;
928 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
933 /* Build the accepted values for immediate logical SIMD instructions.
935 The standard encodings of the immediate value are:
936 N imms immr SIMD size R S
937 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
938 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
939 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
940 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
941 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
942 0 11110s 00000r 2 UInt(r) UInt(s)
943 where all-ones value of S is reserved.
945 Let's call E the SIMD size.
947 The immediate value is: S+1 bits '1' rotated to the right by R.
949 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
950 (remember S != E - 1). */
952 #define TOTAL_IMM_NB 5334
957 aarch64_insn encoding
;
960 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
963 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
965 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
966 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
968 if (imm1
->imm
< imm2
->imm
)
970 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
990 build_immediate_table (void)
992 uint32_t log_e
, e
, s
, r
, s_mask
;
998 for (log_e
= 1; log_e
<= 6; log_e
++)
1000 /* Get element size. */
1005 mask
= 0xffffffffffffffffull
;
1011 mask
= (1ull << e
) - 1;
1013 1 ((1 << 4) - 1) << 2 = 111100
1014 2 ((1 << 3) - 1) << 3 = 111000
1015 3 ((1 << 2) - 1) << 4 = 110000
1016 4 ((1 << 1) - 1) << 5 = 100000
1017 5 ((1 << 0) - 1) << 6 = 000000 */
1018 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1020 for (s
= 0; s
< e
- 1; s
++)
1021 for (r
= 0; r
< e
; r
++)
1023 /* s+1 consecutive bits to 1 (s < 63) */
1024 imm
= (1ull << (s
+ 1)) - 1;
1025 /* rotate right by r */
1027 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1028 /* replicate the constant depending on SIMD size */
1031 case 1: imm
= (imm
<< 2) | imm
;
1032 case 2: imm
= (imm
<< 4) | imm
;
1033 case 3: imm
= (imm
<< 8) | imm
;
1034 case 4: imm
= (imm
<< 16) | imm
;
1035 case 5: imm
= (imm
<< 32) | imm
;
1039 simd_immediates
[nb_imms
].imm
= imm
;
1040 simd_immediates
[nb_imms
].encoding
=
1041 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1045 assert (nb_imms
== TOTAL_IMM_NB
);
1046 qsort(simd_immediates
, nb_imms
,
1047 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1050 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1051 be accepted by logical (immediate) instructions
1052 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1054 IS32 indicates whether or not VALUE is a 32-bit immediate.
1055 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1056 VALUE will be returned in *ENCODING. */
1059 aarch64_logical_immediate_p (uint64_t value
, int is32
, aarch64_insn
*encoding
)
1061 simd_imm_encoding imm_enc
;
1062 const simd_imm_encoding
*imm_encoding
;
1063 static bfd_boolean initialized
= FALSE
;
1065 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), is32: %d", value
,
1068 if (initialized
== FALSE
)
1070 build_immediate_table ();
1076 /* Allow all zeros or all ones in top 32-bits, so that
1077 constant expressions like ~1 are permitted. */
1078 if (value
>> 32 != 0 && value
>> 32 != 0xffffffff)
1081 /* Replicate the 32 lower bits to the 32 upper bits. */
1082 value
&= 0xffffffff;
1083 value
|= value
<< 32;
1086 imm_enc
.imm
= value
;
1087 imm_encoding
= (const simd_imm_encoding
*)
1088 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1089 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1090 if (imm_encoding
== NULL
)
1092 DEBUG_TRACE ("exit with FALSE");
1095 if (encoding
!= NULL
)
1096 *encoding
= imm_encoding
->encoding
;
1097 DEBUG_TRACE ("exit with TRUE");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */

int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros cannot be the
	   expansion of a single bit.  */
	return -1;
    }
  return ret;
}
1123 /* Utility inline functions for operand_general_constraint_met_p. */
1126 set_error (aarch64_operand_error
*mismatch_detail
,
1127 enum aarch64_operand_error_kind kind
, int idx
,
1130 if (mismatch_detail
== NULL
)
1132 mismatch_detail
->kind
= kind
;
1133 mismatch_detail
->index
= idx
;
1134 mismatch_detail
->error
= error
;
1138 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1141 if (mismatch_detail
== NULL
)
1143 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1147 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1148 int idx
, int lower_bound
, int upper_bound
,
1151 if (mismatch_detail
== NULL
)
1153 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1154 mismatch_detail
->data
[0] = lower_bound
;
1155 mismatch_detail
->data
[1] = upper_bound
;
1159 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1160 int idx
, int lower_bound
, int upper_bound
)
1162 if (mismatch_detail
== NULL
)
1164 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1165 _("immediate value"));
1169 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1170 int idx
, int lower_bound
, int upper_bound
)
1172 if (mismatch_detail
== NULL
)
1174 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1175 _("immediate offset"));
1179 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1180 int idx
, int lower_bound
, int upper_bound
)
1182 if (mismatch_detail
== NULL
)
1184 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1185 _("register number"));
1189 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1190 int idx
, int lower_bound
, int upper_bound
)
1192 if (mismatch_detail
== NULL
)
1194 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1195 _("register element index"));
1199 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1200 int idx
, int lower_bound
, int upper_bound
)
1202 if (mismatch_detail
== NULL
)
1204 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1209 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1212 if (mismatch_detail
== NULL
)
1214 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1215 mismatch_detail
->data
[0] = alignment
;
1219 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1222 if (mismatch_detail
== NULL
)
1224 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1225 mismatch_detail
->data
[0] = expected_num
;
1229 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1232 if (mismatch_detail
== NULL
)
1234 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1237 /* General constraint checking based on operand code.
1239 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1240 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1242 This function has to be called after the qualifiers for all operands
1245 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1246 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1247 of error message during the disassembling where error message is not
1248 wanted. We avoid the dynamic construction of strings of error messages
1249 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1250 use a combination of error code, static string and some integer data to
1251 represent an error. */
1254 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1255 enum aarch64_opnd type
,
1256 const aarch64_opcode
*opcode
,
1257 aarch64_operand_error
*mismatch_detail
)
1262 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1263 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1265 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1267 switch (aarch64_operands
[type
].op_class
)
1269 case AARCH64_OPND_CLASS_INT_REG
:
1270 /* Check pair reg constraints for cas* instructions. */
1271 if (type
== AARCH64_OPND_PAIRREG
)
1273 assert (idx
== 1 || idx
== 3);
1274 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1276 set_syntax_error (mismatch_detail
, idx
- 1,
1277 _("reg pair must start from even reg"));
1280 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1282 set_syntax_error (mismatch_detail
, idx
,
1283 _("reg pair must be contiguous"));
1289 /* <Xt> may be optional in some IC and TLBI instructions. */
1290 if (type
== AARCH64_OPND_Rt_SYS
)
1292 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1293 == AARCH64_OPND_CLASS_SYSTEM
));
1294 if (opnds
[1].present
1295 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1297 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1300 if (!opnds
[1].present
1301 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1303 set_other_error (mismatch_detail
, idx
, _("missing register"));
1309 case AARCH64_OPND_QLF_WSP
:
1310 case AARCH64_OPND_QLF_SP
:
1311 if (!aarch64_stack_pointer_p (opnd
))
1313 set_other_error (mismatch_detail
, idx
,
1314 _("stack pointer register expected"));
1323 case AARCH64_OPND_CLASS_COND
:
1324 if (type
== AARCH64_OPND_COND1
1325 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1327 /* Not allow AL or NV. */
1328 set_syntax_error (mismatch_detail
, idx
, NULL
);
1332 case AARCH64_OPND_CLASS_ADDRESS
:
1333 /* Check writeback. */
1334 switch (opcode
->iclass
)
1338 case ldstnapair_offs
:
1341 if (opnd
->addr
.writeback
== 1)
1343 set_syntax_error (mismatch_detail
, idx
,
1344 _("unexpected address writeback"));
1349 case ldstpair_indexed
:
1352 if (opnd
->addr
.writeback
== 0)
1354 set_syntax_error (mismatch_detail
, idx
,
1355 _("address writeback expected"));
1360 assert (opnd
->addr
.writeback
== 0);
1365 case AARCH64_OPND_ADDR_SIMM7
:
1366 /* Scaled signed 7 bits immediate offset. */
1367 /* Get the size of the data element that is accessed, which may be
1368 different from that of the source register size,
1369 e.g. in strb/ldrb. */
1370 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1371 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1373 set_offset_out_of_range_error (mismatch_detail
, idx
,
1374 -64 * size
, 63 * size
);
1377 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1379 set_unaligned_error (mismatch_detail
, idx
, size
);
1383 case AARCH64_OPND_ADDR_SIMM9
:
1384 /* Unscaled signed 9 bits immediate offset. */
1385 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1387 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1392 case AARCH64_OPND_ADDR_SIMM9_2
:
1393 /* Unscaled signed 9 bits immediate offset, which has to be negative
1395 size
= aarch64_get_qualifier_esize (qualifier
);
1396 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1397 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1398 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1400 set_other_error (mismatch_detail
, idx
,
1401 _("negative or unaligned offset expected"));
1404 case AARCH64_OPND_SIMD_ADDR_POST
:
1405 /* AdvSIMD load/store multiple structures, post-index. */
1407 if (opnd
->addr
.offset
.is_reg
)
1409 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1413 set_other_error (mismatch_detail
, idx
,
1414 _("invalid register offset"));
1420 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1421 unsigned num_bytes
; /* total number of bytes transferred. */
1422 /* The opcode dependent area stores the number of elements in
1423 each structure to be loaded/stored. */
1424 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1425 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1426 /* Special handling of loading single structure to all lane. */
1427 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1428 * aarch64_get_qualifier_esize (prev
->qualifier
);
1430 num_bytes
= prev
->reglist
.num_regs
1431 * aarch64_get_qualifier_esize (prev
->qualifier
)
1432 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1433 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1435 set_other_error (mismatch_detail
, idx
,
1436 _("invalid post-increment amount"));
1442 case AARCH64_OPND_ADDR_REGOFF
:
1443 /* Get the size of the data element that is accessed, which may be
1444 different from that of the source register size,
1445 e.g. in strb/ldrb. */
1446 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1447 /* It is either no shift or shift by the binary logarithm of SIZE. */
1448 if (opnd
->shifter
.amount
!= 0
1449 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1451 set_other_error (mismatch_detail
, idx
,
1452 _("invalid shift amount"));
1455 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1457 switch (opnd
->shifter
.kind
)
1459 case AARCH64_MOD_UXTW
:
1460 case AARCH64_MOD_LSL
:
1461 case AARCH64_MOD_SXTW
:
1462 case AARCH64_MOD_SXTX
: break;
1464 set_other_error (mismatch_detail
, idx
,
1465 _("invalid extend/shift operator"));
1470 case AARCH64_OPND_ADDR_UIMM12
:
1471 imm
= opnd
->addr
.offset
.imm
;
1472 /* Get the size of the data element that is accessed, which may be
1473 different from that of the source register size,
1474 e.g. in strb/ldrb. */
1475 size
= aarch64_get_qualifier_esize (qualifier
);
1476 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1478 set_offset_out_of_range_error (mismatch_detail
, idx
,
1482 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1484 set_unaligned_error (mismatch_detail
, idx
, size
);
1489 case AARCH64_OPND_ADDR_PCREL14
:
1490 case AARCH64_OPND_ADDR_PCREL19
:
1491 case AARCH64_OPND_ADDR_PCREL21
:
1492 case AARCH64_OPND_ADDR_PCREL26
:
1493 imm
= opnd
->imm
.value
;
1494 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1496 /* The offset value in a PC-relative branch instruction is alway
1497 4-byte aligned and is encoded without the lowest 2 bits. */
1498 if (!value_aligned_p (imm
, 4))
1500 set_unaligned_error (mismatch_detail
, idx
, 4);
1503 /* Right shift by 2 so that we can carry out the following check
1507 size
= get_operand_fields_width (get_operand_from_code (type
));
1508 if (!value_fit_signed_field_p (imm
, size
))
1510 set_other_error (mismatch_detail
, idx
,
1511 _("immediate out of range"));
1521 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
1522 /* The opcode dependent area stores the number of elements in
1523 each structure to be loaded/stored. */
1524 num
= get_opcode_dependent_value (opcode
);
1527 case AARCH64_OPND_LVt
:
1528 assert (num
>= 1 && num
<= 4);
1529 /* Unless LD1/ST1, the number of registers should be equal to that
1530 of the structure elements. */
1531 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
1533 set_reg_list_error (mismatch_detail
, idx
, num
);
1537 case AARCH64_OPND_LVt_AL
:
1538 case AARCH64_OPND_LEt
:
1539 assert (num
>= 1 && num
<= 4);
1540 /* The number of registers should be equal to that of the structure
1542 if (opnd
->reglist
.num_regs
!= num
)
1544 set_reg_list_error (mismatch_detail
, idx
, num
);
1553 case AARCH64_OPND_CLASS_IMMEDIATE
:
1554 /* Constraint check on immediate operand. */
1555 imm
= opnd
->imm
.value
;
1556 /* E.g. imm_0_31 constrains value to be 0..31. */
1557 if (qualifier_value_in_range_constraint_p (qualifier
)
1558 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
1559 get_upper_bound (qualifier
)))
1561 set_imm_out_of_range_error (mismatch_detail
, idx
,
1562 get_lower_bound (qualifier
),
1563 get_upper_bound (qualifier
));
1569 case AARCH64_OPND_AIMM
:
1570 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1572 set_other_error (mismatch_detail
, idx
,
1573 _("invalid shift operator"));
1576 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
1578 set_other_error (mismatch_detail
, idx
,
1579 _("shift amount expected to be 0 or 12"));
1582 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
1584 set_other_error (mismatch_detail
, idx
,
1585 _("immediate out of range"));
1590 case AARCH64_OPND_HALF
:
1591 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
1592 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1594 set_other_error (mismatch_detail
, idx
,
1595 _("invalid shift operator"));
1598 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1599 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
1601 set_other_error (mismatch_detail
, idx
,
1602 _("shift amount should be a multiple of 16"));
1605 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
1607 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
1611 if (opnd
->imm
.value
< 0)
1613 set_other_error (mismatch_detail
, idx
,
1614 _("negative immediate value not allowed"));
1617 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
1619 set_other_error (mismatch_detail
, idx
,
1620 _("immediate out of range"));
1625 case AARCH64_OPND_IMM_MOV
:
1627 int is32
= aarch64_get_qualifier_esize (opnds
[0].qualifier
) == 4;
1628 imm
= opnd
->imm
.value
;
1632 case OP_MOV_IMM_WIDEN
:
1634 /* Fall through... */
1635 case OP_MOV_IMM_WIDE
:
1636 if (!aarch64_wide_constant_p (imm
, is32
, NULL
))
1638 set_other_error (mismatch_detail
, idx
,
1639 _("immediate out of range"));
1643 case OP_MOV_IMM_LOG
:
1644 if (!aarch64_logical_immediate_p (imm
, is32
, NULL
))
1646 set_other_error (mismatch_detail
, idx
,
1647 _("immediate out of range"));
1658 case AARCH64_OPND_NZCV
:
1659 case AARCH64_OPND_CCMP_IMM
:
1660 case AARCH64_OPND_EXCEPTION
:
1661 case AARCH64_OPND_UIMM4
:
1662 case AARCH64_OPND_UIMM7
:
1663 case AARCH64_OPND_UIMM3_OP1
:
1664 case AARCH64_OPND_UIMM3_OP2
:
1665 size
= get_operand_fields_width (get_operand_from_code (type
));
1667 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
1669 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1675 case AARCH64_OPND_WIDTH
:
1676 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
1677 && opnds
[0].type
== AARCH64_OPND_Rd
);
1678 size
= get_upper_bound (qualifier
);
1679 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
1680 /* lsb+width <= reg.size */
1682 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
1683 size
- opnds
[idx
-1].imm
.value
);
1688 case AARCH64_OPND_LIMM
:
1690 int is32
= opnds
[0].qualifier
== AARCH64_OPND_QLF_W
;
1691 uint64_t uimm
= opnd
->imm
.value
;
1692 if (opcode
->op
== OP_BIC
)
1694 if (aarch64_logical_immediate_p (uimm
, is32
, NULL
) == FALSE
)
1696 set_other_error (mismatch_detail
, idx
,
1697 _("immediate out of range"));
1703 case AARCH64_OPND_IMM0
:
1704 case AARCH64_OPND_FPIMM0
:
1705 if (opnd
->imm
.value
!= 0)
1707 set_other_error (mismatch_detail
, idx
,
1708 _("immediate zero expected"));
1713 case AARCH64_OPND_SHLL_IMM
:
1715 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
1716 if (opnd
->imm
.value
!= size
)
1718 set_other_error (mismatch_detail
, idx
,
1719 _("invalid shift amount"));
1724 case AARCH64_OPND_IMM_VLSL
:
1725 size
= aarch64_get_qualifier_esize (qualifier
);
1726 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
1728 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
1734 case AARCH64_OPND_IMM_VLSR
:
1735 size
= aarch64_get_qualifier_esize (qualifier
);
1736 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
1738 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
1743 case AARCH64_OPND_SIMD_IMM
:
1744 case AARCH64_OPND_SIMD_IMM_SFT
:
1745 /* Qualifier check. */
1748 case AARCH64_OPND_QLF_LSL
:
1749 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1751 set_other_error (mismatch_detail
, idx
,
1752 _("invalid shift operator"));
1756 case AARCH64_OPND_QLF_MSL
:
1757 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
1759 set_other_error (mismatch_detail
, idx
,
1760 _("invalid shift operator"));
1764 case AARCH64_OPND_QLF_NIL
:
1765 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1767 set_other_error (mismatch_detail
, idx
,
1768 _("shift is not permitted"));
1776 /* Is the immediate valid? */
1778 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
1780 /* uimm8 or simm8 */
1781 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
1783 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
1787 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
1790 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1791 ffffffffgggggggghhhhhhhh'. */
1792 set_other_error (mismatch_detail
, idx
,
1793 _("invalid value for immediate"));
1796 /* Is the shift amount valid? */
1797 switch (opnd
->shifter
.kind
)
1799 case AARCH64_MOD_LSL
:
1800 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
1801 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
1803 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
1807 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
1809 set_unaligned_error (mismatch_detail
, idx
, 8);
1813 case AARCH64_MOD_MSL
:
1814 /* Only 8 and 16 are valid shift amount. */
1815 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
1817 set_other_error (mismatch_detail
, idx
,
1818 _("shift amount expected to be 0 or 16"));
1823 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1825 set_other_error (mismatch_detail
, idx
,
1826 _("invalid shift operator"));
1833 case AARCH64_OPND_FPIMM
:
1834 case AARCH64_OPND_SIMD_FPIMM
:
1835 if (opnd
->imm
.is_fp
== 0)
1837 set_other_error (mismatch_detail
, idx
,
1838 _("floating-point immediate expected"));
1841 /* The value is expected to be an 8-bit floating-point constant with
1842 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1843 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1845 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
1847 set_other_error (mismatch_detail
, idx
,
1848 _("immediate out of range"));
1851 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
1853 set_other_error (mismatch_detail
, idx
,
1854 _("invalid shift operator"));
1864 case AARCH64_OPND_CLASS_CP_REG
:
1865 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1866 valid range: C0 - C15. */
1867 if (opnd
->reg
.regno
> 15)
1869 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
1874 case AARCH64_OPND_CLASS_SYSTEM
:
1877 case AARCH64_OPND_PSTATEFIELD
:
1878 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
1880 The immediate must be #0 or #1. */
1881 if (opnd
->pstatefield
== 0x04 /* PAN. */
1882 && opnds
[1].imm
.value
> 1)
1884 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
1887 /* MSR SPSel, #uimm4
1888 Uses uimm4 as a control value to select the stack pointer: if
1889 bit 0 is set it selects the current exception level's stack
1890 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1891 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1892 if (opnd
->pstatefield
== 0x05 /* spsel */ && opnds
[1].imm
.value
> 1)
1894 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1);
1903 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
1904 /* Get the upper bound for the element index. */
1905 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
1906 /* Index out-of-range. */
1907 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
1909 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
1912 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1913 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
1914 number is encoded in "size:M:Rm":
1920 if (type
== AARCH64_OPND_Em
&& qualifier
== AARCH64_OPND_QLF_S_H
1921 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
1923 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
1928 case AARCH64_OPND_CLASS_MODIFIED_REG
:
1929 assert (idx
== 1 || idx
== 2);
1932 case AARCH64_OPND_Rm_EXT
:
1933 if (aarch64_extend_operator_p (opnd
->shifter
.kind
) == FALSE
1934 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
1936 set_other_error (mismatch_detail
, idx
,
1937 _("extend operator expected"));
1940 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1941 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1942 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1944 if (!aarch64_stack_pointer_p (opnds
+ 0)
1945 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
1947 if (!opnd
->shifter
.operator_present
)
1949 set_other_error (mismatch_detail
, idx
,
1950 _("missing extend operator"));
1953 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
1955 set_other_error (mismatch_detail
, idx
,
1956 _("'LSL' operator not allowed"));
1960 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
1961 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
1962 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
1964 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
1967 /* In the 64-bit form, the final register operand is written as Wm
1968 for all but the (possibly omitted) UXTX/LSL and SXTX
1970 N.B. GAS allows X register to be used with any operator as a
1971 programming convenience. */
1972 if (qualifier
== AARCH64_OPND_QLF_X
1973 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
1974 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
1975 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
1977 set_other_error (mismatch_detail
, idx
, _("W register expected"));
1982 case AARCH64_OPND_Rm_SFT
:
1983 /* ROR is not available to the shifted register operand in
1984 arithmetic instructions. */
1985 if (aarch64_shift_operator_p (opnd
->shifter
.kind
) == FALSE
)
1987 set_other_error (mismatch_detail
, idx
,
1988 _("shift operator expected"));
1991 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
1992 && opcode
->iclass
!= log_shift
)
1994 set_other_error (mismatch_detail
, idx
,
1995 _("'ROR' operator not allowed"));
1998 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
1999 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
2001 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2018 /* Main entrypoint for the operand constraint checking.
2020 Return 1 if operands of *INST meet the constraint applied by the operand
2021 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2022 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2023 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2024 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2025 error kind when it is notified that an instruction does not pass the check).
2027 Un-determined operand qualifiers may get established during the process. */
2030 aarch64_match_operands_constraint (aarch64_inst
*inst
,
2031 aarch64_operand_error
*mismatch_detail
)
2035 DEBUG_TRACE ("enter");
2037 /* Match operands' qualifier.
2038 *INST has already had qualifier establish for some, if not all, of
2039 its operands; we need to find out whether these established
2040 qualifiers match one of the qualifier sequence in
2041 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2042 with the corresponding qualifier in such a sequence.
2043 Only basic operand constraint checking is done here; the more thorough
2044 constraint checking will carried out by operand_general_constraint_met_p,
2045 which has be to called after this in order to get all of the operands'
2046 qualifiers established. */
2047 if (match_operands_qualifier (inst
, TRUE
/* update_p */) == 0)
2049 DEBUG_TRACE ("FAIL on operand qualifier matching");
2050 if (mismatch_detail
)
2052 /* Return an error type to indicate that it is the qualifier
2053 matching failure; we don't care about which operand as there
2054 are enough information in the opcode table to reproduce it. */
2055 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
2056 mismatch_detail
->index
= -1;
2057 mismatch_detail
->error
= NULL
;
2062 /* Match operands' constraint. */
2063 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2065 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
2066 if (type
== AARCH64_OPND_NIL
)
2068 if (inst
->operands
[i
].skip
)
2070 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2073 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
2074 inst
->opcode
, mismatch_detail
) == 0)
2076 DEBUG_TRACE ("FAIL on operand %d", i
);
2081 DEBUG_TRACE ("PASS");
2086 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2087 Also updates the TYPE of each INST->OPERANDS with the corresponding
2088 value of OPCODE->OPERANDS.
2090 Note that some operand qualifiers may need to be manually cleared by
2091 the caller before it further calls the aarch64_opcode_encode; by
2092 doing this, it helps the qualifier matching facilities work
2095 const aarch64_opcode
*
2096 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
2099 const aarch64_opcode
*old
= inst
->opcode
;
2101 inst
->opcode
= opcode
;
2103 /* Update the operand types. */
2104 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2106 inst
->operands
[i
].type
= opcode
->operands
[i
];
2107 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
2111 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
2117 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
2120 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2121 if (operands
[i
] == operand
)
2123 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32 "w"
#define R64 "x"
  { { R32  "0", R32  "1", R32  "2", R32  "3", R32  "4", R32  "5", R32  "6", R32  "7",
      R32  "8", R32  "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
    { R64  "0", R64  "1", R64  "2", R64  "3", R64  "4", R64  "5", R64  "6", R64  "7",
      R64  "8", R64  "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
  { { R32  "0", R32  "1", R32  "2", R32  "3", R32  "4", R32  "5", R32  "6", R32  "7",
      R32  "8", R32  "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
    { R64  "0", R64  "1", R64  "2", R64  "3", R64  "4", R64  "5", R64  "6", R64  "7",
      R64  "8", R64  "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
#undef R32
#undef R64
};
2155 /* Return the integer register name.
2156 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2158 static inline const char *
2159 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
2161 const int has_zr
= sp_reg_p
? 0 : 1;
2162 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
2163 return int_reg
[has_zr
][is_64
][regno
];
2166 /* Like get_int_reg_name, but IS_64 is always 1. */
2168 static inline const char *
2169 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
2171 const int has_zr
= sp_reg_p
? 0 : 1;
2172 return int_reg
[has_zr
][1][regno
];
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

typedef union
{
  uint64_t i;
  double   d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float    f;
} single_conv_t;

/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6)	*/
	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      imm = 0;
    }

  return imm;
}
2239 /* Produce the string representation of the register list operand *OPND
2240 in the buffer pointed by BUF of size SIZE. */
2242 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
)
2244 const int num_regs
= opnd
->reglist
.num_regs
;
2245 const int first_reg
= opnd
->reglist
.first_regno
;
2246 const int last_reg
= (first_reg
+ num_regs
- 1) & 0x1f;
2247 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
2248 char tb
[8]; /* Temporary buffer. */
2250 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
2251 assert (num_regs
>= 1 && num_regs
<= 4);
2253 /* Prepare the index if any. */
2254 if (opnd
->reglist
.has_index
)
2255 snprintf (tb
, 8, "[%d]", opnd
->reglist
.index
);
2259 /* The hyphenated form is preferred for disassembly if there are
2260 more than two registers in the list, and the register numbers
2261 are monotonically increasing in increments of one. */
2262 if (num_regs
> 2 && last_reg
> first_reg
)
2263 snprintf (buf
, size
, "{v%d.%s-v%d.%s}%s", first_reg
, qlf_name
,
2264 last_reg
, qlf_name
, tb
);
2267 const int reg0
= first_reg
;
2268 const int reg1
= (first_reg
+ 1) & 0x1f;
2269 const int reg2
= (first_reg
+ 2) & 0x1f;
2270 const int reg3
= (first_reg
+ 3) & 0x1f;
2275 snprintf (buf
, size
, "{v%d.%s}%s", reg0
, qlf_name
, tb
);
2278 snprintf (buf
, size
, "{v%d.%s, v%d.%s}%s", reg0
, qlf_name
,
2279 reg1
, qlf_name
, tb
);
2282 snprintf (buf
, size
, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0
, qlf_name
,
2283 reg1
, qlf_name
, reg2
, qlf_name
, tb
);
2286 snprintf (buf
, size
, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2287 reg0
, qlf_name
, reg1
, qlf_name
, reg2
, qlf_name
,
2288 reg3
, qlf_name
, tb
);
2294 /* Produce the string representation of the register offset address operand
2295 *OPND in the buffer pointed by BUF of size SIZE. */
2297 print_register_offset_address (char *buf
, size_t size
,
2298 const aarch64_opnd_info
*opnd
)
2300 const size_t tblen
= 16;
2301 char tb
[tblen
]; /* Temporary buffer. */
2302 bfd_boolean lsl_p
= FALSE
; /* Is LSL shift operator? */
2303 bfd_boolean wm_p
= FALSE
; /* Should Rm be Wm? */
2304 bfd_boolean print_extend_p
= TRUE
;
2305 bfd_boolean print_amount_p
= TRUE
;
2306 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
2308 switch (opnd
->shifter
.kind
)
2310 case AARCH64_MOD_UXTW
: wm_p
= TRUE
; break;
2311 case AARCH64_MOD_LSL
: lsl_p
= TRUE
; break;
2312 case AARCH64_MOD_SXTW
: wm_p
= TRUE
; break;
2313 case AARCH64_MOD_SXTX
: break;
2314 default: assert (0);
2317 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
2318 || !opnd
->shifter
.amount_present
))
2320 /* Not print the shift/extend amount when the amount is zero and
2321 when it is not the special case of 8-bit load/store instruction. */
2322 print_amount_p
= FALSE
;
2323 /* Likewise, no need to print the shift operator LSL in such a
2326 print_extend_p
= FALSE
;
2329 /* Prepare for the extend/shift. */
2333 snprintf (tb
, tblen
, ",%s #%d", shift_name
, opnd
->shifter
.amount
);
2335 snprintf (tb
, tblen
, ",%s", shift_name
);
2340 snprintf (buf
, size
, "[%s,%s%s]",
2341 get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
2342 get_int_reg_name (opnd
->addr
.offset
.regno
,
2343 wm_p
? AARCH64_OPND_QLF_W
: AARCH64_OPND_QLF_X
,
2348 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2349 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2350 PC, PCREL_P and ADDRESS are used to pass in and return information about
2351 the PC-relative address calculation, where the PC value is passed in
2352 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2353 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2354 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2356 The function serves both the disassembler and the assembler diagnostics
2357 issuer, which is the reason why it lives in this file. */
2360 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
2361 const aarch64_opcode
*opcode
,
2362 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
2366 const char *name
= NULL
;
2367 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
2368 enum aarch64_modifier_kind kind
;
2377 case AARCH64_OPND_Rd
:
2378 case AARCH64_OPND_Rn
:
2379 case AARCH64_OPND_Rm
:
2380 case AARCH64_OPND_Rt
:
2381 case AARCH64_OPND_Rt2
:
2382 case AARCH64_OPND_Rs
:
2383 case AARCH64_OPND_Ra
:
2384 case AARCH64_OPND_Rt_SYS
:
2385 case AARCH64_OPND_PAIRREG
:
2386 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2387 the <ic_op>, therefore we we use opnd->present to override the
2388 generic optional-ness information. */
2389 if (opnd
->type
== AARCH64_OPND_Rt_SYS
&& !opnd
->present
)
2391 /* Omit the operand, e.g. RET. */
2392 if (optional_operand_p (opcode
, idx
)
2393 && opnd
->reg
.regno
== get_optional_operand_default_value (opcode
))
2395 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2396 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2397 snprintf (buf
, size
, "%s",
2398 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2401 case AARCH64_OPND_Rd_SP
:
2402 case AARCH64_OPND_Rn_SP
:
2403 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2404 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
2405 || opnd
->qualifier
== AARCH64_OPND_QLF_X
2406 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
2407 snprintf (buf
, size
, "%s",
2408 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
2411 case AARCH64_OPND_Rm_EXT
:
2412 kind
= opnd
->shifter
.kind
;
2413 assert (idx
== 1 || idx
== 2);
2414 if ((aarch64_stack_pointer_p (opnds
)
2415 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
2416 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
2417 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
2418 && kind
== AARCH64_MOD_UXTW
)
2419 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
2420 && kind
== AARCH64_MOD_UXTX
)))
2422 /* 'LSL' is the preferred form in this case. */
2423 kind
= AARCH64_MOD_LSL
;
2424 if (opnd
->shifter
.amount
== 0)
2426 /* Shifter omitted. */
2427 snprintf (buf
, size
, "%s",
2428 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2432 if (opnd
->shifter
.amount
)
2433 snprintf (buf
, size
, "%s, %s #%d",
2434 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2435 aarch64_operand_modifiers
[kind
].name
,
2436 opnd
->shifter
.amount
);
2438 snprintf (buf
, size
, "%s, %s",
2439 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2440 aarch64_operand_modifiers
[kind
].name
);
2443 case AARCH64_OPND_Rm_SFT
:
2444 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
2445 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
2446 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2447 snprintf (buf
, size
, "%s",
2448 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
2450 snprintf (buf
, size
, "%s, %s #%d",
2451 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
2452 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2453 opnd
->shifter
.amount
);
2456 case AARCH64_OPND_Fd
:
2457 case AARCH64_OPND_Fn
:
2458 case AARCH64_OPND_Fm
:
2459 case AARCH64_OPND_Fa
:
2460 case AARCH64_OPND_Ft
:
2461 case AARCH64_OPND_Ft2
:
2462 case AARCH64_OPND_Sd
:
2463 case AARCH64_OPND_Sn
:
2464 case AARCH64_OPND_Sm
:
2465 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
2469 case AARCH64_OPND_Vd
:
2470 case AARCH64_OPND_Vn
:
2471 case AARCH64_OPND_Vm
:
2472 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
2473 aarch64_get_qualifier_name (opnd
->qualifier
));
2476 case AARCH64_OPND_Ed
:
2477 case AARCH64_OPND_En
:
2478 case AARCH64_OPND_Em
:
2479 snprintf (buf
, size
, "v%d.%s[%d]", opnd
->reglane
.regno
,
2480 aarch64_get_qualifier_name (opnd
->qualifier
),
2481 opnd
->reglane
.index
);
2484 case AARCH64_OPND_VdD1
:
2485 case AARCH64_OPND_VnD1
:
2486 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
2489 case AARCH64_OPND_LVn
:
2490 case AARCH64_OPND_LVt
:
2491 case AARCH64_OPND_LVt_AL
:
2492 case AARCH64_OPND_LEt
:
2493 print_register_list (buf
, size
, opnd
);
2496 case AARCH64_OPND_Cn
:
2497 case AARCH64_OPND_Cm
:
2498 snprintf (buf
, size
, "C%d", opnd
->reg
.regno
);
2501 case AARCH64_OPND_IDX
:
2502 case AARCH64_OPND_IMM
:
2503 case AARCH64_OPND_WIDTH
:
2504 case AARCH64_OPND_UIMM3_OP1
:
2505 case AARCH64_OPND_UIMM3_OP2
:
2506 case AARCH64_OPND_BIT_NUM
:
2507 case AARCH64_OPND_IMM_VLSL
:
2508 case AARCH64_OPND_IMM_VLSR
:
2509 case AARCH64_OPND_SHLL_IMM
:
2510 case AARCH64_OPND_IMM0
:
2511 case AARCH64_OPND_IMMR
:
2512 case AARCH64_OPND_IMMS
:
2513 case AARCH64_OPND_FBITS
:
2514 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
2517 case AARCH64_OPND_IMM_MOV
:
2518 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
2520 case 4: /* e.g. MOV Wd, #<imm32>. */
2522 int imm32
= opnd
->imm
.value
;
2523 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
2526 case 8: /* e.g. MOV Xd, #<imm64>. */
2527 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
2528 opnd
->imm
.value
, opnd
->imm
.value
);
2530 default: assert (0);
2534 case AARCH64_OPND_FPIMM0
:
2535 snprintf (buf
, size
, "#0.0");
2538 case AARCH64_OPND_LIMM
:
2539 case AARCH64_OPND_AIMM
:
2540 case AARCH64_OPND_HALF
:
2541 if (opnd
->shifter
.amount
)
2542 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%d", opnd
->imm
.value
,
2543 opnd
->shifter
.amount
);
2545 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
2548 case AARCH64_OPND_SIMD_IMM
:
2549 case AARCH64_OPND_SIMD_IMM_SFT
:
2550 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
2551 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
2552 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
2554 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%d", opnd
->imm
.value
,
2555 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
2556 opnd
->shifter
.amount
);
2559 case AARCH64_OPND_FPIMM
:
2560 case AARCH64_OPND_SIMD_FPIMM
:
2561 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
2563 case 2: /* e.g. FMOV <Hd>, #<imm>. */
2566 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
2567 snprintf (buf
, size
, "#%.18e", c
.f
);
2570 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2573 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
2574 snprintf (buf
, size
, "#%.18e", c
.f
);
2577 case 8: /* e.g. FMOV <Sd>, #<imm>. */
2580 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
2581 snprintf (buf
, size
, "#%.18e", c
.d
);
2584 default: assert (0);
2588 case AARCH64_OPND_CCMP_IMM
:
2589 case AARCH64_OPND_NZCV
:
2590 case AARCH64_OPND_EXCEPTION
:
2591 case AARCH64_OPND_UIMM4
:
2592 case AARCH64_OPND_UIMM7
:
2593 if (optional_operand_p (opcode
, idx
) == TRUE
2594 && (opnd
->imm
.value
==
2595 (int64_t) get_optional_operand_default_value (opcode
)))
2596 /* Omit the operand, e.g. DCPS1. */
2598 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
2601 case AARCH64_OPND_COND
:
2602 case AARCH64_OPND_COND1
:
2603 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
2606 case AARCH64_OPND_ADDR_ADRP
:
2607 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
2613 /* This is not necessary during the disassembling, as print_address_func
2614 in the disassemble_info will take care of the printing. But some
2615 other callers may be still interested in getting the string in *STR,
2616 so here we do snprintf regardless. */
2617 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
2620 case AARCH64_OPND_ADDR_PCREL14
:
2621 case AARCH64_OPND_ADDR_PCREL19
:
2622 case AARCH64_OPND_ADDR_PCREL21
:
2623 case AARCH64_OPND_ADDR_PCREL26
:
2624 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
2629 /* This is not necessary during the disassembling, as print_address_func
2630 in the disassemble_info will take care of the printing. But some
2631 other callers may be still interested in getting the string in *STR,
2632 so here we do snprintf regardless. */
2633 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
2636 case AARCH64_OPND_ADDR_SIMPLE
:
2637 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
2638 case AARCH64_OPND_SIMD_ADDR_POST
:
2639 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2640 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
2642 if (opnd
->addr
.offset
.is_reg
)
2643 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
2645 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
2648 snprintf (buf
, size
, "[%s]", name
);
2651 case AARCH64_OPND_ADDR_REGOFF
:
2652 print_register_offset_address (buf
, size
, opnd
);
2655 case AARCH64_OPND_ADDR_SIMM7
:
2656 case AARCH64_OPND_ADDR_SIMM9
:
2657 case AARCH64_OPND_ADDR_SIMM9_2
:
2658 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2659 if (opnd
->addr
.writeback
)
2661 if (opnd
->addr
.preind
)
2662 snprintf (buf
, size
, "[%s,#%d]!", name
, opnd
->addr
.offset
.imm
);
2664 snprintf (buf
, size
, "[%s],#%d", name
, opnd
->addr
.offset
.imm
);
2668 if (opnd
->addr
.offset
.imm
)
2669 snprintf (buf
, size
, "[%s,#%d]", name
, opnd
->addr
.offset
.imm
);
2671 snprintf (buf
, size
, "[%s]", name
);
2675 case AARCH64_OPND_ADDR_UIMM12
:
2676 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
2677 if (opnd
->addr
.offset
.imm
)
2678 snprintf (buf
, size
, "[%s,#%d]", name
, opnd
->addr
.offset
.imm
);
2680 snprintf (buf
, size
, "[%s]", name
);
2683 case AARCH64_OPND_SYSREG
:
2684 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
2685 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
2686 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs
[i
]))
2688 if (aarch64_sys_regs
[i
].name
)
2689 snprintf (buf
, size
, "%s", aarch64_sys_regs
[i
].name
);
2692 /* Implementation defined system register. */
2693 unsigned int value
= opnd
->sysreg
;
2694 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
2695 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
2700 case AARCH64_OPND_PSTATEFIELD
:
2701 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
2702 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
2704 assert (aarch64_pstatefields
[i
].name
);
2705 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
2708 case AARCH64_OPND_SYSREG_AT
:
2709 case AARCH64_OPND_SYSREG_DC
:
2710 case AARCH64_OPND_SYSREG_IC
:
2711 case AARCH64_OPND_SYSREG_TLBI
:
2712 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
2715 case AARCH64_OPND_BARRIER
:
2716 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
2719 case AARCH64_OPND_BARRIER_ISB
:
2720 /* Operand can be omitted, e.g. in DCPS1. */
2721 if (! optional_operand_p (opcode
, idx
)
2722 || (opnd
->barrier
->value
2723 != get_optional_operand_default_value (opcode
)))
2724 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
2727 case AARCH64_OPND_PRFOP
:
2728 if (opnd
->prfop
->name
!= NULL
)
2729 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
2731 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
/* Build the 19-bit system-register encoding from the op0:op1:CRn:CRm:op2
   fields; the value is assembled at bit 5 and shifted back down so the
   result is a plain integer comparable with table entries.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers.  */
#define CPEN_(op1,crm,op2) CPENC (3, (op1), 4, (crm), (op2))
/* for 3.9.10 System Instructions.  */
#define CPENS(op1,crn,crm,op2) CPENC (1, (op1), (crn), (crm), (op2))

#define F_DEPRECATED	0x1	/* Deprecated system register.  */
#define F_ARCHEXT	0x2	/* Architecture dependent system register.  */
#define F_HASXT		0x4	/* System instruction register <Xt> operand.  */

/* TODO there are two more issues need to be resolved
   1. handle read-only and write-only system registers
   2. handle cpu-implementation-defined system registers.  */
2783 const aarch64_sys_reg aarch64_sys_regs
[] =
2785 { "spsr_el1", CPEN_(0,C0
,0), 0 }, /* = spsr_svc */
2786 { "spsr_el12", CPEN_ (5, C0
, 0), F_ARCHEXT
},
2787 { "elr_el1", CPEN_(0,C0
,1), 0 },
2788 { "elr_el12", CPEN_ (5, C0
, 1), F_ARCHEXT
},
2789 { "sp_el0", CPEN_(0,C1
,0), 0 },
2790 { "spsel", CPEN_(0,C2
,0), 0 },
2791 { "daif", CPEN_(3,C2
,1), 0 },
2792 { "currentel", CPEN_(0,C2
,2), 0 }, /* RO */
2793 { "pan", CPEN_(0,C2
,3), F_ARCHEXT
},
2794 { "uao", CPEN_ (0, C2
, 4), F_ARCHEXT
},
2795 { "nzcv", CPEN_(3,C2
,0), 0 },
2796 { "fpcr", CPEN_(3,C4
,0), 0 },
2797 { "fpsr", CPEN_(3,C4
,1), 0 },
2798 { "dspsr_el0", CPEN_(3,C5
,0), 0 },
2799 { "dlr_el0", CPEN_(3,C5
,1), 0 },
2800 { "spsr_el2", CPEN_(4,C0
,0), 0 }, /* = spsr_hyp */
2801 { "elr_el2", CPEN_(4,C0
,1), 0 },
2802 { "sp_el1", CPEN_(4,C1
,0), 0 },
2803 { "spsr_irq", CPEN_(4,C3
,0), 0 },
2804 { "spsr_abt", CPEN_(4,C3
,1), 0 },
2805 { "spsr_und", CPEN_(4,C3
,2), 0 },
2806 { "spsr_fiq", CPEN_(4,C3
,3), 0 },
2807 { "spsr_el3", CPEN_(6,C0
,0), 0 },
2808 { "elr_el3", CPEN_(6,C0
,1), 0 },
2809 { "sp_el2", CPEN_(6,C1
,0), 0 },
2810 { "spsr_svc", CPEN_(0,C0
,0), F_DEPRECATED
}, /* = spsr_el1 */
2811 { "spsr_hyp", CPEN_(4,C0
,0), F_DEPRECATED
}, /* = spsr_el2 */
2812 { "midr_el1", CPENC(3,0,C0
,C0
,0), 0 }, /* RO */
2813 { "ctr_el0", CPENC(3,3,C0
,C0
,1), 0 }, /* RO */
2814 { "mpidr_el1", CPENC(3,0,C0
,C0
,5), 0 }, /* RO */
2815 { "revidr_el1", CPENC(3,0,C0
,C0
,6), 0 }, /* RO */
2816 { "aidr_el1", CPENC(3,1,C0
,C0
,7), 0 }, /* RO */
2817 { "dczid_el0", CPENC(3,3,C0
,C0
,7), 0 }, /* RO */
2818 { "id_dfr0_el1", CPENC(3,0,C0
,C1
,2), 0 }, /* RO */
2819 { "id_pfr0_el1", CPENC(3,0,C0
,C1
,0), 0 }, /* RO */
2820 { "id_pfr1_el1", CPENC(3,0,C0
,C1
,1), 0 }, /* RO */
2821 { "id_afr0_el1", CPENC(3,0,C0
,C1
,3), 0 }, /* RO */
2822 { "id_mmfr0_el1", CPENC(3,0,C0
,C1
,4), 0 }, /* RO */
2823 { "id_mmfr1_el1", CPENC(3,0,C0
,C1
,5), 0 }, /* RO */
2824 { "id_mmfr2_el1", CPENC(3,0,C0
,C1
,6), 0 }, /* RO */
2825 { "id_mmfr3_el1", CPENC(3,0,C0
,C1
,7), 0 }, /* RO */
2826 { "id_mmfr4_el1", CPENC(3,0,C0
,C2
,6), 0 }, /* RO */
2827 { "id_isar0_el1", CPENC(3,0,C0
,C2
,0), 0 }, /* RO */
2828 { "id_isar1_el1", CPENC(3,0,C0
,C2
,1), 0 }, /* RO */
2829 { "id_isar2_el1", CPENC(3,0,C0
,C2
,2), 0 }, /* RO */
2830 { "id_isar3_el1", CPENC(3,0,C0
,C2
,3), 0 }, /* RO */
2831 { "id_isar4_el1", CPENC(3,0,C0
,C2
,4), 0 }, /* RO */
2832 { "id_isar5_el1", CPENC(3,0,C0
,C2
,5), 0 }, /* RO */
2833 { "mvfr0_el1", CPENC(3,0,C0
,C3
,0), 0 }, /* RO */
2834 { "mvfr1_el1", CPENC(3,0,C0
,C3
,1), 0 }, /* RO */
2835 { "mvfr2_el1", CPENC(3,0,C0
,C3
,2), 0 }, /* RO */
2836 { "ccsidr_el1", CPENC(3,1,C0
,C0
,0), 0 }, /* RO */
2837 { "id_aa64pfr0_el1", CPENC(3,0,C0
,C4
,0), 0 }, /* RO */
2838 { "id_aa64pfr1_el1", CPENC(3,0,C0
,C4
,1), 0 }, /* RO */
2839 { "id_aa64dfr0_el1", CPENC(3,0,C0
,C5
,0), 0 }, /* RO */
2840 { "id_aa64dfr1_el1", CPENC(3,0,C0
,C5
,1), 0 }, /* RO */
2841 { "id_aa64isar0_el1", CPENC(3,0,C0
,C6
,0), 0 }, /* RO */
2842 { "id_aa64isar1_el1", CPENC(3,0,C0
,C6
,1), 0 }, /* RO */
2843 { "id_aa64mmfr0_el1", CPENC(3,0,C0
,C7
,0), 0 }, /* RO */
2844 { "id_aa64mmfr1_el1", CPENC(3,0,C0
,C7
,1), 0 }, /* RO */
2845 { "id_aa64mmfr2_el1", CPENC (3, 0, C0
, C7
, 2), F_ARCHEXT
}, /* RO */
2846 { "id_aa64afr0_el1", CPENC(3,0,C0
,C5
,4), 0 }, /* RO */
2847 { "id_aa64afr1_el1", CPENC(3,0,C0
,C5
,5), 0 }, /* RO */
2848 { "clidr_el1", CPENC(3,1,C0
,C0
,1), 0 }, /* RO */
2849 { "csselr_el1", CPENC(3,2,C0
,C0
,0), 0 }, /* RO */
2850 { "vpidr_el2", CPENC(3,4,C0
,C0
,0), 0 },
2851 { "vmpidr_el2", CPENC(3,4,C0
,C0
,5), 0 },
2852 { "sctlr_el1", CPENC(3,0,C1
,C0
,0), 0 },
2853 { "sctlr_el2", CPENC(3,4,C1
,C0
,0), 0 },
2854 { "sctlr_el3", CPENC(3,6,C1
,C0
,0), 0 },
2855 { "sctlr_el12", CPENC (3, 5, C1
, C0
, 0), F_ARCHEXT
},
2856 { "actlr_el1", CPENC(3,0,C1
,C0
,1), 0 },
2857 { "actlr_el2", CPENC(3,4,C1
,C0
,1), 0 },
2858 { "actlr_el3", CPENC(3,6,C1
,C0
,1), 0 },
2859 { "cpacr_el1", CPENC(3,0,C1
,C0
,2), 0 },
2860 { "cpacr_el12", CPENC (3, 5, C1
, C0
, 2), F_ARCHEXT
},
2861 { "cptr_el2", CPENC(3,4,C1
,C1
,2), 0 },
2862 { "cptr_el3", CPENC(3,6,C1
,C1
,2), 0 },
2863 { "scr_el3", CPENC(3,6,C1
,C1
,0), 0 },
2864 { "hcr_el2", CPENC(3,4,C1
,C1
,0), 0 },
2865 { "mdcr_el2", CPENC(3,4,C1
,C1
,1), 0 },
2866 { "mdcr_el3", CPENC(3,6,C1
,C3
,1), 0 },
2867 { "hstr_el2", CPENC(3,4,C1
,C1
,3), 0 },
2868 { "hacr_el2", CPENC(3,4,C1
,C1
,7), 0 },
2869 { "ttbr0_el1", CPENC(3,0,C2
,C0
,0), 0 },
2870 { "ttbr1_el1", CPENC(3,0,C2
,C0
,1), 0 },
2871 { "ttbr0_el2", CPENC(3,4,C2
,C0
,0), 0 },
2872 { "ttbr1_el2", CPENC (3, 4, C2
, C0
, 1), F_ARCHEXT
},
2873 { "ttbr0_el3", CPENC(3,6,C2
,C0
,0), 0 },
2874 { "ttbr0_el12", CPENC (3, 5, C2
, C0
, 0), F_ARCHEXT
},
2875 { "ttbr1_el12", CPENC (3, 5, C2
, C0
, 1), F_ARCHEXT
},
2876 { "vttbr_el2", CPENC(3,4,C2
,C1
,0), 0 },
2877 { "tcr_el1", CPENC(3,0,C2
,C0
,2), 0 },
2878 { "tcr_el2", CPENC(3,4,C2
,C0
,2), 0 },
2879 { "tcr_el3", CPENC(3,6,C2
,C0
,2), 0 },
2880 { "tcr_el12", CPENC (3, 5, C2
, C0
, 2), F_ARCHEXT
},
2881 { "vtcr_el2", CPENC(3,4,C2
,C1
,2), 0 },
2882 { "afsr0_el1", CPENC(3,0,C5
,C1
,0), 0 },
2883 { "afsr1_el1", CPENC(3,0,C5
,C1
,1), 0 },
2884 { "afsr0_el2", CPENC(3,4,C5
,C1
,0), 0 },
2885 { "afsr1_el2", CPENC(3,4,C5
,C1
,1), 0 },
2886 { "afsr0_el3", CPENC(3,6,C5
,C1
,0), 0 },
2887 { "afsr0_el12", CPENC (3, 5, C5
, C1
, 0), F_ARCHEXT
},
2888 { "afsr1_el3", CPENC(3,6,C5
,C1
,1), 0 },
2889 { "afsr1_el12", CPENC (3, 5, C5
, C1
, 1), F_ARCHEXT
},
2890 { "esr_el1", CPENC(3,0,C5
,C2
,0), 0 },
2891 { "esr_el2", CPENC(3,4,C5
,C2
,0), 0 },
2892 { "esr_el3", CPENC(3,6,C5
,C2
,0), 0 },
2893 { "esr_el12", CPENC (3, 5, C5
, C2
, 0), F_ARCHEXT
},
2894 { "vsesr_el2", CPENC (3, 4, C5
, C2
, 3), F_ARCHEXT
}, /* RO */
2895 { "fpexc32_el2", CPENC(3,4,C5
,C3
,0), 0 },
2896 { "erridr_el1", CPENC (3, 0, C5
, C3
, 0), F_ARCHEXT
}, /* RO */
2897 { "errselr_el1", CPENC (3, 0, C5
, C3
, 1), F_ARCHEXT
},
2898 { "erxfr_el1", CPENC (3, 0, C5
, C4
, 0), F_ARCHEXT
}, /* RO */
2899 { "erxctlr_el1", CPENC (3, 0, C5
, C4
, 1), F_ARCHEXT
},
2900 { "erxstatus_el1", CPENC (3, 0, C5
, C4
, 2), F_ARCHEXT
},
2901 { "erxaddr_el1", CPENC (3, 0, C5
, C4
, 3), F_ARCHEXT
},
2902 { "erxmisc0_el1", CPENC (3, 0, C5
, C5
, 0), F_ARCHEXT
},
2903 { "erxmisc1_el1", CPENC (3, 0, C5
, C5
, 1), F_ARCHEXT
},
2904 { "far_el1", CPENC(3,0,C6
,C0
,0), 0 },
2905 { "far_el2", CPENC(3,4,C6
,C0
,0), 0 },
2906 { "far_el3", CPENC(3,6,C6
,C0
,0), 0 },
2907 { "far_el12", CPENC (3, 5, C6
, C0
, 0), F_ARCHEXT
},
2908 { "hpfar_el2", CPENC(3,4,C6
,C0
,4), 0 },
2909 { "par_el1", CPENC(3,0,C7
,C4
,0), 0 },
2910 { "mair_el1", CPENC(3,0,C10
,C2
,0), 0 },
2911 { "mair_el2", CPENC(3,4,C10
,C2
,0), 0 },
2912 { "mair_el3", CPENC(3,6,C10
,C2
,0), 0 },
2913 { "mair_el12", CPENC (3, 5, C10
, C2
, 0), F_ARCHEXT
},
2914 { "amair_el1", CPENC(3,0,C10
,C3
,0), 0 },
2915 { "amair_el2", CPENC(3,4,C10
,C3
,0), 0 },
2916 { "amair_el3", CPENC(3,6,C10
,C3
,0), 0 },
2917 { "amair_el12", CPENC (3, 5, C10
, C3
, 0), F_ARCHEXT
},
2918 { "vbar_el1", CPENC(3,0,C12
,C0
,0), 0 },
2919 { "vbar_el2", CPENC(3,4,C12
,C0
,0), 0 },
2920 { "vbar_el3", CPENC(3,6,C12
,C0
,0), 0 },
2921 { "vbar_el12", CPENC (3, 5, C12
, C0
, 0), F_ARCHEXT
},
2922 { "rvbar_el1", CPENC(3,0,C12
,C0
,1), 0 }, /* RO */
2923 { "rvbar_el2", CPENC(3,4,C12
,C0
,1), 0 }, /* RO */
2924 { "rvbar_el3", CPENC(3,6,C12
,C0
,1), 0 }, /* RO */
2925 { "rmr_el1", CPENC(3,0,C12
,C0
,2), 0 },
2926 { "rmr_el2", CPENC(3,4,C12
,C0
,2), 0 },
2927 { "rmr_el3", CPENC(3,6,C12
,C0
,2), 0 },
2928 { "isr_el1", CPENC(3,0,C12
,C1
,0), 0 }, /* RO */
2929 { "disr_el1", CPENC (3, 0, C12
, C1
, 1), F_ARCHEXT
},
2930 { "vdisr_el2", CPENC (3, 4, C12
, C1
, 1), F_ARCHEXT
},
2931 { "contextidr_el1", CPENC(3,0,C13
,C0
,1), 0 },
2932 { "contextidr_el2", CPENC (3, 4, C13
, C0
, 1), F_ARCHEXT
},
2933 { "contextidr_el12", CPENC (3, 5, C13
, C0
, 1), F_ARCHEXT
},
2934 { "tpidr_el0", CPENC(3,3,C13
,C0
,2), 0 },
2935 { "tpidrro_el0", CPENC(3,3,C13
,C0
,3), 0 }, /* RO */
2936 { "tpidr_el1", CPENC(3,0,C13
,C0
,4), 0 },
2937 { "tpidr_el2", CPENC(3,4,C13
,C0
,2), 0 },
2938 { "tpidr_el3", CPENC(3,6,C13
,C0
,2), 0 },
2939 { "teecr32_el1", CPENC(2,2,C0
, C0
,0), 0 }, /* See section 3.9.7.1 */
2940 { "cntfrq_el0", CPENC(3,3,C14
,C0
,0), 0 }, /* RO */
2941 { "cntpct_el0", CPENC(3,3,C14
,C0
,1), 0 }, /* RO */
2942 { "cntvct_el0", CPENC(3,3,C14
,C0
,2), 0 }, /* RO */
2943 { "cntvoff_el2", CPENC(3,4,C14
,C0
,3), 0 },
2944 { "cntkctl_el1", CPENC(3,0,C14
,C1
,0), 0 },
2945 { "cntkctl_el12", CPENC (3, 5, C14
, C1
, 0), F_ARCHEXT
},
2946 { "cnthctl_el2", CPENC(3,4,C14
,C1
,0), 0 },
2947 { "cntp_tval_el0", CPENC(3,3,C14
,C2
,0), 0 },
2948 { "cntp_tval_el02", CPENC (3, 5, C14
, C2
, 0), F_ARCHEXT
},
2949 { "cntp_ctl_el0", CPENC(3,3,C14
,C2
,1), 0 },
2950 { "cntp_ctl_el02", CPENC (3, 5, C14
, C2
, 1), F_ARCHEXT
},
2951 { "cntp_cval_el0", CPENC(3,3,C14
,C2
,2), 0 },
2952 { "cntp_cval_el02", CPENC (3, 5, C14
, C2
, 2), F_ARCHEXT
},
2953 { "cntv_tval_el0", CPENC(3,3,C14
,C3
,0), 0 },
2954 { "cntv_tval_el02", CPENC (3, 5, C14
, C3
, 0), F_ARCHEXT
},
2955 { "cntv_ctl_el0", CPENC(3,3,C14
,C3
,1), 0 },
2956 { "cntv_ctl_el02", CPENC (3, 5, C14
, C3
, 1), F_ARCHEXT
},
2957 { "cntv_cval_el0", CPENC(3,3,C14
,C3
,2), 0 },
2958 { "cntv_cval_el02", CPENC (3, 5, C14
, C3
, 2), F_ARCHEXT
},
2959 { "cnthp_tval_el2", CPENC(3,4,C14
,C2
,0), 0 },
2960 { "cnthp_ctl_el2", CPENC(3,4,C14
,C2
,1), 0 },
2961 { "cnthp_cval_el2", CPENC(3,4,C14
,C2
,2), 0 },
2962 { "cntps_tval_el1", CPENC(3,7,C14
,C2
,0), 0 },
2963 { "cntps_ctl_el1", CPENC(3,7,C14
,C2
,1), 0 },
2964 { "cntps_cval_el1", CPENC(3,7,C14
,C2
,2), 0 },
2965 { "cnthv_tval_el2", CPENC (3, 4, C14
, C3
, 0), F_ARCHEXT
},
2966 { "cnthv_ctl_el2", CPENC (3, 4, C14
, C3
, 1), F_ARCHEXT
},
2967 { "cnthv_cval_el2", CPENC (3, 4, C14
, C3
, 2), F_ARCHEXT
},
2968 { "dacr32_el2", CPENC(3,4,C3
,C0
,0), 0 },
2969 { "ifsr32_el2", CPENC(3,4,C5
,C0
,1), 0 },
2970 { "teehbr32_el1", CPENC(2,2,C1
,C0
,0), 0 },
2971 { "sder32_el3", CPENC(3,6,C1
,C1
,1), 0 },
2972 { "mdscr_el1", CPENC(2,0,C0
, C2
, 2), 0 },
2973 { "mdccsr_el0", CPENC(2,3,C0
, C1
, 0), 0 }, /* r */
2974 { "mdccint_el1", CPENC(2,0,C0
, C2
, 0), 0 },
2975 { "dbgdtr_el0", CPENC(2,3,C0
, C4
, 0), 0 },
2976 { "dbgdtrrx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* r */
2977 { "dbgdtrtx_el0", CPENC(2,3,C0
, C5
, 0), 0 }, /* w */
2978 { "osdtrrx_el1", CPENC(2,0,C0
, C0
, 2), 0 }, /* r */
2979 { "osdtrtx_el1", CPENC(2,0,C0
, C3
, 2), 0 }, /* w */
2980 { "oseccr_el1", CPENC(2,0,C0
, C6
, 2), 0 },
2981 { "dbgvcr32_el2", CPENC(2,4,C0
, C7
, 0), 0 },
2982 { "dbgbvr0_el1", CPENC(2,0,C0
, C0
, 4), 0 },
2983 { "dbgbvr1_el1", CPENC(2,0,C0
, C1
, 4), 0 },
2984 { "dbgbvr2_el1", CPENC(2,0,C0
, C2
, 4), 0 },
2985 { "dbgbvr3_el1", CPENC(2,0,C0
, C3
, 4), 0 },
2986 { "dbgbvr4_el1", CPENC(2,0,C0
, C4
, 4), 0 },
2987 { "dbgbvr5_el1", CPENC(2,0,C0
, C5
, 4), 0 },
2988 { "dbgbvr6_el1", CPENC(2,0,C0
, C6
, 4), 0 },
2989 { "dbgbvr7_el1", CPENC(2,0,C0
, C7
, 4), 0 },
2990 { "dbgbvr8_el1", CPENC(2,0,C0
, C8
, 4), 0 },
2991 { "dbgbvr9_el1", CPENC(2,0,C0
, C9
, 4), 0 },
2992 { "dbgbvr10_el1", CPENC(2,0,C0
, C10
,4), 0 },
2993 { "dbgbvr11_el1", CPENC(2,0,C0
, C11
,4), 0 },
2994 { "dbgbvr12_el1", CPENC(2,0,C0
, C12
,4), 0 },
2995 { "dbgbvr13_el1", CPENC(2,0,C0
, C13
,4), 0 },
2996 { "dbgbvr14_el1", CPENC(2,0,C0
, C14
,4), 0 },
2997 { "dbgbvr15_el1", CPENC(2,0,C0
, C15
,4), 0 },
2998 { "dbgbcr0_el1", CPENC(2,0,C0
, C0
, 5), 0 },
2999 { "dbgbcr1_el1", CPENC(2,0,C0
, C1
, 5), 0 },
3000 { "dbgbcr2_el1", CPENC(2,0,C0
, C2
, 5), 0 },
3001 { "dbgbcr3_el1", CPENC(2,0,C0
, C3
, 5), 0 },
3002 { "dbgbcr4_el1", CPENC(2,0,C0
, C4
, 5), 0 },
3003 { "dbgbcr5_el1", CPENC(2,0,C0
, C5
, 5), 0 },
3004 { "dbgbcr6_el1", CPENC(2,0,C0
, C6
, 5), 0 },
3005 { "dbgbcr7_el1", CPENC(2,0,C0
, C7
, 5), 0 },
3006 { "dbgbcr8_el1", CPENC(2,0,C0
, C8
, 5), 0 },
3007 { "dbgbcr9_el1", CPENC(2,0,C0
, C9
, 5), 0 },
3008 { "dbgbcr10_el1", CPENC(2,0,C0
, C10
,5), 0 },
3009 { "dbgbcr11_el1", CPENC(2,0,C0
, C11
,5), 0 },
3010 { "dbgbcr12_el1", CPENC(2,0,C0
, C12
,5), 0 },
3011 { "dbgbcr13_el1", CPENC(2,0,C0
, C13
,5), 0 },
3012 { "dbgbcr14_el1", CPENC(2,0,C0
, C14
,5), 0 },
3013 { "dbgbcr15_el1", CPENC(2,0,C0
, C15
,5), 0 },
3014 { "dbgwvr0_el1", CPENC(2,0,C0
, C0
, 6), 0 },
3015 { "dbgwvr1_el1", CPENC(2,0,C0
, C1
, 6), 0 },
3016 { "dbgwvr2_el1", CPENC(2,0,C0
, C2
, 6), 0 },
3017 { "dbgwvr3_el1", CPENC(2,0,C0
, C3
, 6), 0 },
3018 { "dbgwvr4_el1", CPENC(2,0,C0
, C4
, 6), 0 },
3019 { "dbgwvr5_el1", CPENC(2,0,C0
, C5
, 6), 0 },
3020 { "dbgwvr6_el1", CPENC(2,0,C0
, C6
, 6), 0 },
3021 { "dbgwvr7_el1", CPENC(2,0,C0
, C7
, 6), 0 },
3022 { "dbgwvr8_el1", CPENC(2,0,C0
, C8
, 6), 0 },
3023 { "dbgwvr9_el1", CPENC(2,0,C0
, C9
, 6), 0 },
3024 { "dbgwvr10_el1", CPENC(2,0,C0
, C10
,6), 0 },
3025 { "dbgwvr11_el1", CPENC(2,0,C0
, C11
,6), 0 },
3026 { "dbgwvr12_el1", CPENC(2,0,C0
, C12
,6), 0 },
3027 { "dbgwvr13_el1", CPENC(2,0,C0
, C13
,6), 0 },
3028 { "dbgwvr14_el1", CPENC(2,0,C0
, C14
,6), 0 },
3029 { "dbgwvr15_el1", CPENC(2,0,C0
, C15
,6), 0 },
3030 { "dbgwcr0_el1", CPENC(2,0,C0
, C0
, 7), 0 },
3031 { "dbgwcr1_el1", CPENC(2,0,C0
, C1
, 7), 0 },
3032 { "dbgwcr2_el1", CPENC(2,0,C0
, C2
, 7), 0 },
3033 { "dbgwcr3_el1", CPENC(2,0,C0
, C3
, 7), 0 },
3034 { "dbgwcr4_el1", CPENC(2,0,C0
, C4
, 7), 0 },
3035 { "dbgwcr5_el1", CPENC(2,0,C0
, C5
, 7), 0 },
3036 { "dbgwcr6_el1", CPENC(2,0,C0
, C6
, 7), 0 },
3037 { "dbgwcr7_el1", CPENC(2,0,C0
, C7
, 7), 0 },
3038 { "dbgwcr8_el1", CPENC(2,0,C0
, C8
, 7), 0 },
3039 { "dbgwcr9_el1", CPENC(2,0,C0
, C9
, 7), 0 },
3040 { "dbgwcr10_el1", CPENC(2,0,C0
, C10
,7), 0 },
3041 { "dbgwcr11_el1", CPENC(2,0,C0
, C11
,7), 0 },
3042 { "dbgwcr12_el1", CPENC(2,0,C0
, C12
,7), 0 },
3043 { "dbgwcr13_el1", CPENC(2,0,C0
, C13
,7), 0 },
3044 { "dbgwcr14_el1", CPENC(2,0,C0
, C14
,7), 0 },
3045 { "dbgwcr15_el1", CPENC(2,0,C0
, C15
,7), 0 },
3046 { "mdrar_el1", CPENC(2,0,C1
, C0
, 0), 0 }, /* r */
3047 { "oslar_el1", CPENC(2,0,C1
, C0
, 4), 0 }, /* w */
3048 { "oslsr_el1", CPENC(2,0,C1
, C1
, 4), 0 }, /* r */
3049 { "osdlr_el1", CPENC(2,0,C1
, C3
, 4), 0 },
3050 { "dbgprcr_el1", CPENC(2,0,C1
, C4
, 4), 0 },
3051 { "dbgclaimset_el1", CPENC(2,0,C7
, C8
, 6), 0 },
3052 { "dbgclaimclr_el1", CPENC(2,0,C7
, C9
, 6), 0 },
3053 { "dbgauthstatus_el1", CPENC(2,0,C7
, C14
,6), 0 }, /* r */
3054 { "pmblimitr_el1", CPENC (3, 0, C9
, C10
, 0), F_ARCHEXT
}, /* rw */
3055 { "pmbptr_el1", CPENC (3, 0, C9
, C10
, 1), F_ARCHEXT
}, /* rw */
3056 { "pmbsr_el1", CPENC (3, 0, C9
, C10
, 3), F_ARCHEXT
}, /* rw */
3057 { "pmbidr_el1", CPENC (3, 0, C9
, C10
, 7), F_ARCHEXT
}, /* ro */
3058 { "pmscr_el1", CPENC (3, 0, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3059 { "pmsicr_el1", CPENC (3, 0, C9
, C9
, 2), F_ARCHEXT
}, /* rw */
3060 { "pmsirr_el1", CPENC (3, 0, C9
, C9
, 3), F_ARCHEXT
}, /* rw */
3061 { "pmsfcr_el1", CPENC (3, 0, C9
, C9
, 4), F_ARCHEXT
}, /* rw */
3062 { "pmsevfr_el1", CPENC (3, 0, C9
, C9
, 5), F_ARCHEXT
}, /* rw */
3063 { "pmslatfr_el1", CPENC (3, 0, C9
, C9
, 6), F_ARCHEXT
}, /* rw */
3064 { "pmsidr_el1", CPENC (3, 0, C9
, C9
, 7), F_ARCHEXT
}, /* ro */
3065 { "pmscr_el2", CPENC (3, 4, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3066 { "pmscr_el12", CPENC (3, 5, C9
, C9
, 0), F_ARCHEXT
}, /* rw */
3067 { "pmcr_el0", CPENC(3,3,C9
,C12
, 0), 0 },
3068 { "pmcntenset_el0", CPENC(3,3,C9
,C12
, 1), 0 },
3069 { "pmcntenclr_el0", CPENC(3,3,C9
,C12
, 2), 0 },
3070 { "pmovsclr_el0", CPENC(3,3,C9
,C12
, 3), 0 },
3071 { "pmswinc_el0", CPENC(3,3,C9
,C12
, 4), 0 }, /* w */
3072 { "pmselr_el0", CPENC(3,3,C9
,C12
, 5), 0 },
3073 { "pmceid0_el0", CPENC(3,3,C9
,C12
, 6), 0 }, /* r */
3074 { "pmceid1_el0", CPENC(3,3,C9
,C12
, 7), 0 }, /* r */
3075 { "pmccntr_el0", CPENC(3,3,C9
,C13
, 0), 0 },
3076 { "pmxevtyper_el0", CPENC(3,3,C9
,C13
, 1), 0 },
3077 { "pmxevcntr_el0", CPENC(3,3,C9
,C13
, 2), 0 },
3078 { "pmuserenr_el0", CPENC(3,3,C9
,C14
, 0), 0 },
3079 { "pmintenset_el1", CPENC(3,0,C9
,C14
, 1), 0 },
3080 { "pmintenclr_el1", CPENC(3,0,C9
,C14
, 2), 0 },
3081 { "pmovsset_el0", CPENC(3,3,C9
,C14
, 3), 0 },
3082 { "pmevcntr0_el0", CPENC(3,3,C14
,C8
, 0), 0 },
3083 { "pmevcntr1_el0", CPENC(3,3,C14
,C8
, 1), 0 },
3084 { "pmevcntr2_el0", CPENC(3,3,C14
,C8
, 2), 0 },
3085 { "pmevcntr3_el0", CPENC(3,3,C14
,C8
, 3), 0 },
3086 { "pmevcntr4_el0", CPENC(3,3,C14
,C8
, 4), 0 },
3087 { "pmevcntr5_el0", CPENC(3,3,C14
,C8
, 5), 0 },
3088 { "pmevcntr6_el0", CPENC(3,3,C14
,C8
, 6), 0 },
3089 { "pmevcntr7_el0", CPENC(3,3,C14
,C8
, 7), 0 },
3090 { "pmevcntr8_el0", CPENC(3,3,C14
,C9
, 0), 0 },
3091 { "pmevcntr9_el0", CPENC(3,3,C14
,C9
, 1), 0 },
3092 { "pmevcntr10_el0", CPENC(3,3,C14
,C9
, 2), 0 },
3093 { "pmevcntr11_el0", CPENC(3,3,C14
,C9
, 3), 0 },
3094 { "pmevcntr12_el0", CPENC(3,3,C14
,C9
, 4), 0 },
3095 { "pmevcntr13_el0", CPENC(3,3,C14
,C9
, 5), 0 },
3096 { "pmevcntr14_el0", CPENC(3,3,C14
,C9
, 6), 0 },
3097 { "pmevcntr15_el0", CPENC(3,3,C14
,C9
, 7), 0 },
3098 { "pmevcntr16_el0", CPENC(3,3,C14
,C10
,0), 0 },
3099 { "pmevcntr17_el0", CPENC(3,3,C14
,C10
,1), 0 },
3100 { "pmevcntr18_el0", CPENC(3,3,C14
,C10
,2), 0 },
3101 { "pmevcntr19_el0", CPENC(3,3,C14
,C10
,3), 0 },
3102 { "pmevcntr20_el0", CPENC(3,3,C14
,C10
,4), 0 },
3103 { "pmevcntr21_el0", CPENC(3,3,C14
,C10
,5), 0 },
3104 { "pmevcntr22_el0", CPENC(3,3,C14
,C10
,6), 0 },
3105 { "pmevcntr23_el0", CPENC(3,3,C14
,C10
,7), 0 },
3106 { "pmevcntr24_el0", CPENC(3,3,C14
,C11
,0), 0 },
3107 { "pmevcntr25_el0", CPENC(3,3,C14
,C11
,1), 0 },
3108 { "pmevcntr26_el0", CPENC(3,3,C14
,C11
,2), 0 },
3109 { "pmevcntr27_el0", CPENC(3,3,C14
,C11
,3), 0 },
3110 { "pmevcntr28_el0", CPENC(3,3,C14
,C11
,4), 0 },
3111 { "pmevcntr29_el0", CPENC(3,3,C14
,C11
,5), 0 },
3112 { "pmevcntr30_el0", CPENC(3,3,C14
,C11
,6), 0 },
3113 { "pmevtyper0_el0", CPENC(3,3,C14
,C12
,0), 0 },
3114 { "pmevtyper1_el0", CPENC(3,3,C14
,C12
,1), 0 },
3115 { "pmevtyper2_el0", CPENC(3,3,C14
,C12
,2), 0 },
3116 { "pmevtyper3_el0", CPENC(3,3,C14
,C12
,3), 0 },
3117 { "pmevtyper4_el0", CPENC(3,3,C14
,C12
,4), 0 },
3118 { "pmevtyper5_el0", CPENC(3,3,C14
,C12
,5), 0 },
3119 { "pmevtyper6_el0", CPENC(3,3,C14
,C12
,6), 0 },
3120 { "pmevtyper7_el0", CPENC(3,3,C14
,C12
,7), 0 },
3121 { "pmevtyper8_el0", CPENC(3,3,C14
,C13
,0), 0 },
3122 { "pmevtyper9_el0", CPENC(3,3,C14
,C13
,1), 0 },
3123 { "pmevtyper10_el0", CPENC(3,3,C14
,C13
,2), 0 },
3124 { "pmevtyper11_el0", CPENC(3,3,C14
,C13
,3), 0 },
3125 { "pmevtyper12_el0", CPENC(3,3,C14
,C13
,4), 0 },
3126 { "pmevtyper13_el0", CPENC(3,3,C14
,C13
,5), 0 },
3127 { "pmevtyper14_el0", CPENC(3,3,C14
,C13
,6), 0 },
3128 { "pmevtyper15_el0", CPENC(3,3,C14
,C13
,7), 0 },
3129 { "pmevtyper16_el0", CPENC(3,3,C14
,C14
,0), 0 },
3130 { "pmevtyper17_el0", CPENC(3,3,C14
,C14
,1), 0 },
3131 { "pmevtyper18_el0", CPENC(3,3,C14
,C14
,2), 0 },
3132 { "pmevtyper19_el0", CPENC(3,3,C14
,C14
,3), 0 },
3133 { "pmevtyper20_el0", CPENC(3,3,C14
,C14
,4), 0 },
3134 { "pmevtyper21_el0", CPENC(3,3,C14
,C14
,5), 0 },
3135 { "pmevtyper22_el0", CPENC(3,3,C14
,C14
,6), 0 },
3136 { "pmevtyper23_el0", CPENC(3,3,C14
,C14
,7), 0 },
3137 { "pmevtyper24_el0", CPENC(3,3,C14
,C15
,0), 0 },
3138 { "pmevtyper25_el0", CPENC(3,3,C14
,C15
,1), 0 },
3139 { "pmevtyper26_el0", CPENC(3,3,C14
,C15
,2), 0 },
3140 { "pmevtyper27_el0", CPENC(3,3,C14
,C15
,3), 0 },
3141 { "pmevtyper28_el0", CPENC(3,3,C14
,C15
,4), 0 },
3142 { "pmevtyper29_el0", CPENC(3,3,C14
,C15
,5), 0 },
3143 { "pmevtyper30_el0", CPENC(3,3,C14
,C15
,6), 0 },
3144 { "pmccfiltr_el0", CPENC(3,3,C14
,C15
,7), 0 },
3145 { 0, CPENC(0,0,0,0,0), 0 },
3149 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg
*reg
)
3151 return (reg
->flags
& F_DEPRECATED
) != 0;
3155 aarch64_sys_reg_supported_p (const aarch64_feature_set features
,
3156 const aarch64_sys_reg
*reg
)
3158 if (!(reg
->flags
& F_ARCHEXT
))
3161 /* PAN. Values are from aarch64_sys_regs. */
3162 if (reg
->value
== CPEN_(0,C2
,3)
3163 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3166 /* Virtualization host extensions: system registers. */
3167 if ((reg
->value
== CPENC (3, 4, C2
, C0
, 1)
3168 || reg
->value
== CPENC (3, 4, C13
, C0
, 1)
3169 || reg
->value
== CPENC (3, 4, C14
, C3
, 0)
3170 || reg
->value
== CPENC (3, 4, C14
, C3
, 1)
3171 || reg
->value
== CPENC (3, 4, C14
, C3
, 2))
3172 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3175 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3176 if ((reg
->value
== CPEN_ (5, C0
, 0)
3177 || reg
->value
== CPEN_ (5, C0
, 1)
3178 || reg
->value
== CPENC (3, 5, C1
, C0
, 0)
3179 || reg
->value
== CPENC (3, 5, C1
, C0
, 2)
3180 || reg
->value
== CPENC (3, 5, C2
, C0
, 0)
3181 || reg
->value
== CPENC (3, 5, C2
, C0
, 1)
3182 || reg
->value
== CPENC (3, 5, C2
, C0
, 2)
3183 || reg
->value
== CPENC (3, 5, C5
, C1
, 0)
3184 || reg
->value
== CPENC (3, 5, C5
, C1
, 1)
3185 || reg
->value
== CPENC (3, 5, C5
, C2
, 0)
3186 || reg
->value
== CPENC (3, 5, C6
, C0
, 0)
3187 || reg
->value
== CPENC (3, 5, C10
, C2
, 0)
3188 || reg
->value
== CPENC (3, 5, C10
, C3
, 0)
3189 || reg
->value
== CPENC (3, 5, C12
, C0
, 0)
3190 || reg
->value
== CPENC (3, 5, C13
, C0
, 1)
3191 || reg
->value
== CPENC (3, 5, C14
, C1
, 0))
3192 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3195 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3196 if ((reg
->value
== CPENC (3, 5, C14
, C2
, 0)
3197 || reg
->value
== CPENC (3, 5, C14
, C2
, 1)
3198 || reg
->value
== CPENC (3, 5, C14
, C2
, 2)
3199 || reg
->value
== CPENC (3, 5, C14
, C3
, 0)
3200 || reg
->value
== CPENC (3, 5, C14
, C3
, 1)
3201 || reg
->value
== CPENC (3, 5, C14
, C3
, 2))
3202 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_1
))
3204 /* ARMv8.2 features. */
3206 /* ID_AA64MMFR2_EL1. */
3207 if (reg
->value
== CPENC (3, 0, C0
, C7
, 2)
3208 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3212 if (reg
->value
== CPEN_ (0, C2
, 4)
3213 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3216 /* RAS extension. */
3218 /* ERRIDR_EL1 and ERRSELR_EL1. */
3219 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
3220 || reg
->value
== CPENC (3, 0, C5
, C3
, 1))
3221 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3224 /* ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1, ERXMISC0_EL1 AND
3226 if ((reg
->value
== CPENC (3, 0, C5
, C3
, 0)
3227 || reg
->value
== CPENC (3, 0, C5
, C3
,1)
3228 || reg
->value
== CPENC (3, 0, C5
, C3
, 2)
3229 || reg
->value
== CPENC (3, 0, C5
, C3
, 3)
3230 || reg
->value
== CPENC (3, 0, C5
, C5
, 0)
3231 || reg
->value
== CPENC (3, 0, C5
, C5
, 1))
3232 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3235 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3236 if ((reg
->value
== CPENC (3, 4, C5
, C2
, 3)
3237 || reg
->value
== CPENC (3, 0, C12
, C1
, 1)
3238 || reg
->value
== CPENC (3, 4, C12
, C1
, 1))
3239 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_RAS
))
3242 /* Statistical Profiling extension. */
3243 if ((reg
->value
== CPENC (3, 0, C9
, C10
, 0)
3244 || reg
->value
== CPENC (3, 0, C9
, C10
, 1)
3245 || reg
->value
== CPENC (3, 0, C9
, C10
, 3)
3246 || reg
->value
== CPENC (3, 0, C9
, C10
, 7)
3247 || reg
->value
== CPENC (3, 0, C9
, C9
, 0)
3248 || reg
->value
== CPENC (3, 0, C9
, C9
, 2)
3249 || reg
->value
== CPENC (3, 0, C9
, C9
, 3)
3250 || reg
->value
== CPENC (3, 0, C9
, C9
, 4)
3251 || reg
->value
== CPENC (3, 0, C9
, C9
, 5)
3252 || reg
->value
== CPENC (3, 0, C9
, C9
, 6)
3253 || reg
->value
== CPENC (3, 0, C9
, C9
, 7)
3254 || reg
->value
== CPENC (3, 4, C9
, C9
, 0)
3255 || reg
->value
== CPENC (3, 5, C9
, C9
, 0))
3256 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PROFILE
))
3262 const aarch64_sys_reg aarch64_pstatefields
[] =
3264 { "spsel", 0x05, 0 },
3265 { "daifset", 0x1e, 0 },
3266 { "daifclr", 0x1f, 0 },
3267 { "pan", 0x04, F_ARCHEXT
},
3268 { "uao", 0x03, F_ARCHEXT
},
3269 { 0, CPENC(0,0,0,0,0), 0 },
3273 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
3274 const aarch64_sys_reg
*reg
)
3276 if (!(reg
->flags
& F_ARCHEXT
))
3279 /* PAN. Values are from aarch64_pstatefields. */
3280 if (reg
->value
== 0x04
3281 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PAN
))
3284 /* UAO. Values are from aarch64_pstatefields. */
3285 if (reg
->value
== 0x03
3286 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3289 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3290 if ((reg
->value
== CPENS (0, C7
, C9
, 0)
3291 || reg
->value
== CPENS (0, C7
, C9
, 1))
3292 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
3298 const aarch64_sys_ins_reg aarch64_sys_regs_ic
[] =
3300 { "ialluis", CPENS(0,C7
,C1
,0), 0 },
3301 { "iallu", CPENS(0,C7
,C5
,0), 0 },
3302 { "ivau", CPENS (3, C7
, C5
, 1), F_HASXT
},
3303 { 0, CPENS(0,0,0,0), 0 }
3306 const aarch64_sys_ins_reg aarch64_sys_regs_dc
[] =
3308 { "zva", CPENS (3, C7
, C4
, 1), F_HASXT
},
3309 { "ivac", CPENS (0, C7
, C6
, 1), F_HASXT
},
3310 { "isw", CPENS (0, C7
, C6
, 2), F_HASXT
},
3311 { "cvac", CPENS (3, C7
, C10
, 1), F_HASXT
},
3312 { "csw", CPENS (0, C7
, C10
, 2), F_HASXT
},
3313 { "cvau", CPENS (3, C7
, C11
, 1), F_HASXT
},
3314 { "cvap", CPENS (3, C7
, C12
, 1), F_HASXT
| F_ARCHEXT
},
3315 { "civac", CPENS (3, C7
, C14
, 1), F_HASXT
},
3316 { "cisw", CPENS (0, C7
, C14
, 2), F_HASXT
},
3317 { 0, CPENS(0,0,0,0), 0 }
3320 const aarch64_sys_ins_reg aarch64_sys_regs_at
[] =
3322 { "s1e1r", CPENS (0, C7
, C8
, 0), F_HASXT
},
3323 { "s1e1w", CPENS (0, C7
, C8
, 1), F_HASXT
},
3324 { "s1e0r", CPENS (0, C7
, C8
, 2), F_HASXT
},
3325 { "s1e0w", CPENS (0, C7
, C8
, 3), F_HASXT
},
3326 { "s12e1r", CPENS (4, C7
, C8
, 4), F_HASXT
},
3327 { "s12e1w", CPENS (4, C7
, C8
, 5), F_HASXT
},
3328 { "s12e0r", CPENS (4, C7
, C8
, 6), F_HASXT
},
3329 { "s12e0w", CPENS (4, C7
, C8
, 7), F_HASXT
},
3330 { "s1e2r", CPENS (4, C7
, C8
, 0), F_HASXT
},
3331 { "s1e2w", CPENS (4, C7
, C8
, 1), F_HASXT
},
3332 { "s1e3r", CPENS (6, C7
, C8
, 0), F_HASXT
},
3333 { "s1e3w", CPENS (6, C7
, C8
, 1), F_HASXT
},
3334 { "s1e1rp", CPENS (0, C7
, C9
, 0), F_HASXT
| F_ARCHEXT
},
3335 { "s1e1wp", CPENS (0, C7
, C9
, 1), F_HASXT
| F_ARCHEXT
},
3336 { 0, CPENS(0,0,0,0), 0 }
3339 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi
[] =
3341 { "vmalle1", CPENS(0,C8
,C7
,0), 0 },
3342 { "vae1", CPENS (0, C8
, C7
, 1), F_HASXT
},
3343 { "aside1", CPENS (0, C8
, C7
, 2), F_HASXT
},
3344 { "vaae1", CPENS (0, C8
, C7
, 3), F_HASXT
},
3345 { "vmalle1is", CPENS(0,C8
,C3
,0), 0 },
3346 { "vae1is", CPENS (0, C8
, C3
, 1), F_HASXT
},
3347 { "aside1is", CPENS (0, C8
, C3
, 2), F_HASXT
},
3348 { "vaae1is", CPENS (0, C8
, C3
, 3), F_HASXT
},
3349 { "ipas2e1is", CPENS (4, C8
, C0
, 1), F_HASXT
},
3350 { "ipas2le1is",CPENS (4, C8
, C0
, 5), F_HASXT
},
3351 { "ipas2e1", CPENS (4, C8
, C4
, 1), F_HASXT
},
3352 { "ipas2le1", CPENS (4, C8
, C4
, 5), F_HASXT
},
3353 { "vae2", CPENS (4, C8
, C7
, 1), F_HASXT
},
3354 { "vae2is", CPENS (4, C8
, C3
, 1), F_HASXT
},
3355 { "vmalls12e1",CPENS(4,C8
,C7
,6), 0 },
3356 { "vmalls12e1is",CPENS(4,C8
,C3
,6), 0 },
3357 { "vae3", CPENS (6, C8
, C7
, 1), F_HASXT
},
3358 { "vae3is", CPENS (6, C8
, C3
, 1), F_HASXT
},
3359 { "alle2", CPENS(4,C8
,C7
,0), 0 },
3360 { "alle2is", CPENS(4,C8
,C3
,0), 0 },
3361 { "alle1", CPENS(4,C8
,C7
,4), 0 },
3362 { "alle1is", CPENS(4,C8
,C3
,4), 0 },
3363 { "alle3", CPENS(6,C8
,C7
,0), 0 },
3364 { "alle3is", CPENS(6,C8
,C3
,0), 0 },
3365 { "vale1is", CPENS (0, C8
, C3
, 5), F_HASXT
},
3366 { "vale2is", CPENS (4, C8
, C3
, 5), F_HASXT
},
3367 { "vale3is", CPENS (6, C8
, C3
, 5), F_HASXT
},
3368 { "vaale1is", CPENS (0, C8
, C3
, 7), F_HASXT
},
3369 { "vale1", CPENS (0, C8
, C7
, 5), F_HASXT
},
3370 { "vale2", CPENS (4, C8
, C7
, 5), F_HASXT
},
3371 { "vale3", CPENS (6, C8
, C7
, 5), F_HASXT
},
3372 { "vaale1", CPENS (0, C8
, C7
, 7), F_HASXT
},
3373 { 0, CPENS(0,0,0,0), 0 }
3377 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
3379 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
3383 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
3384 const aarch64_sys_ins_reg
*reg
)
3386 if (!(reg
->flags
& F_ARCHEXT
))
3389 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3390 if (reg
->value
== CPENS (3, C7
, C12
, 1)
3391 && !AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
/* Include the opcode description table as well as the operand description
   table.  */
#include "aarch64-tbl.h"