/* [binutils][aarch64] Introduce SVE_IMM_ROT3 operand.
   opcodes/aarch64-opc.c  */
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include "bfd_stdint.h"
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* When non-zero, dump the qualifier-matching process (see
   dump_match_qualifiers and its callers below).  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  Indices not listed below are reserved and remain
   null.  */
const char *const aarch64_sve_pattern_array[32] = {
  [0] = "pow2",
  [1] = "vl1",
  [2] = "vl2",
  [3] = "vl3",
  [4] = "vl4",
  [5] = "vl5",
  [6] = "vl6",
  [7] = "vl7",
  [8] = "vl8",
  [9] = "vl16",
  [10] = "vl32",
  [11] = "vl64",
  [12] = "vl128",
  [13] = "vl256",
  [29] = "mul4",
  [30] = "mul3",
  [31] = "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  Indices not listed below (6, 7, 14 and 15) are
   reserved and remain null.  */
const char *const aarch64_sve_prfop_array[16] = {
  [0] = "pldl1keep",
  [1] = "pldl1strm",
  [2] = "pldl2keep",
  [3] = "pldl2strm",
  [4] = "pldl3keep",
  [5] = "pldl3strm",
  [8] = "pstl1keep",
  [9] = "pstl1strm",
  [10] = "pstl2keep",
  [11] = "pstl2strm",
  [12] = "pstl3keep",
  [13] = "pstl3strm"
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of the "shape" of an AdvSIMD qualifier sequence, used
   to pick which operand carries the size:Q information (see
   significant_operand_index below).  */
enum data_pattern
{
  DP_UNKNOWN,
  /* e.g. v.4s, v.4s, v.4s: all operands share one arrangement.  */
  DP_VECTOR_3SAME,
  /* e.g. v.8h, v.8b, v.8b: destination elements twice the source
     element width.  */
  DP_VECTOR_LONG,
  /* e.g. v.8h, v.8h, v.8b: second source is the narrow one.  */
  DP_VECTOR_WIDE,
  /* e.g. SADDLV <V><d>, <Vn>.<T>: scalar result from a vector.  */
  DP_VECTOR_ACROSS_LANES,
};
129
/* For each data pattern, the index of the operand that is significant
   for encoding/decoding the size:Q fields; indexed by enum
   data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Bit-field descriptions used by the operand encoders/decoders, indexed
   by the FLD_* enumerators declared elsewhere.  Judging by the
   "bits [m,n]" notes below, each entry is { least significant bit,
   width } -- NOTE(review): confirm against the definition of
   aarch64_field in aarch64-opc.h.  */
const aarch64_field fields[] =
{
    { 0, 0 },	/* NIL.  */
    { 0, 4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0, 4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5, 5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0, 5 },	/* Rt: in load/store instructions.  */
    { 0, 5 },	/* Rd: in many integer instructions.  */
    { 5, 5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5, 3 },	/* op2: in the system instructions.  */
    { 8, 4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 15, 6 },	/* imm6_2: in rmif instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 0, 4 },	/* imm4_2: in rmif instructions.  */
    { 10, 4 },	/* imm4_3: in adddg/subg instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* S: in LDRAA and LDRAB instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 4, 1 },	/* SVE_M_4: Merge/zero select, bit 4.  */
    { 14, 1 },	/* SVE_M_14: Merge/zero select, bit 14.  */
    { 16, 1 },	/* SVE_M_16: Merge/zero select, bit 16.  */
    { 17, 1 },	/* SVE_N: SVE equivalent of N.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Rm: SVE alternative position for Rm.  */
    { 16, 5 },	/* SVE_Rn: SVE alternative position for Rn.  */
    { 0, 5 },	/* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
    { 5, 5 },	/* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16].  */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 5, 1 },	/* SVE_i1: single-bit immediate.  */
    { 22, 1 },	/* SVE_i3h: high bit of 3-bit immediate.  */
    { 16, 3 },	/* SVE_imm3: 3-bit immediate field.  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 5, 5 },	/* SVE_imm5: 5-bit immediate field.  */
    { 16, 5 },	/* SVE_imm5b: secondary 5-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 14, 7 },	/* SVE_imm7: 7-bit immediate field.  */
    { 5, 8 },	/* SVE_imm8: 8-bit immediate field.  */
    { 5, 9 },	/* SVE_imm9: 9-bit immediate field.  */
    { 11, 6 },	/* SVE_immr: SVE equivalent of immr.  */
    { 5, 6 },	/* SVE_imms: SVE equivalent of imms.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 16, 1 },	/* SVE_rot1: 1-bit rotation amount.  */
    { 10, 2 },	/* SVE_rot2: 2-bit rotation amount.  */
    { 10, 1 },	/* SVE_rot3: 1-bit rotation amount at bit 10.  */
    { 22, 1 },	/* SVE_sz: 1-bit element size select.  */
    { 16, 4 },	/* SVE_tsz: triangular size select.  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 8, 2 },	/* SVE_tszl_8: triangular size select low, bits [9,8].  */
    { 19, 2 },	/* SVE_tszl_19: triangular size select low, bits [20,19].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 },	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
    { 11, 2 },	/* rotate1: FCMLA immediate rotate.  */
    { 13, 2 },	/* rotate2: Indexed element FCMLA immediate rotate.  */
    { 12, 1 },	/* rotate3: FCADD immediate rotate.  */
    { 12, 2 },	/* SM3: Indexed element SM3 2 bits index immediate.  */
    { 22, 1 },	/* sz: 1-bit element size select.  */
};
326
327 enum aarch64_operand_class
328 aarch64_get_operand_class (enum aarch64_opnd type)
329 {
330 return aarch64_operands[type].op_class;
331 }
332
333 const char *
334 aarch64_get_operand_name (enum aarch64_opnd type)
335 {
336 return aarch64_operands[type].name;
337 }
338
339 /* Get operand description string.
340 This is usually for the diagnosis purpose. */
341 const char *
342 aarch64_get_operand_desc (enum aarch64_opnd type)
343 {
344 return aarch64_operands[type].desc;
345 }
346
/* Table of all conditional affixes, indexed by the 4-bit condition
   code.  Each entry lists the accepted name spellings (the first is
   the canonical one) together with the encoding value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq", "none"}, 0x0},
  {{"ne", "any"}, 0x1},
  {{"cs", "hs", "nlast"}, 0x2},
  {{"cc", "lo", "ul", "last"}, 0x3},
  {{"mi", "first"}, 0x4},
  {{"pl", "nfrst"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi", "pmore"}, 0x8},
  {{"ls", "plast"}, 0x9},
  {{"ge", "tcont"}, 0xa},
  {{"lt", "tstop"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
367
368 const aarch64_cond *
369 get_cond_from_value (aarch64_insn value)
370 {
371 assert (value < 16);
372 return &aarch64_conds[(unsigned int) value];
373 }
374
375 const aarch64_cond *
376 get_inverted_cond (const aarch64_cond *cond)
377 {
378 return &aarch64_conds[cond->value ^ 0x1];
379 }
380
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.

   NOTE: the entry order is load-bearing:
   aarch64_get_operand_modifier_from_value computes AARCH64_MOD_LSL - value
   for shifts (so ror/asr/lsr must immediately precede lsl) and
   AARCH64_MOD_UXTB + value for extends (so uxtb..sxtx must be
   consecutive).  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
406
407 enum aarch64_modifier_kind
408 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
409 {
410 return desc - aarch64_operand_modifiers;
411 }
412
413 aarch64_insn
414 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
415 {
416 return aarch64_operand_modifiers[kind].value;
417 }
418
419 enum aarch64_modifier_kind
420 aarch64_get_operand_modifier_from_value (aarch64_insn value,
421 bfd_boolean extend_p)
422 {
423 if (extend_p == TRUE)
424 return AARCH64_MOD_UXTB + value;
425 else
426 return AARCH64_MOD_LSL - value;
427 }
428
429 bfd_boolean
430 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
431 {
432 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
433 ? TRUE : FALSE;
434 }
435
436 static inline bfd_boolean
437 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
438 {
439 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
440 ? TRUE : FALSE;
441 }
442
/* Table of barrier option names, indexed by the 4-bit option value.
   Values with no symbolic name are spelled as plain immediates
   ("#0x..").  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
  { "#0x00", 0x0 },
  { "oshld", 0x1 },
  { "oshst", 0x2 },
  { "osh",   0x3 },
  { "#0x04", 0x4 },
  { "nshld", 0x5 },
  { "nshst", 0x6 },
  { "nsh",   0x7 },
  { "#0x08", 0x8 },
  { "ishld", 0x9 },
  { "ishst", 0xa },
  { "ish",   0xb },
  { "#0x0c", 0xc },
  { "ld",    0xd },
  { "st",    0xe },
  { "sy",    0xf },
};
462
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET;
     the F_NOPRINT flag suppresses the (blank) operand when printing.  */
  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
  { "c",	HINT_OPD_C },		/* BTI C.  */
  { "j",	HINT_OPD_J },		/* BTI J.  */
  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
  { NULL,	HINT_OPD_NULL },
};
480
/* op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
/* Table of prefetch operation names, indexed by the 5-bit prfop value.
   A NULL name marks an unallocated encoding.  */
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
521 \f
/* Utilities on value constraint.  */

/* Return 1 if VALUE lies within the inclusive range [LOW, HIGH],
   otherwise 0.  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  if (value < low)
    return 0;
  if (value > high)
    return 0;
  return 1;
}
529
/* Return non-zero if VALUE is an exact multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t remainder = value % align;
  return remainder == 0;
}
536
/* Return 1 if VALUE is representable as a signed two's-complement field
   of WIDTH bits, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  int64_t limit = (int64_t) 1 << (width - 1);
  return value >= -limit && value < limit;
}
550
/* Return 1 if VALUE is representable as an unsigned field of WIDTH
   bits, i.e. 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;

  int64_t limit = (int64_t) 1 << width;
  return value >= 0 && value < limit;
}
564
565 /* Return 1 if OPERAND is SP or WSP. */
566 int
567 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
568 {
569 return ((aarch64_get_operand_class (operand->type)
570 == AARCH64_OPND_CLASS_INT_REG)
571 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
572 && operand->reg.regno == 31);
573 }
574
575 /* Return 1 if OPERAND is XZR or WZP. */
576 int
577 aarch64_zero_register_p (const aarch64_opnd_info *operand)
578 {
579 return ((aarch64_get_operand_class (operand->type)
580 == AARCH64_OPND_CLASS_INT_REG)
581 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
582 && operand->reg.regno == 31);
583 }
584
585 /* Return true if the operand *OPERAND that has the operand code
586 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
587 qualified by the qualifier TARGET. */
588
589 static inline int
590 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
591 aarch64_opnd_qualifier_t target)
592 {
593 switch (operand->qualifier)
594 {
595 case AARCH64_OPND_QLF_W:
596 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
597 return 1;
598 break;
599 case AARCH64_OPND_QLF_X:
600 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
601 return 1;
602 break;
603 case AARCH64_OPND_QLF_WSP:
604 if (target == AARCH64_OPND_QLF_W
605 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
606 return 1;
607 break;
608 case AARCH64_OPND_QLF_SP:
609 if (target == AARCH64_OPND_QLF_X
610 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
611 return 1;
612 break;
613 default:
614 break;
615 }
616
617 return 0;
618 }
619
620 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
621 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
622
623 Return NIL if more than one expected qualifiers are found. */
624
625 aarch64_opnd_qualifier_t
626 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
627 int idx,
628 const aarch64_opnd_qualifier_t known_qlf,
629 int known_idx)
630 {
631 int i, saved_i;
632
633 /* Special case.
634
635 When the known qualifier is NIL, we have to assume that there is only
636 one qualifier sequence in the *QSEQ_LIST and return the corresponding
637 qualifier directly. One scenario is that for instruction
638 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
639 which has only one possible valid qualifier sequence
640 NIL, S_D
641 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
642 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
643
644 Because the qualifier NIL has dual roles in the qualifier sequence:
645 it can mean no qualifier for the operand, or the qualifer sequence is
646 not in use (when all qualifiers in the sequence are NILs), we have to
647 handle this special case here. */
648 if (known_qlf == AARCH64_OPND_NIL)
649 {
650 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
651 return qseq_list[0][idx];
652 }
653
654 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
655 {
656 if (qseq_list[i][known_idx] == known_qlf)
657 {
658 if (saved_i != -1)
659 /* More than one sequences are found to have KNOWN_QLF at
660 KNOWN_IDX. */
661 return AARCH64_OPND_NIL;
662 saved_i = i;
663 }
664 }
665
666 return qseq_list[saved_i][idx];
667 }
668
/* Kinds of operand qualifier, determining how the three data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  /* data0/data1/data2 hold element size, number of elements and the
     common encoding value (see the aarch64_get_qualifier_* accessors).  */
  OQK_OPD_VARIANT,
  /* data0/data1 hold the lower and upper bound of the accepted value
     (see get_lower_bound/get_upper_bound).  */
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
689
/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
  /* NOTE(review): "4b" appears twice -- presumably the scalar (S_4B)
     and vector (V_4B) variants of the qualifier enum; confirm against
     the enumerator order in the header.  */
  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},

  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
  {16, 0, 0, "tag", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): the kind is written as 0 (== OQK_NIL) rather than
     OQK_MISC; harmless because only OQK_OPD_VARIANT and
     OQK_VALUE_IN_RANGE are ever tested, but OQK_MISC may have been
     intended.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
750
751 static inline bfd_boolean
752 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
753 {
754 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
755 ? TRUE : FALSE;
756 }
757
758 static inline bfd_boolean
759 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
760 {
761 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
762 ? TRUE : FALSE;
763 }
764
765 const char*
766 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
767 {
768 return aarch64_opnd_qualifiers[qualifier].desc;
769 }
770
771 /* Given an operand qualifier, return the expected data element size
772 of a qualified operand. */
773 unsigned char
774 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
775 {
776 assert (operand_variant_qualifier_p (qualifier) == TRUE);
777 return aarch64_opnd_qualifiers[qualifier].data0;
778 }
779
780 unsigned char
781 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
782 {
783 assert (operand_variant_qualifier_p (qualifier) == TRUE);
784 return aarch64_opnd_qualifiers[qualifier].data1;
785 }
786
787 aarch64_insn
788 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
789 {
790 assert (operand_variant_qualifier_p (qualifier) == TRUE);
791 return aarch64_opnd_qualifiers[qualifier].data2;
792 }
793
794 static int
795 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
796 {
797 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
798 return aarch64_opnd_qualifiers[qualifier].data0;
799 }
800
801 static int
802 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
803 {
804 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
805 return aarch64_opnd_qualifiers[qualifier].data1;
806 }
807
#ifdef DEBUG_AARCH64
/* Print a printf-style formatted message prefixed with "#### " and
   followed by a newline.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Dump the AARCH64_MAX_OPND_NUM qualifiers starting at QUALIFIER on one
   "#### "-prefixed line, comma separated.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently attached to the operands OPND alongside
   the candidate sequence QUALIFIER they are being matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
845
846 /* This function checks if the given instruction INSN is a destructive
847 instruction based on the usage of the registers. It does not recognize
848 unary destructive instructions. */
849 bfd_boolean
850 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
851 {
852 int i = 0;
853 const enum aarch64_opnd *opnds = opcode->operands;
854
855 if (opnds[0] == AARCH64_OPND_NIL)
856 return FALSE;
857
858 while (opnds[++i] != AARCH64_OPND_NIL)
859 if (opnds[i] == opnds[0])
860 return TRUE;
861
862 return FALSE;
863 }
864
865 /* TODO improve this, we can have an extra field at the runtime to
866 store the number of operands rather than calculating it every time. */
867
868 int
869 aarch64_num_of_operands (const aarch64_opcode *opcode)
870 {
871 int i = 0;
872 const enum aarch64_opnd *opnds = opcode->operands;
873 while (opnds[i++] != AARCH64_OPND_NIL)
874 ;
875 --i;
876 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
877 return i;
878 }
879
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.

   N.B. on the entry, it is very likely that only some operands in *INST
   have had their qualifiers been established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   A couple examples of the matching algorithm:

   X,W,NIL should match
   X,W,NIL

   NIL,NIL should match
   X  ,NIL

   Apart from serving the main encoding routine, this can also be called
   during or after the operand decoding.  */

int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Clamp STOP_AT to the last operand index.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST still points at the
	 sequence that matched: the loop above breaks before advancing
	 past it.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      /* Pad the remainder of *RET with NIL.  */
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
1004
1005 /* Operand qualifier matching and resolving.
1006
1007 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1008 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1009
1010 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1011 succeeds. */
1012
1013 static int
1014 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1015 {
1016 int i, nops;
1017 aarch64_opnd_qualifier_seq_t qualifiers;
1018
1019 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1020 qualifiers))
1021 {
1022 DEBUG_TRACE ("matching FAIL");
1023 return 0;
1024 }
1025
1026 if (inst->opcode->flags & F_STRICT)
1027 {
1028 /* Require an exact qualifier match, even for NIL qualifiers. */
1029 nops = aarch64_num_of_operands (inst->opcode);
1030 for (i = 0; i < nops; ++i)
1031 if (inst->operands[i].qualifier != qualifiers[i])
1032 return FALSE;
1033 }
1034
1035 /* Update the qualifiers. */
1036 if (update_p == TRUE)
1037 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1038 {
1039 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1040 break;
1041 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1042 "update %s with %s for operand %d",
1043 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1044 aarch64_get_qualifier_name (qualifiers[i]), i);
1045 inst->operands[i].qualifier = qualifiers[i];
1046 }
1047
1048 DEBUG_TRACE ("matching SUCCESS");
1049 return 1;
1050 }
1051
1052 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1053 register by MOVZ.
1054
1055 IS32 indicates whether value is a 32-bit immediate or not.
1056 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1057 amount will be returned in *SHIFT_AMOUNT. */
1058
1059 bfd_boolean
1060 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1061 {
1062 int amount;
1063
1064 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1065
1066 if (is32)
1067 {
1068 /* Allow all zeros or all ones in top 32-bits, so that
1069 32-bit constant expressions like ~0x80000000 are
1070 permitted. */
1071 uint64_t ext = value;
1072 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1073 /* Immediate out of range. */
1074 return FALSE;
1075 value &= (int64_t) 0xffffffff;
1076 }
1077
1078 /* first, try movz then movn */
1079 amount = -1;
1080 if ((value & ((int64_t) 0xffff << 0)) == value)
1081 amount = 0;
1082 else if ((value & ((int64_t) 0xffff << 16)) == value)
1083 amount = 16;
1084 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1085 amount = 32;
1086 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1087 amount = 48;
1088
1089 if (amount == -1)
1090 {
1091 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1092 return FALSE;
1093 }
1094
1095 if (shift_amount != NULL)
1096 *shift_amount = amount;
1097
1098 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1099
1100 return TRUE;
1101 }
1102
1103 /* Build the accepted values for immediate logical SIMD instructions.
1104
1105 The standard encodings of the immediate value are:
1106 N imms immr SIMD size R S
1107 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1108 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1109 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1110 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1111 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1112 0 11110s 00000r 2 UInt(r) UInt(s)
1113 where all-ones value of S is reserved.
1114
1115 Let's call E the SIMD size.
1116
1117 The immediate value is: S+1 bits '1' rotated to the right by R.
1118
1119 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1120 (remember S != E - 1). */
1121
#define TOTAL_IMM_NB 5334

/* One (value, encoding) pair in the table of valid logical immediates.  */
typedef struct
{
  uint64_t imm;			/* Replicated 64-bit immediate value.  */
  aarch64_insn encoding;	/* Standard N:immr:imms encoding of IMM.  */
} simd_imm_encoding;

/* Table of every valid logical immediate; built lazily by
   build_immediate_table and kept sorted by IMM for bsearch.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1131
1132 static int
1133 simd_imm_encoding_cmp(const void *i1, const void *i2)
1134 {
1135 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1136 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1137
1138 if (imm1->imm < imm2->imm)
1139 return -1;
1140 if (imm1->imm > imm2->imm)
1141 return +1;
1142 return 0;
1143 }
1144
1145 /* immediate bitfield standard encoding
1146 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1147 1 ssssss rrrrrr 64 rrrrrr ssssss
1148 0 0sssss 0rrrrr 32 rrrrr sssss
1149 0 10ssss 00rrrr 16 rrrr ssss
1150 0 110sss 000rrr 8 rrr sss
1151 0 1110ss 0000rr 4 rr ss
1152 0 11110s 00000r 2 r s */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  /* Pack the fields of the standard encoding: N (is64) in bit 12,
     R in bits 11:6 and S in bits 5:0.  */
  int encoded = s;
  encoded |= r << 6;
  encoded |= is64 << 12;
  return encoded;
}
1158
/* Fill simd_immediates[] with every valid logical immediate value and
   its standard encoding, then sort the table by value so that
   aarch64_logical_immediate_p can look values up with bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Iterate over the six element sizes: 2, 4, 8, 16, 32 and 64 bits.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* S_MASK supplies the high "110..." marker bits of the imms
	     field that identify the element size:
	     log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* S < E - 1 because an all-ones element (S == E - 1) is reserved.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* Deliberate fallthrough cascade: an element of size 2^log_e
	       is doubled repeatedly until it fills 64 bits.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  /* Sanity-check the documented count 64*63 + 32*31 + ... + 2*1.  */
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1224
1225 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1226 be accepted by logical (immediate) instructions
1227 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1228
1229 ESIZE is the number of bytes in the decoded immediate value.
1230 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1231 VALUE will be returned in *ENCODING. */
1232
bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  /* Lazy one-time table construction, guarded only by this static flag;
     no locking is visible here.  */
  static bfd_boolean initialized = FALSE;
  uint64_t upper;
  int i;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);

  if (!initialized)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  /* Allow all zeros or all ones in top bits, so that
     constant expressions like ~1 are permitted.  */
  /* The shift is split in two because a single shift by ESIZE * 8 would
     be undefined behavior when ESIZE == 8 (shift by 64).  */
  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
  if ((value & ~upper) != value && (value | upper) != value)
    return FALSE;

  /* Replicate to a full 64-bit value.  */
  value &= ~upper;
  for (i = esize * 8; i < 64; i *= 2)
    value |= (value << i);

  /* The table stores fully replicated 64-bit values, so a binary search
     on the replicated VALUE finds the standard encoding, if any.  */
  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }
  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
1276
1277 /* If 64-bit immediate IMM is in the format of
1278 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1279 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1280 of value "abcdefgh". Otherwise return -1. */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int result = 0;
  int bit;

  /* Inspect each byte of IMM; every byte must be 0x00 or 0xff for the
     value to shrink to an 8-bit "abcdefgh" pattern.  */
  for (bit = 0; bit < 8; bit++)
    {
      uint32_t byte_val = (imm >> (8 * bit)) & 0xff;

      if (byte_val == 0x00)
	continue;
      if (byte_val != 0xff)
	return -1;
      result |= 1 << bit;
    }
  return result;
}
1298
1299 /* Utility inline functions for operand_general_constraint_met_p. */
1300
1301 static inline void
1302 set_error (aarch64_operand_error *mismatch_detail,
1303 enum aarch64_operand_error_kind kind, int idx,
1304 const char* error)
1305 {
1306 if (mismatch_detail == NULL)
1307 return;
1308 mismatch_detail->kind = kind;
1309 mismatch_detail->index = idx;
1310 mismatch_detail->error = error;
1311 }
1312
1313 static inline void
1314 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1315 const char* error)
1316 {
1317 if (mismatch_detail == NULL)
1318 return;
1319 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1320 }
1321
1322 static inline void
1323 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1324 int idx, int lower_bound, int upper_bound,
1325 const char* error)
1326 {
1327 if (mismatch_detail == NULL)
1328 return;
1329 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1330 mismatch_detail->data[0] = lower_bound;
1331 mismatch_detail->data[1] = upper_bound;
1332 }
1333
1334 static inline void
1335 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1336 int idx, int lower_bound, int upper_bound)
1337 {
1338 if (mismatch_detail == NULL)
1339 return;
1340 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1341 _("immediate value"));
1342 }
1343
1344 static inline void
1345 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1346 int idx, int lower_bound, int upper_bound)
1347 {
1348 if (mismatch_detail == NULL)
1349 return;
1350 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1351 _("immediate offset"));
1352 }
1353
1354 static inline void
1355 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1356 int idx, int lower_bound, int upper_bound)
1357 {
1358 if (mismatch_detail == NULL)
1359 return;
1360 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1361 _("register number"));
1362 }
1363
1364 static inline void
1365 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1366 int idx, int lower_bound, int upper_bound)
1367 {
1368 if (mismatch_detail == NULL)
1369 return;
1370 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1371 _("register element index"));
1372 }
1373
1374 static inline void
1375 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1376 int idx, int lower_bound, int upper_bound)
1377 {
1378 if (mismatch_detail == NULL)
1379 return;
1380 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1381 _("shift amount"));
1382 }
1383
1384 /* Report that the MUL modifier in operand IDX should be in the range
1385 [LOWER_BOUND, UPPER_BOUND]. */
1386 static inline void
1387 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1388 int idx, int lower_bound, int upper_bound)
1389 {
1390 if (mismatch_detail == NULL)
1391 return;
1392 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1393 _("multiplier"));
1394 }
1395
1396 static inline void
1397 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1398 int alignment)
1399 {
1400 if (mismatch_detail == NULL)
1401 return;
1402 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1403 mismatch_detail->data[0] = alignment;
1404 }
1405
1406 static inline void
1407 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1408 int expected_num)
1409 {
1410 if (mismatch_detail == NULL)
1411 return;
1412 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1413 mismatch_detail->data[0] = expected_num;
1414 }
1415
1416 static inline void
1417 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1418 const char* error)
1419 {
1420 if (mismatch_detail == NULL)
1421 return;
1422 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1423 }
1424
1425 /* General constraint checking based on operand code.
1426
1427 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1428 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1429
1430 This function has to be called after the qualifiers for all operands
1431 have been resolved.
1432
1433 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1434 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1435 of error message during the disassembling where error message is not
1436 wanted. We avoid the dynamic construction of strings of error messages
1437 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1438 use a combination of error code, static string and some integer data to
1439 represent an error. */
1440
1441 static int
1442 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1443 enum aarch64_opnd type,
1444 const aarch64_opcode *opcode,
1445 aarch64_operand_error *mismatch_detail)
1446 {
1447 unsigned num, modifiers, shift;
1448 unsigned char size;
1449 int64_t imm, min_value, max_value;
1450 uint64_t uvalue, mask;
1451 const aarch64_opnd_info *opnd = opnds + idx;
1452 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1453
1454 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1455
1456 switch (aarch64_operands[type].op_class)
1457 {
1458 case AARCH64_OPND_CLASS_INT_REG:
1459 /* Check pair reg constraints for cas* instructions. */
1460 if (type == AARCH64_OPND_PAIRREG)
1461 {
1462 assert (idx == 1 || idx == 3);
1463 if (opnds[idx - 1].reg.regno % 2 != 0)
1464 {
1465 set_syntax_error (mismatch_detail, idx - 1,
1466 _("reg pair must start from even reg"));
1467 return 0;
1468 }
1469 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1470 {
1471 set_syntax_error (mismatch_detail, idx,
1472 _("reg pair must be contiguous"));
1473 return 0;
1474 }
1475 break;
1476 }
1477
1478 /* <Xt> may be optional in some IC and TLBI instructions. */
1479 if (type == AARCH64_OPND_Rt_SYS)
1480 {
1481 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1482 == AARCH64_OPND_CLASS_SYSTEM));
1483 if (opnds[1].present
1484 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1485 {
1486 set_other_error (mismatch_detail, idx, _("extraneous register"));
1487 return 0;
1488 }
1489 if (!opnds[1].present
1490 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1491 {
1492 set_other_error (mismatch_detail, idx, _("missing register"));
1493 return 0;
1494 }
1495 }
1496 switch (qualifier)
1497 {
1498 case AARCH64_OPND_QLF_WSP:
1499 case AARCH64_OPND_QLF_SP:
1500 if (!aarch64_stack_pointer_p (opnd))
1501 {
1502 set_other_error (mismatch_detail, idx,
1503 _("stack pointer register expected"));
1504 return 0;
1505 }
1506 break;
1507 default:
1508 break;
1509 }
1510 break;
1511
1512 case AARCH64_OPND_CLASS_SVE_REG:
1513 switch (type)
1514 {
1515 case AARCH64_OPND_SVE_Zm3_INDEX:
1516 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1517 case AARCH64_OPND_SVE_Zm4_INDEX:
1518 size = get_operand_fields_width (get_operand_from_code (type));
1519 shift = get_operand_specific_data (&aarch64_operands[type]);
1520 mask = (1 << shift) - 1;
1521 if (opnd->reg.regno > mask)
1522 {
1523 assert (mask == 7 || mask == 15);
1524 set_other_error (mismatch_detail, idx,
1525 mask == 15
1526 ? _("z0-z15 expected")
1527 : _("z0-z7 expected"));
1528 return 0;
1529 }
1530 mask = (1 << (size - shift)) - 1;
1531 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1532 {
1533 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1534 return 0;
1535 }
1536 break;
1537
1538 case AARCH64_OPND_SVE_Zn_INDEX:
1539 size = aarch64_get_qualifier_esize (opnd->qualifier);
1540 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1541 {
1542 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1543 0, 64 / size - 1);
1544 return 0;
1545 }
1546 break;
1547
1548 case AARCH64_OPND_SVE_ZnxN:
1549 case AARCH64_OPND_SVE_ZtxN:
1550 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1551 {
1552 set_other_error (mismatch_detail, idx,
1553 _("invalid register list"));
1554 return 0;
1555 }
1556 break;
1557
1558 default:
1559 break;
1560 }
1561 break;
1562
1563 case AARCH64_OPND_CLASS_PRED_REG:
1564 if (opnd->reg.regno >= 8
1565 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1566 {
1567 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1568 return 0;
1569 }
1570 break;
1571
1572 case AARCH64_OPND_CLASS_COND:
1573 if (type == AARCH64_OPND_COND1
1574 && (opnds[idx].cond->value & 0xe) == 0xe)
1575 {
1576 /* Not allow AL or NV. */
1577 set_syntax_error (mismatch_detail, idx, NULL);
1578 }
1579 break;
1580
1581 case AARCH64_OPND_CLASS_ADDRESS:
1582 /* Check writeback. */
1583 switch (opcode->iclass)
1584 {
1585 case ldst_pos:
1586 case ldst_unscaled:
1587 case ldstnapair_offs:
1588 case ldstpair_off:
1589 case ldst_unpriv:
1590 if (opnd->addr.writeback == 1)
1591 {
1592 set_syntax_error (mismatch_detail, idx,
1593 _("unexpected address writeback"));
1594 return 0;
1595 }
1596 break;
1597 case ldst_imm10:
1598 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1599 {
1600 set_syntax_error (mismatch_detail, idx,
1601 _("unexpected address writeback"));
1602 return 0;
1603 }
1604 break;
1605 case ldst_imm9:
1606 case ldstpair_indexed:
1607 case asisdlsep:
1608 case asisdlsop:
1609 if (opnd->addr.writeback == 0)
1610 {
1611 set_syntax_error (mismatch_detail, idx,
1612 _("address writeback expected"));
1613 return 0;
1614 }
1615 break;
1616 default:
1617 assert (opnd->addr.writeback == 0);
1618 break;
1619 }
1620 switch (type)
1621 {
1622 case AARCH64_OPND_ADDR_SIMM7:
1623 /* Scaled signed 7 bits immediate offset. */
1624 /* Get the size of the data element that is accessed, which may be
1625 different from that of the source register size,
1626 e.g. in strb/ldrb. */
1627 size = aarch64_get_qualifier_esize (opnd->qualifier);
1628 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1629 {
1630 set_offset_out_of_range_error (mismatch_detail, idx,
1631 -64 * size, 63 * size);
1632 return 0;
1633 }
1634 if (!value_aligned_p (opnd->addr.offset.imm, size))
1635 {
1636 set_unaligned_error (mismatch_detail, idx, size);
1637 return 0;
1638 }
1639 break;
1640 case AARCH64_OPND_ADDR_OFFSET:
1641 case AARCH64_OPND_ADDR_SIMM9:
1642 /* Unscaled signed 9 bits immediate offset. */
1643 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1644 {
1645 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1646 return 0;
1647 }
1648 break;
1649
1650 case AARCH64_OPND_ADDR_SIMM9_2:
1651 /* Unscaled signed 9 bits immediate offset, which has to be negative
1652 or unaligned. */
1653 size = aarch64_get_qualifier_esize (qualifier);
1654 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1655 && !value_aligned_p (opnd->addr.offset.imm, size))
1656 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1657 return 1;
1658 set_other_error (mismatch_detail, idx,
1659 _("negative or unaligned offset expected"));
1660 return 0;
1661
1662 case AARCH64_OPND_ADDR_SIMM10:
1663 /* Scaled signed 10 bits immediate offset. */
1664 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1665 {
1666 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1667 return 0;
1668 }
1669 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1670 {
1671 set_unaligned_error (mismatch_detail, idx, 8);
1672 return 0;
1673 }
1674 break;
1675
1676 case AARCH64_OPND_ADDR_SIMM11:
1677 /* Signed 11 bits immediate offset (multiple of 16). */
1678 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1679 {
1680 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1681 return 0;
1682 }
1683
1684 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1685 {
1686 set_unaligned_error (mismatch_detail, idx, 16);
1687 return 0;
1688 }
1689 break;
1690
1691 case AARCH64_OPND_ADDR_SIMM13:
1692 /* Signed 13 bits immediate offset (multiple of 16). */
1693 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1694 {
1695 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1696 return 0;
1697 }
1698
1699 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1700 {
1701 set_unaligned_error (mismatch_detail, idx, 16);
1702 return 0;
1703 }
1704 break;
1705
1706 case AARCH64_OPND_SIMD_ADDR_POST:
1707 /* AdvSIMD load/store multiple structures, post-index. */
1708 assert (idx == 1);
1709 if (opnd->addr.offset.is_reg)
1710 {
1711 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1712 return 1;
1713 else
1714 {
1715 set_other_error (mismatch_detail, idx,
1716 _("invalid register offset"));
1717 return 0;
1718 }
1719 }
1720 else
1721 {
1722 const aarch64_opnd_info *prev = &opnds[idx-1];
1723 unsigned num_bytes; /* total number of bytes transferred. */
1724 /* The opcode dependent area stores the number of elements in
1725 each structure to be loaded/stored. */
1726 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1727 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1728 /* Special handling of loading single structure to all lane. */
1729 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1730 * aarch64_get_qualifier_esize (prev->qualifier);
1731 else
1732 num_bytes = prev->reglist.num_regs
1733 * aarch64_get_qualifier_esize (prev->qualifier)
1734 * aarch64_get_qualifier_nelem (prev->qualifier);
1735 if ((int) num_bytes != opnd->addr.offset.imm)
1736 {
1737 set_other_error (mismatch_detail, idx,
1738 _("invalid post-increment amount"));
1739 return 0;
1740 }
1741 }
1742 break;
1743
1744 case AARCH64_OPND_ADDR_REGOFF:
1745 /* Get the size of the data element that is accessed, which may be
1746 different from that of the source register size,
1747 e.g. in strb/ldrb. */
1748 size = aarch64_get_qualifier_esize (opnd->qualifier);
1749 /* It is either no shift or shift by the binary logarithm of SIZE. */
1750 if (opnd->shifter.amount != 0
1751 && opnd->shifter.amount != (int)get_logsz (size))
1752 {
1753 set_other_error (mismatch_detail, idx,
1754 _("invalid shift amount"));
1755 return 0;
1756 }
1757 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1758 operators. */
1759 switch (opnd->shifter.kind)
1760 {
1761 case AARCH64_MOD_UXTW:
1762 case AARCH64_MOD_LSL:
1763 case AARCH64_MOD_SXTW:
1764 case AARCH64_MOD_SXTX: break;
1765 default:
1766 set_other_error (mismatch_detail, idx,
1767 _("invalid extend/shift operator"));
1768 return 0;
1769 }
1770 break;
1771
1772 case AARCH64_OPND_ADDR_UIMM12:
1773 imm = opnd->addr.offset.imm;
1774 /* Get the size of the data element that is accessed, which may be
1775 different from that of the source register size,
1776 e.g. in strb/ldrb. */
1777 size = aarch64_get_qualifier_esize (qualifier);
1778 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1779 {
1780 set_offset_out_of_range_error (mismatch_detail, idx,
1781 0, 4095 * size);
1782 return 0;
1783 }
1784 if (!value_aligned_p (opnd->addr.offset.imm, size))
1785 {
1786 set_unaligned_error (mismatch_detail, idx, size);
1787 return 0;
1788 }
1789 break;
1790
1791 case AARCH64_OPND_ADDR_PCREL14:
1792 case AARCH64_OPND_ADDR_PCREL19:
1793 case AARCH64_OPND_ADDR_PCREL21:
1794 case AARCH64_OPND_ADDR_PCREL26:
1795 imm = opnd->imm.value;
1796 if (operand_need_shift_by_two (get_operand_from_code (type)))
1797 {
1798 /* The offset value in a PC-relative branch instruction is alway
1799 4-byte aligned and is encoded without the lowest 2 bits. */
1800 if (!value_aligned_p (imm, 4))
1801 {
1802 set_unaligned_error (mismatch_detail, idx, 4);
1803 return 0;
1804 }
1805 /* Right shift by 2 so that we can carry out the following check
1806 canonically. */
1807 imm >>= 2;
1808 }
1809 size = get_operand_fields_width (get_operand_from_code (type));
1810 if (!value_fit_signed_field_p (imm, size))
1811 {
1812 set_other_error (mismatch_detail, idx,
1813 _("immediate out of range"));
1814 return 0;
1815 }
1816 break;
1817
1818 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1819 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1820 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1821 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1822 min_value = -8;
1823 max_value = 7;
1824 sve_imm_offset_vl:
1825 assert (!opnd->addr.offset.is_reg);
1826 assert (opnd->addr.preind);
1827 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1828 min_value *= num;
1829 max_value *= num;
1830 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1831 || (opnd->shifter.operator_present
1832 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1833 {
1834 set_other_error (mismatch_detail, idx,
1835 _("invalid addressing mode"));
1836 return 0;
1837 }
1838 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1839 {
1840 set_offset_out_of_range_error (mismatch_detail, idx,
1841 min_value, max_value);
1842 return 0;
1843 }
1844 if (!value_aligned_p (opnd->addr.offset.imm, num))
1845 {
1846 set_unaligned_error (mismatch_detail, idx, num);
1847 return 0;
1848 }
1849 break;
1850
1851 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1852 min_value = -32;
1853 max_value = 31;
1854 goto sve_imm_offset_vl;
1855
1856 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1857 min_value = -256;
1858 max_value = 255;
1859 goto sve_imm_offset_vl;
1860
1861 case AARCH64_OPND_SVE_ADDR_RI_U6:
1862 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1863 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1864 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1865 min_value = 0;
1866 max_value = 63;
1867 sve_imm_offset:
1868 assert (!opnd->addr.offset.is_reg);
1869 assert (opnd->addr.preind);
1870 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1871 min_value *= num;
1872 max_value *= num;
1873 if (opnd->shifter.operator_present
1874 || opnd->shifter.amount_present)
1875 {
1876 set_other_error (mismatch_detail, idx,
1877 _("invalid addressing mode"));
1878 return 0;
1879 }
1880 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1881 {
1882 set_offset_out_of_range_error (mismatch_detail, idx,
1883 min_value, max_value);
1884 return 0;
1885 }
1886 if (!value_aligned_p (opnd->addr.offset.imm, num))
1887 {
1888 set_unaligned_error (mismatch_detail, idx, num);
1889 return 0;
1890 }
1891 break;
1892
1893 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1894 min_value = -8;
1895 max_value = 7;
1896 goto sve_imm_offset;
1897
1898 case AARCH64_OPND_SVE_ADDR_R:
1899 case AARCH64_OPND_SVE_ADDR_RR:
1900 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1901 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1902 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1903 case AARCH64_OPND_SVE_ADDR_RX:
1904 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1905 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1906 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1907 case AARCH64_OPND_SVE_ADDR_RZ:
1908 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1909 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1910 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1911 modifiers = 1 << AARCH64_MOD_LSL;
1912 sve_rr_operand:
1913 assert (opnd->addr.offset.is_reg);
1914 assert (opnd->addr.preind);
1915 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1916 && opnd->addr.offset.regno == 31)
1917 {
1918 set_other_error (mismatch_detail, idx,
1919 _("index register xzr is not allowed"));
1920 return 0;
1921 }
1922 if (((1 << opnd->shifter.kind) & modifiers) == 0
1923 || (opnd->shifter.amount
1924 != get_operand_specific_data (&aarch64_operands[type])))
1925 {
1926 set_other_error (mismatch_detail, idx,
1927 _("invalid addressing mode"));
1928 return 0;
1929 }
1930 break;
1931
1932 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1933 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1934 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1935 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1936 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1937 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1938 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1939 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1940 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1941 goto sve_rr_operand;
1942
1943 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1944 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1945 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1946 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1947 min_value = 0;
1948 max_value = 31;
1949 goto sve_imm_offset;
1950
1951 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1952 modifiers = 1 << AARCH64_MOD_LSL;
1953 sve_zz_operand:
1954 assert (opnd->addr.offset.is_reg);
1955 assert (opnd->addr.preind);
1956 if (((1 << opnd->shifter.kind) & modifiers) == 0
1957 || opnd->shifter.amount < 0
1958 || opnd->shifter.amount > 3)
1959 {
1960 set_other_error (mismatch_detail, idx,
1961 _("invalid addressing mode"));
1962 return 0;
1963 }
1964 break;
1965
1966 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1967 modifiers = (1 << AARCH64_MOD_SXTW);
1968 goto sve_zz_operand;
1969
1970 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1971 modifiers = 1 << AARCH64_MOD_UXTW;
1972 goto sve_zz_operand;
1973
1974 default:
1975 break;
1976 }
1977 break;
1978
1979 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1980 if (type == AARCH64_OPND_LEt)
1981 {
1982 /* Get the upper bound for the element index. */
1983 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1984 if (!value_in_range_p (opnd->reglist.index, 0, num))
1985 {
1986 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1987 return 0;
1988 }
1989 }
1990 /* The opcode dependent area stores the number of elements in
1991 each structure to be loaded/stored. */
1992 num = get_opcode_dependent_value (opcode);
1993 switch (type)
1994 {
1995 case AARCH64_OPND_LVt:
1996 assert (num >= 1 && num <= 4);
1997 /* Unless LD1/ST1, the number of registers should be equal to that
1998 of the structure elements. */
1999 if (num != 1 && opnd->reglist.num_regs != num)
2000 {
2001 set_reg_list_error (mismatch_detail, idx, num);
2002 return 0;
2003 }
2004 break;
2005 case AARCH64_OPND_LVt_AL:
2006 case AARCH64_OPND_LEt:
2007 assert (num >= 1 && num <= 4);
2008 /* The number of registers should be equal to that of the structure
2009 elements. */
2010 if (opnd->reglist.num_regs != num)
2011 {
2012 set_reg_list_error (mismatch_detail, idx, num);
2013 return 0;
2014 }
2015 break;
2016 default:
2017 break;
2018 }
2019 break;
2020
2021 case AARCH64_OPND_CLASS_IMMEDIATE:
2022 /* Constraint check on immediate operand. */
2023 imm = opnd->imm.value;
2024 /* E.g. imm_0_31 constrains value to be 0..31. */
2025 if (qualifier_value_in_range_constraint_p (qualifier)
2026 && !value_in_range_p (imm, get_lower_bound (qualifier),
2027 get_upper_bound (qualifier)))
2028 {
2029 set_imm_out_of_range_error (mismatch_detail, idx,
2030 get_lower_bound (qualifier),
2031 get_upper_bound (qualifier));
2032 return 0;
2033 }
2034
2035 switch (type)
2036 {
2037 case AARCH64_OPND_AIMM:
2038 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2039 {
2040 set_other_error (mismatch_detail, idx,
2041 _("invalid shift operator"));
2042 return 0;
2043 }
2044 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2045 {
2046 set_other_error (mismatch_detail, idx,
2047 _("shift amount must be 0 or 12"));
2048 return 0;
2049 }
2050 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2051 {
2052 set_other_error (mismatch_detail, idx,
2053 _("immediate out of range"));
2054 return 0;
2055 }
2056 break;
2057
2058 case AARCH64_OPND_HALF:
2059 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2060 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2061 {
2062 set_other_error (mismatch_detail, idx,
2063 _("invalid shift operator"));
2064 return 0;
2065 }
2066 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2067 if (!value_aligned_p (opnd->shifter.amount, 16))
2068 {
2069 set_other_error (mismatch_detail, idx,
2070 _("shift amount must be a multiple of 16"));
2071 return 0;
2072 }
2073 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2074 {
2075 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2076 0, size * 8 - 16);
2077 return 0;
2078 }
2079 if (opnd->imm.value < 0)
2080 {
2081 set_other_error (mismatch_detail, idx,
2082 _("negative immediate value not allowed"));
2083 return 0;
2084 }
2085 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2086 {
2087 set_other_error (mismatch_detail, idx,
2088 _("immediate out of range"));
2089 return 0;
2090 }
2091 break;
2092
2093 case AARCH64_OPND_IMM_MOV:
2094 {
2095 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2096 imm = opnd->imm.value;
2097 assert (idx == 1);
2098 switch (opcode->op)
2099 {
2100 case OP_MOV_IMM_WIDEN:
2101 imm = ~imm;
2102 /* Fall through. */
2103 case OP_MOV_IMM_WIDE:
2104 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2105 {
2106 set_other_error (mismatch_detail, idx,
2107 _("immediate out of range"));
2108 return 0;
2109 }
2110 break;
2111 case OP_MOV_IMM_LOG:
2112 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2113 {
2114 set_other_error (mismatch_detail, idx,
2115 _("immediate out of range"));
2116 return 0;
2117 }
2118 break;
2119 default:
2120 assert (0);
2121 return 0;
2122 }
2123 }
2124 break;
2125
2126 case AARCH64_OPND_NZCV:
2127 case AARCH64_OPND_CCMP_IMM:
2128 case AARCH64_OPND_EXCEPTION:
2129 case AARCH64_OPND_TME_UIMM16:
2130 case AARCH64_OPND_UIMM4:
2131 case AARCH64_OPND_UIMM4_ADDG:
2132 case AARCH64_OPND_UIMM7:
2133 case AARCH64_OPND_UIMM3_OP1:
2134 case AARCH64_OPND_UIMM3_OP2:
2135 case AARCH64_OPND_SVE_UIMM3:
2136 case AARCH64_OPND_SVE_UIMM7:
2137 case AARCH64_OPND_SVE_UIMM8:
2138 case AARCH64_OPND_SVE_UIMM8_53:
2139 size = get_operand_fields_width (get_operand_from_code (type));
2140 assert (size < 32);
2141 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2142 {
2143 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2144 (1 << size) - 1);
2145 return 0;
2146 }
2147 break;
2148
2149 case AARCH64_OPND_UIMM10:
2150 /* Scaled unsigned 10 bits immediate offset. */
2151 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2152 {
2153 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2154 return 0;
2155 }
2156
2157 if (!value_aligned_p (opnd->imm.value, 16))
2158 {
2159 set_unaligned_error (mismatch_detail, idx, 16);
2160 return 0;
2161 }
2162 break;
2163
2164 case AARCH64_OPND_SIMM5:
2165 case AARCH64_OPND_SVE_SIMM5:
2166 case AARCH64_OPND_SVE_SIMM5B:
2167 case AARCH64_OPND_SVE_SIMM6:
2168 case AARCH64_OPND_SVE_SIMM8:
2169 size = get_operand_fields_width (get_operand_from_code (type));
2170 assert (size < 32);
2171 if (!value_fit_signed_field_p (opnd->imm.value, size))
2172 {
2173 set_imm_out_of_range_error (mismatch_detail, idx,
2174 -(1 << (size - 1)),
2175 (1 << (size - 1)) - 1);
2176 return 0;
2177 }
2178 break;
2179
2180 case AARCH64_OPND_WIDTH:
2181 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2182 && opnds[0].type == AARCH64_OPND_Rd);
2183 size = get_upper_bound (qualifier);
2184 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2185 /* lsb+width <= reg.size */
2186 {
2187 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2188 size - opnds[idx-1].imm.value);
2189 return 0;
2190 }
2191 break;
2192
2193 case AARCH64_OPND_LIMM:
2194 case AARCH64_OPND_SVE_LIMM:
2195 {
2196 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2197 uint64_t uimm = opnd->imm.value;
2198 if (opcode->op == OP_BIC)
2199 uimm = ~uimm;
2200 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2201 {
2202 set_other_error (mismatch_detail, idx,
2203 _("immediate out of range"));
2204 return 0;
2205 }
2206 }
2207 break;
2208
2209 case AARCH64_OPND_IMM0:
2210 case AARCH64_OPND_FPIMM0:
2211 if (opnd->imm.value != 0)
2212 {
2213 set_other_error (mismatch_detail, idx,
2214 _("immediate zero expected"));
2215 return 0;
2216 }
2217 break;
2218
2219 case AARCH64_OPND_IMM_ROT1:
2220 case AARCH64_OPND_IMM_ROT2:
2221 case AARCH64_OPND_SVE_IMM_ROT2:
2222 if (opnd->imm.value != 0
2223 && opnd->imm.value != 90
2224 && opnd->imm.value != 180
2225 && opnd->imm.value != 270)
2226 {
2227 set_other_error (mismatch_detail, idx,
2228 _("rotate expected to be 0, 90, 180 or 270"));
2229 return 0;
2230 }
2231 break;
2232
2233 case AARCH64_OPND_IMM_ROT3:
2234 case AARCH64_OPND_SVE_IMM_ROT1:
2235 case AARCH64_OPND_SVE_IMM_ROT3:
2236 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2237 {
2238 set_other_error (mismatch_detail, idx,
2239 _("rotate expected to be 90 or 270"));
2240 return 0;
2241 }
2242 break;
2243
2244 case AARCH64_OPND_SHLL_IMM:
2245 assert (idx == 2);
2246 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2247 if (opnd->imm.value != size)
2248 {
2249 set_other_error (mismatch_detail, idx,
2250 _("invalid shift amount"));
2251 return 0;
2252 }
2253 break;
2254
2255 case AARCH64_OPND_IMM_VLSL:
2256 size = aarch64_get_qualifier_esize (qualifier);
2257 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2258 {
2259 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2260 size * 8 - 1);
2261 return 0;
2262 }
2263 break;
2264
2265 case AARCH64_OPND_IMM_VLSR:
2266 size = aarch64_get_qualifier_esize (qualifier);
2267 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2268 {
2269 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2270 return 0;
2271 }
2272 break;
2273
2274 case AARCH64_OPND_SIMD_IMM:
2275 case AARCH64_OPND_SIMD_IMM_SFT:
2276 /* Qualifier check. */
2277 switch (qualifier)
2278 {
2279 case AARCH64_OPND_QLF_LSL:
2280 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2281 {
2282 set_other_error (mismatch_detail, idx,
2283 _("invalid shift operator"));
2284 return 0;
2285 }
2286 break;
2287 case AARCH64_OPND_QLF_MSL:
2288 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2289 {
2290 set_other_error (mismatch_detail, idx,
2291 _("invalid shift operator"));
2292 return 0;
2293 }
2294 break;
2295 case AARCH64_OPND_QLF_NIL:
2296 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2297 {
2298 set_other_error (mismatch_detail, idx,
2299 _("shift is not permitted"));
2300 return 0;
2301 }
2302 break;
2303 default:
2304 assert (0);
2305 return 0;
2306 }
2307 /* Is the immediate valid? */
2308 assert (idx == 1);
2309 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2310 {
2311 /* uimm8 or simm8 */
2312 if (!value_in_range_p (opnd->imm.value, -128, 255))
2313 {
2314 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2315 return 0;
2316 }
2317 }
2318 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2319 {
2320 /* uimm64 is not
2321 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2322 ffffffffgggggggghhhhhhhh'. */
2323 set_other_error (mismatch_detail, idx,
2324 _("invalid value for immediate"));
2325 return 0;
2326 }
2327 /* Is the shift amount valid? */
2328 switch (opnd->shifter.kind)
2329 {
2330 case AARCH64_MOD_LSL:
2331 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2332 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2333 {
2334 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2335 (size - 1) * 8);
2336 return 0;
2337 }
2338 if (!value_aligned_p (opnd->shifter.amount, 8))
2339 {
2340 set_unaligned_error (mismatch_detail, idx, 8);
2341 return 0;
2342 }
2343 break;
2344 case AARCH64_MOD_MSL:
2345 /* Only 8 and 16 are valid shift amount. */
2346 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2347 {
2348 set_other_error (mismatch_detail, idx,
2349 _("shift amount must be 0 or 16"));
2350 return 0;
2351 }
2352 break;
2353 default:
2354 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2355 {
2356 set_other_error (mismatch_detail, idx,
2357 _("invalid shift operator"));
2358 return 0;
2359 }
2360 break;
2361 }
2362 break;
2363
2364 case AARCH64_OPND_FPIMM:
2365 case AARCH64_OPND_SIMD_FPIMM:
2366 case AARCH64_OPND_SVE_FPIMM8:
2367 if (opnd->imm.is_fp == 0)
2368 {
2369 set_other_error (mismatch_detail, idx,
2370 _("floating-point immediate expected"));
2371 return 0;
2372 }
2373 /* The value is expected to be an 8-bit floating-point constant with
2374 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2375 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2376 instruction). */
2377 if (!value_in_range_p (opnd->imm.value, 0, 255))
2378 {
2379 set_other_error (mismatch_detail, idx,
2380 _("immediate out of range"));
2381 return 0;
2382 }
2383 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2384 {
2385 set_other_error (mismatch_detail, idx,
2386 _("invalid shift operator"));
2387 return 0;
2388 }
2389 break;
2390
2391 case AARCH64_OPND_SVE_AIMM:
2392 min_value = 0;
2393 sve_aimm:
2394 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2395 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2396 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2397 uvalue = opnd->imm.value;
2398 shift = opnd->shifter.amount;
2399 if (size == 1)
2400 {
2401 if (shift != 0)
2402 {
2403 set_other_error (mismatch_detail, idx,
2404 _("no shift amount allowed for"
2405 " 8-bit constants"));
2406 return 0;
2407 }
2408 }
2409 else
2410 {
2411 if (shift != 0 && shift != 8)
2412 {
2413 set_other_error (mismatch_detail, idx,
2414 _("shift amount must be 0 or 8"));
2415 return 0;
2416 }
2417 if (shift == 0 && (uvalue & 0xff) == 0)
2418 {
2419 shift = 8;
2420 uvalue = (int64_t) uvalue / 256;
2421 }
2422 }
2423 mask >>= shift;
2424 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2425 {
2426 set_other_error (mismatch_detail, idx,
2427 _("immediate too big for element size"));
2428 return 0;
2429 }
2430 uvalue = (uvalue - min_value) & mask;
2431 if (uvalue > 0xff)
2432 {
2433 set_other_error (mismatch_detail, idx,
2434 _("invalid arithmetic immediate"));
2435 return 0;
2436 }
2437 break;
2438
2439 case AARCH64_OPND_SVE_ASIMM:
2440 min_value = -128;
2441 goto sve_aimm;
2442
2443 case AARCH64_OPND_SVE_I1_HALF_ONE:
2444 assert (opnd->imm.is_fp);
2445 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2446 {
2447 set_other_error (mismatch_detail, idx,
2448 _("floating-point value must be 0.5 or 1.0"));
2449 return 0;
2450 }
2451 break;
2452
2453 case AARCH64_OPND_SVE_I1_HALF_TWO:
2454 assert (opnd->imm.is_fp);
2455 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2456 {
2457 set_other_error (mismatch_detail, idx,
2458 _("floating-point value must be 0.5 or 2.0"));
2459 return 0;
2460 }
2461 break;
2462
2463 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2464 assert (opnd->imm.is_fp);
2465 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2466 {
2467 set_other_error (mismatch_detail, idx,
2468 _("floating-point value must be 0.0 or 1.0"));
2469 return 0;
2470 }
2471 break;
2472
2473 case AARCH64_OPND_SVE_INV_LIMM:
2474 {
2475 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2476 uint64_t uimm = ~opnd->imm.value;
2477 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2478 {
2479 set_other_error (mismatch_detail, idx,
2480 _("immediate out of range"));
2481 return 0;
2482 }
2483 }
2484 break;
2485
2486 case AARCH64_OPND_SVE_LIMM_MOV:
2487 {
2488 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2489 uint64_t uimm = opnd->imm.value;
2490 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2491 {
2492 set_other_error (mismatch_detail, idx,
2493 _("immediate out of range"));
2494 return 0;
2495 }
2496 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2497 {
2498 set_other_error (mismatch_detail, idx,
2499 _("invalid replicated MOV immediate"));
2500 return 0;
2501 }
2502 }
2503 break;
2504
2505 case AARCH64_OPND_SVE_PATTERN_SCALED:
2506 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2507 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2508 {
2509 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2510 return 0;
2511 }
2512 break;
2513
2514 case AARCH64_OPND_SVE_SHLIMM_PRED:
2515 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2516 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2517 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2518 {
2519 set_imm_out_of_range_error (mismatch_detail, idx,
2520 0, 8 * size - 1);
2521 return 0;
2522 }
2523 break;
2524
2525 case AARCH64_OPND_SVE_SHRIMM_PRED:
2526 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2527 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2528 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2529 {
2530 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2531 return 0;
2532 }
2533 break;
2534
2535 default:
2536 break;
2537 }
2538 break;
2539
2540 case AARCH64_OPND_CLASS_SYSTEM:
2541 switch (type)
2542 {
2543 case AARCH64_OPND_PSTATEFIELD:
2544 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2545 /* MSR UAO, #uimm4
2546 MSR PAN, #uimm4
2547 MSR SSBS,#uimm4
2548 The immediate must be #0 or #1. */
2549 if ((opnd->pstatefield == 0x03 /* UAO. */
2550 || opnd->pstatefield == 0x04 /* PAN. */
2551 || opnd->pstatefield == 0x19 /* SSBS. */
2552 || opnd->pstatefield == 0x1a) /* DIT. */
2553 && opnds[1].imm.value > 1)
2554 {
2555 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2556 return 0;
2557 }
2558 /* MSR SPSel, #uimm4
2559 Uses uimm4 as a control value to select the stack pointer: if
2560 bit 0 is set it selects the current exception level's stack
2561 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2562 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2563 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2564 {
2565 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2566 return 0;
2567 }
2568 break;
2569 default:
2570 break;
2571 }
2572 break;
2573
2574 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2575 /* Get the upper bound for the element index. */
2576 if (opcode->op == OP_FCMLA_ELEM)
2577 /* FCMLA index range depends on the vector size of other operands
2578 and is halfed because complex numbers take two elements. */
2579 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2580 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2581 else
2582 num = 16;
2583 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2584 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2585
2586 /* Index out-of-range. */
2587 if (!value_in_range_p (opnd->reglane.index, 0, num))
2588 {
2589 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2590 return 0;
2591 }
2592 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2593 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2594 number is encoded in "size:M:Rm":
2595 size <Vm>
2596 00 RESERVED
2597 01 0:Rm
2598 10 M:Rm
2599 11 RESERVED */
2600 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2601 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2602 {
2603 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2604 return 0;
2605 }
2606 break;
2607
2608 case AARCH64_OPND_CLASS_MODIFIED_REG:
2609 assert (idx == 1 || idx == 2);
2610 switch (type)
2611 {
2612 case AARCH64_OPND_Rm_EXT:
2613 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2614 && opnd->shifter.kind != AARCH64_MOD_LSL)
2615 {
2616 set_other_error (mismatch_detail, idx,
2617 _("extend operator expected"));
2618 return 0;
2619 }
2620 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2621 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2622 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2623 case. */
2624 if (!aarch64_stack_pointer_p (opnds + 0)
2625 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2626 {
2627 if (!opnd->shifter.operator_present)
2628 {
2629 set_other_error (mismatch_detail, idx,
2630 _("missing extend operator"));
2631 return 0;
2632 }
2633 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2634 {
2635 set_other_error (mismatch_detail, idx,
2636 _("'LSL' operator not allowed"));
2637 return 0;
2638 }
2639 }
2640 assert (opnd->shifter.operator_present /* Default to LSL. */
2641 || opnd->shifter.kind == AARCH64_MOD_LSL);
2642 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2643 {
2644 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2645 return 0;
2646 }
2647 /* In the 64-bit form, the final register operand is written as Wm
2648 for all but the (possibly omitted) UXTX/LSL and SXTX
2649 operators.
2650 N.B. GAS allows X register to be used with any operator as a
2651 programming convenience. */
2652 if (qualifier == AARCH64_OPND_QLF_X
2653 && opnd->shifter.kind != AARCH64_MOD_LSL
2654 && opnd->shifter.kind != AARCH64_MOD_UXTX
2655 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2656 {
2657 set_other_error (mismatch_detail, idx, _("W register expected"));
2658 return 0;
2659 }
2660 break;
2661
2662 case AARCH64_OPND_Rm_SFT:
2663 /* ROR is not available to the shifted register operand in
2664 arithmetic instructions. */
2665 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2666 {
2667 set_other_error (mismatch_detail, idx,
2668 _("shift operator expected"));
2669 return 0;
2670 }
2671 if (opnd->shifter.kind == AARCH64_MOD_ROR
2672 && opcode->iclass != log_shift)
2673 {
2674 set_other_error (mismatch_detail, idx,
2675 _("'ROR' operator not allowed"));
2676 return 0;
2677 }
2678 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2679 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2680 {
2681 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2682 return 0;
2683 }
2684 break;
2685
2686 default:
2687 break;
2688 }
2689 break;
2690
2691 default:
2692 break;
2693 }
2694
2695 return 1;
2696 }
2697
2698 /* Main entrypoint for the operand constraint checking.
2699
2700 Return 1 if operands of *INST meet the constraint applied by the operand
2701 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2702 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2703 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2704 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2705 error kind when it is notified that an instruction does not pass the check).
2706
2707 Un-determined operand qualifiers may get established during the process. */
2708
2709 int
2710 aarch64_match_operands_constraint (aarch64_inst *inst,
2711 aarch64_operand_error *mismatch_detail)
2712 {
2713 int i;
2714
2715 DEBUG_TRACE ("enter");
2716
2717 /* Check for cases where a source register needs to be the same as the
2718 destination register. Do this before matching qualifiers since if
2719 an instruction has both invalid tying and invalid qualifiers,
2720 the error about qualifiers would suggest several alternative
2721 instructions that also have invalid tying. */
2722 i = inst->opcode->tied_operand;
2723 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2724 {
2725 if (mismatch_detail)
2726 {
2727 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2728 mismatch_detail->index = i;
2729 mismatch_detail->error = NULL;
2730 }
2731 return 0;
2732 }
2733
2734 /* Match operands' qualifier.
2735 *INST has already had qualifier establish for some, if not all, of
2736 its operands; we need to find out whether these established
2737 qualifiers match one of the qualifier sequence in
2738 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2739 with the corresponding qualifier in such a sequence.
2740 Only basic operand constraint checking is done here; the more thorough
2741 constraint checking will carried out by operand_general_constraint_met_p,
2742 which has be to called after this in order to get all of the operands'
2743 qualifiers established. */
2744 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2745 {
2746 DEBUG_TRACE ("FAIL on operand qualifier matching");
2747 if (mismatch_detail)
2748 {
2749 /* Return an error type to indicate that it is the qualifier
2750 matching failure; we don't care about which operand as there
2751 are enough information in the opcode table to reproduce it. */
2752 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2753 mismatch_detail->index = -1;
2754 mismatch_detail->error = NULL;
2755 }
2756 return 0;
2757 }
2758
2759 /* Match operands' constraint. */
2760 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2761 {
2762 enum aarch64_opnd type = inst->opcode->operands[i];
2763 if (type == AARCH64_OPND_NIL)
2764 break;
2765 if (inst->operands[i].skip)
2766 {
2767 DEBUG_TRACE ("skip the incomplete operand %d", i);
2768 continue;
2769 }
2770 if (operand_general_constraint_met_p (inst->operands, i, type,
2771 inst->opcode, mismatch_detail) == 0)
2772 {
2773 DEBUG_TRACE ("FAIL on operand %d", i);
2774 return 0;
2775 }
2776 }
2777
2778 DEBUG_TRACE ("PASS");
2779
2780 return 1;
2781 }
2782
2783 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2784 Also updates the TYPE of each INST->OPERANDS with the corresponding
2785 value of OPCODE->OPERANDS.
2786
2787 Note that some operand qualifiers may need to be manually cleared by
2788 the caller before it further calls the aarch64_opcode_encode; by
2789 doing this, it helps the qualifier matching facilities work
2790 properly. */
2791
2792 const aarch64_opcode*
2793 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2794 {
2795 int i;
2796 const aarch64_opcode *old = inst->opcode;
2797
2798 inst->opcode = opcode;
2799
2800 /* Update the operand types. */
2801 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2802 {
2803 inst->operands[i].type = opcode->operands[i];
2804 if (opcode->operands[i] == AARCH64_OPND_NIL)
2805 break;
2806 }
2807
2808 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2809
2810 return old;
2811 }
2812
2813 int
2814 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2815 {
2816 int i;
2817 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2818 if (operands[i] == operand)
2819 return i;
2820 else if (operands[i] == AARCH64_OPND_NIL)
2821 break;
2822 return -1;
2823 }
2824 \f
/* R0...R30, followed by FOR31.  R is a macro producing the name of
   register N; FOR31 supplies the name for register number 31, whose
   meaning (stack pointer vs. zero register) differs between banks.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Integer register names, indexed [has_zr][is_64][regno]:
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2855
2856 /* Return the integer register name.
2857 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2858
2859 static inline const char *
2860 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2861 {
2862 const int has_zr = sp_reg_p ? 0 : 1;
2863 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2864 return int_reg[has_zr][is_64][regno];
2865 }
2866
2867 /* Like get_int_reg_name, but IS_64 is always 1. */
2868
2869 static inline const char *
2870 get_64bit_int_reg_name (int regno, int sp_reg_p)
2871 {
2872 const int has_zr = sp_reg_p ? 0 : 1;
2873 return int_reg[has_zr][1][regno];
2874 }
2875
2876 /* Get the name of the integer offset register in OPND, using the shift type
2877 to decide whether it's a word or doubleword. */
2878
2879 static inline const char *
2880 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2881 {
2882 switch (opnd->shifter.kind)
2883 {
2884 case AARCH64_MOD_UXTW:
2885 case AARCH64_MOD_SXTW:
2886 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2887
2888 case AARCH64_MOD_LSL:
2889 case AARCH64_MOD_SXTX:
2890 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2891
2892 default:
2893 abort ();
2894 }
2895 }
2896
2897 /* Get the name of the SVE vector offset register in OPND, using the operand
2898 qualifier to decide whether the suffix should be .S or .D. */
2899
2900 static inline const char *
2901 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2902 {
2903 assert (qualifier == AARCH64_OPND_QLF_S_S
2904 || qualifier == AARCH64_OPND_QLF_S_D);
2905 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2906 }
2907
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union lets the expanded bit pattern be viewed either as a raw
   integer or as the IEEE value of the same width.  */

/* 64-bit (double-precision) view.  */
typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

/* 32-bit (single-precision) view.  */
typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision (see
   expand_fp_imm below), so the 32-bit layout is reused here.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2927
2928 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2929 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2930 (depending on the type of the instruction). IMM8 will be expanded to a
2931 single-precision floating-point value (SIZE == 4) or a double-precision
2932 floating-point value (SIZE == 8). A half-precision floating-point value
2933 (SIZE == 2) is expanded to a single-precision floating-point value. The
2934 expanded value is returned. */
2935
static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Decompose imm8 = a:bcd:efgh (sign, 3-bit exponent, 4-bit fraction).  */
  const uint32_t sign = (imm8 >> 7) & 0x01;	/* imm8<7>.  */
  const uint32_t low7 = imm8 & 0x7f;		/* imm8<6:0>.  */
  const uint32_t bit6 = low7 >> 6;		/* imm8<6>.  */
  const uint32_t bit6_x4 = bit6 ? 0xf : 0x0;	/* Replicate(imm8<6>,4).  */
  uint64_t result;

  if (size == 8)
    {
      /* Assemble the top 32 bits of the double, then shift into place:
	 sign : NOT(imm8<6>) : Replicate(imm8<6>,7) : imm8<6>:imm8<5:0>.  */
      result = ((uint64_t) sign << 31)
	| ((uint64_t) (bit6 ^ 1) << 30)
	| ((uint64_t) bit6_x4 << 26)
	| ((uint64_t) bit6 << 25)
	| ((uint64_t) bit6 << 24)
	| ((uint64_t) bit6 << 23)
	| ((uint64_t) low7 << 16);
      result <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      /* Single-precision layout; a half-precision request also expands
	 to single precision.  */
      result = ((uint64_t) sign << 31)
	| ((uint64_t) (bit6 ^ 1) << 30)
	| ((uint64_t) bit6_x4 << 26)
	| ((uint64_t) low7 << 19);
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
      result = 0;
    }

  return result;
}
2971
2972 /* Produce the string representation of the register list operand *OPND
2973 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2974 the register name that comes before the register number, such as "v". */
2975 static void
2976 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2977 const char *prefix)
2978 {
2979 const int num_regs = opnd->reglist.num_regs;
2980 const int first_reg = opnd->reglist.first_regno;
2981 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2982 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2983 char tb[8]; /* Temporary buffer. */
2984
2985 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2986 assert (num_regs >= 1 && num_regs <= 4);
2987
2988 /* Prepare the index if any. */
2989 if (opnd->reglist.has_index)
2990 /* PR 21096: The %100 is to silence a warning about possible truncation. */
2991 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
2992 else
2993 tb[0] = '\0';
2994
2995 /* The hyphenated form is preferred for disassembly if there are
2996 more than two registers in the list, and the register numbers
2997 are monotonically increasing in increments of one. */
2998 if (num_regs > 2 && last_reg > first_reg)
2999 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
3000 prefix, last_reg, qlf_name, tb);
3001 else
3002 {
3003 const int reg0 = first_reg;
3004 const int reg1 = (first_reg + 1) & 0x1f;
3005 const int reg2 = (first_reg + 2) & 0x1f;
3006 const int reg3 = (first_reg + 3) & 0x1f;
3007
3008 switch (num_regs)
3009 {
3010 case 1:
3011 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3012 break;
3013 case 2:
3014 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3015 prefix, reg1, qlf_name, tb);
3016 break;
3017 case 3:
3018 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3019 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3020 prefix, reg2, qlf_name, tb);
3021 break;
3022 case 4:
3023 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3024 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3025 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3026 break;
3027 }
3028 }
3029 }
3030
3031 /* Print the register+immediate address in OPND to BUF, which has SIZE
3032 characters. BASE is the name of the base register. */
3033
3034 static void
3035 print_immediate_offset_address (char *buf, size_t size,
3036 const aarch64_opnd_info *opnd,
3037 const char *base)
3038 {
3039 if (opnd->addr.writeback)
3040 {
3041 if (opnd->addr.preind)
3042 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3043 else
3044 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3045 }
3046 else
3047 {
3048 if (opnd->shifter.operator_present)
3049 {
3050 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3051 snprintf (buf, size, "[%s, #%d, mul vl]",
3052 base, opnd->addr.offset.imm);
3053 }
3054 else if (opnd->addr.offset.imm)
3055 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3056 else
3057 snprintf (buf, size, "[%s]", base);
3058 }
3059 }
3060
3061 /* Produce the string representation of the register offset address operand
3062 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3063 the names of the base and offset registers. */
3064 static void
3065 print_register_offset_address (char *buf, size_t size,
3066 const aarch64_opnd_info *opnd,
3067 const char *base, const char *offset)
3068 {
3069 char tb[16]; /* Temporary buffer. */
3070 bfd_boolean print_extend_p = TRUE;
3071 bfd_boolean print_amount_p = TRUE;
3072 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3073
3074 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3075 || !opnd->shifter.amount_present))
3076 {
3077 /* Not print the shift/extend amount when the amount is zero and
3078 when it is not the special case of 8-bit load/store instruction. */
3079 print_amount_p = FALSE;
3080 /* Likewise, no need to print the shift operator LSL in such a
3081 situation. */
3082 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3083 print_extend_p = FALSE;
3084 }
3085
3086 /* Prepare for the extend/shift. */
3087 if (print_extend_p)
3088 {
3089 if (print_amount_p)
3090 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3091 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3092 (opnd->shifter.amount % 100));
3093 else
3094 snprintf (tb, sizeof (tb), ", %s", shift_name);
3095 }
3096 else
3097 tb[0] = '\0';
3098
3099 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3100 }
3101
3102 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3103 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3104 PC, PCREL_P and ADDRESS are used to pass in and return information about
3105 the PC-relative address calculation, where the PC value is passed in
3106 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
3107 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3108 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3109
3110 The function serves both the disassembler and the assembler diagnostics
3111 issuer, which is the reason why it lives in this file. */
3112
3113 void
3114 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3115 const aarch64_opcode *opcode,
3116 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3117 bfd_vma *address, char** notes)
3118 {
3119 unsigned int i, num_conds;
3120 const char *name = NULL;
3121 const aarch64_opnd_info *opnd = opnds + idx;
3122 enum aarch64_modifier_kind kind;
3123 uint64_t addr, enum_value;
3124
3125 buf[0] = '\0';
3126 if (pcrel_p)
3127 *pcrel_p = 0;
3128
3129 switch (opnd->type)
3130 {
3131 case AARCH64_OPND_Rd:
3132 case AARCH64_OPND_Rn:
3133 case AARCH64_OPND_Rm:
3134 case AARCH64_OPND_Rt:
3135 case AARCH64_OPND_Rt2:
3136 case AARCH64_OPND_Rs:
3137 case AARCH64_OPND_Ra:
3138 case AARCH64_OPND_Rt_SYS:
3139 case AARCH64_OPND_PAIRREG:
3140 case AARCH64_OPND_SVE_Rm:
3141 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3142 the <ic_op>, therefore we use opnd->present to override the
3143 generic optional-ness information. */
3144 if (opnd->type == AARCH64_OPND_Rt_SYS)
3145 {
3146 if (!opnd->present)
3147 break;
3148 }
3149 /* Omit the operand, e.g. RET. */
3150 else if (optional_operand_p (opcode, idx)
3151 && (opnd->reg.regno
3152 == get_optional_operand_default_value (opcode)))
3153 break;
3154 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3155 || opnd->qualifier == AARCH64_OPND_QLF_X);
3156 snprintf (buf, size, "%s",
3157 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3158 break;
3159
3160 case AARCH64_OPND_Rd_SP:
3161 case AARCH64_OPND_Rn_SP:
3162 case AARCH64_OPND_Rt_SP:
3163 case AARCH64_OPND_SVE_Rn_SP:
3164 case AARCH64_OPND_Rm_SP:
3165 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3166 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3167 || opnd->qualifier == AARCH64_OPND_QLF_X
3168 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3169 snprintf (buf, size, "%s",
3170 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3171 break;
3172
3173 case AARCH64_OPND_Rm_EXT:
3174 kind = opnd->shifter.kind;
3175 assert (idx == 1 || idx == 2);
3176 if ((aarch64_stack_pointer_p (opnds)
3177 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3178 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3179 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3180 && kind == AARCH64_MOD_UXTW)
3181 || (opnd->qualifier == AARCH64_OPND_QLF_X
3182 && kind == AARCH64_MOD_UXTX)))
3183 {
3184 /* 'LSL' is the preferred form in this case. */
3185 kind = AARCH64_MOD_LSL;
3186 if (opnd->shifter.amount == 0)
3187 {
3188 /* Shifter omitted. */
3189 snprintf (buf, size, "%s",
3190 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3191 break;
3192 }
3193 }
3194 if (opnd->shifter.amount)
3195 snprintf (buf, size, "%s, %s #%" PRIi64,
3196 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3197 aarch64_operand_modifiers[kind].name,
3198 opnd->shifter.amount);
3199 else
3200 snprintf (buf, size, "%s, %s",
3201 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3202 aarch64_operand_modifiers[kind].name);
3203 break;
3204
3205 case AARCH64_OPND_Rm_SFT:
3206 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3207 || opnd->qualifier == AARCH64_OPND_QLF_X);
3208 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3209 snprintf (buf, size, "%s",
3210 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3211 else
3212 snprintf (buf, size, "%s, %s #%" PRIi64,
3213 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3214 aarch64_operand_modifiers[opnd->shifter.kind].name,
3215 opnd->shifter.amount);
3216 break;
3217
3218 case AARCH64_OPND_Fd:
3219 case AARCH64_OPND_Fn:
3220 case AARCH64_OPND_Fm:
3221 case AARCH64_OPND_Fa:
3222 case AARCH64_OPND_Ft:
3223 case AARCH64_OPND_Ft2:
3224 case AARCH64_OPND_Sd:
3225 case AARCH64_OPND_Sn:
3226 case AARCH64_OPND_Sm:
3227 case AARCH64_OPND_SVE_VZn:
3228 case AARCH64_OPND_SVE_Vd:
3229 case AARCH64_OPND_SVE_Vm:
3230 case AARCH64_OPND_SVE_Vn:
3231 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3232 opnd->reg.regno);
3233 break;
3234
3235 case AARCH64_OPND_Va:
3236 case AARCH64_OPND_Vd:
3237 case AARCH64_OPND_Vn:
3238 case AARCH64_OPND_Vm:
3239 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3240 aarch64_get_qualifier_name (opnd->qualifier));
3241 break;
3242
3243 case AARCH64_OPND_Ed:
3244 case AARCH64_OPND_En:
3245 case AARCH64_OPND_Em:
3246 case AARCH64_OPND_Em16:
3247 case AARCH64_OPND_SM3_IMM2:
3248 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3249 aarch64_get_qualifier_name (opnd->qualifier),
3250 opnd->reglane.index);
3251 break;
3252
3253 case AARCH64_OPND_VdD1:
3254 case AARCH64_OPND_VnD1:
3255 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3256 break;
3257
3258 case AARCH64_OPND_LVn:
3259 case AARCH64_OPND_LVt:
3260 case AARCH64_OPND_LVt_AL:
3261 case AARCH64_OPND_LEt:
3262 print_register_list (buf, size, opnd, "v");
3263 break;
3264
3265 case AARCH64_OPND_SVE_Pd:
3266 case AARCH64_OPND_SVE_Pg3:
3267 case AARCH64_OPND_SVE_Pg4_5:
3268 case AARCH64_OPND_SVE_Pg4_10:
3269 case AARCH64_OPND_SVE_Pg4_16:
3270 case AARCH64_OPND_SVE_Pm:
3271 case AARCH64_OPND_SVE_Pn:
3272 case AARCH64_OPND_SVE_Pt:
3273 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3274 snprintf (buf, size, "p%d", opnd->reg.regno);
3275 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3276 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3277 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3278 aarch64_get_qualifier_name (opnd->qualifier));
3279 else
3280 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3281 aarch64_get_qualifier_name (opnd->qualifier));
3282 break;
3283
3284 case AARCH64_OPND_SVE_Za_5:
3285 case AARCH64_OPND_SVE_Za_16:
3286 case AARCH64_OPND_SVE_Zd:
3287 case AARCH64_OPND_SVE_Zm_5:
3288 case AARCH64_OPND_SVE_Zm_16:
3289 case AARCH64_OPND_SVE_Zn:
3290 case AARCH64_OPND_SVE_Zt:
3291 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3292 snprintf (buf, size, "z%d", opnd->reg.regno);
3293 else
3294 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3295 aarch64_get_qualifier_name (opnd->qualifier));
3296 break;
3297
3298 case AARCH64_OPND_SVE_ZnxN:
3299 case AARCH64_OPND_SVE_ZtxN:
3300 print_register_list (buf, size, opnd, "z");
3301 break;
3302
3303 case AARCH64_OPND_SVE_Zm3_INDEX:
3304 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3305 case AARCH64_OPND_SVE_Zm4_INDEX:
3306 case AARCH64_OPND_SVE_Zn_INDEX:
3307 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3308 aarch64_get_qualifier_name (opnd->qualifier),
3309 opnd->reglane.index);
3310 break;
3311
3312 case AARCH64_OPND_CRn:
3313 case AARCH64_OPND_CRm:
3314 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3315 break;
3316
3317 case AARCH64_OPND_IDX:
3318 case AARCH64_OPND_MASK:
3319 case AARCH64_OPND_IMM:
3320 case AARCH64_OPND_IMM_2:
3321 case AARCH64_OPND_WIDTH:
3322 case AARCH64_OPND_UIMM3_OP1:
3323 case AARCH64_OPND_UIMM3_OP2:
3324 case AARCH64_OPND_BIT_NUM:
3325 case AARCH64_OPND_IMM_VLSL:
3326 case AARCH64_OPND_IMM_VLSR:
3327 case AARCH64_OPND_SHLL_IMM:
3328 case AARCH64_OPND_IMM0:
3329 case AARCH64_OPND_IMMR:
3330 case AARCH64_OPND_IMMS:
3331 case AARCH64_OPND_FBITS:
3332 case AARCH64_OPND_TME_UIMM16:
3333 case AARCH64_OPND_SIMM5:
3334 case AARCH64_OPND_SVE_SHLIMM_PRED:
3335 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3336 case AARCH64_OPND_SVE_SHRIMM_PRED:
3337 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3338 case AARCH64_OPND_SVE_SIMM5:
3339 case AARCH64_OPND_SVE_SIMM5B:
3340 case AARCH64_OPND_SVE_SIMM6:
3341 case AARCH64_OPND_SVE_SIMM8:
3342 case AARCH64_OPND_SVE_UIMM3:
3343 case AARCH64_OPND_SVE_UIMM7:
3344 case AARCH64_OPND_SVE_UIMM8:
3345 case AARCH64_OPND_SVE_UIMM8_53:
3346 case AARCH64_OPND_IMM_ROT1:
3347 case AARCH64_OPND_IMM_ROT2:
3348 case AARCH64_OPND_IMM_ROT3:
3349 case AARCH64_OPND_SVE_IMM_ROT1:
3350 case AARCH64_OPND_SVE_IMM_ROT2:
3351 case AARCH64_OPND_SVE_IMM_ROT3:
3352 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3353 break;
3354
3355 case AARCH64_OPND_SVE_I1_HALF_ONE:
3356 case AARCH64_OPND_SVE_I1_HALF_TWO:
3357 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3358 {
3359 single_conv_t c;
3360 c.i = opnd->imm.value;
3361 snprintf (buf, size, "#%.1f", c.f);
3362 break;
3363 }
3364
3365 case AARCH64_OPND_SVE_PATTERN:
3366 if (optional_operand_p (opcode, idx)
3367 && opnd->imm.value == get_optional_operand_default_value (opcode))
3368 break;
3369 enum_value = opnd->imm.value;
3370 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3371 if (aarch64_sve_pattern_array[enum_value])
3372 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3373 else
3374 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3375 break;
3376
3377 case AARCH64_OPND_SVE_PATTERN_SCALED:
3378 if (optional_operand_p (opcode, idx)
3379 && !opnd->shifter.operator_present
3380 && opnd->imm.value == get_optional_operand_default_value (opcode))
3381 break;
3382 enum_value = opnd->imm.value;
3383 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3384 if (aarch64_sve_pattern_array[opnd->imm.value])
3385 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3386 else
3387 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3388 if (opnd->shifter.operator_present)
3389 {
3390 size_t len = strlen (buf);
3391 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3392 aarch64_operand_modifiers[opnd->shifter.kind].name,
3393 opnd->shifter.amount);
3394 }
3395 break;
3396
3397 case AARCH64_OPND_SVE_PRFOP:
3398 enum_value = opnd->imm.value;
3399 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3400 if (aarch64_sve_prfop_array[enum_value])
3401 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3402 else
3403 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3404 break;
3405
3406 case AARCH64_OPND_IMM_MOV:
3407 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3408 {
3409 case 4: /* e.g. MOV Wd, #<imm32>. */
3410 {
3411 int imm32 = opnd->imm.value;
3412 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3413 }
3414 break;
3415 case 8: /* e.g. MOV Xd, #<imm64>. */
3416 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3417 opnd->imm.value, opnd->imm.value);
3418 break;
3419 default: assert (0);
3420 }
3421 break;
3422
3423 case AARCH64_OPND_FPIMM0:
3424 snprintf (buf, size, "#0.0");
3425 break;
3426
3427 case AARCH64_OPND_LIMM:
3428 case AARCH64_OPND_AIMM:
3429 case AARCH64_OPND_HALF:
3430 case AARCH64_OPND_SVE_INV_LIMM:
3431 case AARCH64_OPND_SVE_LIMM:
3432 case AARCH64_OPND_SVE_LIMM_MOV:
3433 if (opnd->shifter.amount)
3434 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3435 opnd->shifter.amount);
3436 else
3437 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3438 break;
3439
3440 case AARCH64_OPND_SIMD_IMM:
3441 case AARCH64_OPND_SIMD_IMM_SFT:
3442 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3443 || opnd->shifter.kind == AARCH64_MOD_NONE)
3444 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3445 else
3446 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3447 aarch64_operand_modifiers[opnd->shifter.kind].name,
3448 opnd->shifter.amount);
3449 break;
3450
3451 case AARCH64_OPND_SVE_AIMM:
3452 case AARCH64_OPND_SVE_ASIMM:
3453 if (opnd->shifter.amount)
3454 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3455 opnd->shifter.amount);
3456 else
3457 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3458 break;
3459
3460 case AARCH64_OPND_FPIMM:
3461 case AARCH64_OPND_SIMD_FPIMM:
3462 case AARCH64_OPND_SVE_FPIMM8:
3463 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3464 {
3465 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3466 {
3467 half_conv_t c;
3468 c.i = expand_fp_imm (2, opnd->imm.value);
3469 snprintf (buf, size, "#%.18e", c.f);
3470 }
3471 break;
3472 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3473 {
3474 single_conv_t c;
3475 c.i = expand_fp_imm (4, opnd->imm.value);
3476 snprintf (buf, size, "#%.18e", c.f);
3477 }
3478 break;
3479 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3480 {
3481 double_conv_t c;
3482 c.i = expand_fp_imm (8, opnd->imm.value);
3483 snprintf (buf, size, "#%.18e", c.d);
3484 }
3485 break;
3486 default: assert (0);
3487 }
3488 break;
3489
3490 case AARCH64_OPND_CCMP_IMM:
3491 case AARCH64_OPND_NZCV:
3492 case AARCH64_OPND_EXCEPTION:
3493 case AARCH64_OPND_UIMM4:
3494 case AARCH64_OPND_UIMM4_ADDG:
3495 case AARCH64_OPND_UIMM7:
3496 case AARCH64_OPND_UIMM10:
3497 if (optional_operand_p (opcode, idx) == TRUE
3498 && (opnd->imm.value ==
3499 (int64_t) get_optional_operand_default_value (opcode)))
3500 /* Omit the operand, e.g. DCPS1. */
3501 break;
3502 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3503 break;
3504
3505 case AARCH64_OPND_COND:
3506 case AARCH64_OPND_COND1:
3507 snprintf (buf, size, "%s", opnd->cond->names[0]);
3508 num_conds = ARRAY_SIZE (opnd->cond->names);
3509 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3510 {
3511 size_t len = strlen (buf);
3512 if (i == 1)
3513 snprintf (buf + len, size - len, " // %s = %s",
3514 opnd->cond->names[0], opnd->cond->names[i]);
3515 else
3516 snprintf (buf + len, size - len, ", %s",
3517 opnd->cond->names[i]);
3518 }
3519 break;
3520
3521 case AARCH64_OPND_ADDR_ADRP:
3522 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3523 + opnd->imm.value;
3524 if (pcrel_p)
3525 *pcrel_p = 1;
3526 if (address)
3527 *address = addr;
3528 /* This is not necessary during the disassembling, as print_address_func
3529 in the disassemble_info will take care of the printing. But some
3530 other callers may be still interested in getting the string in *STR,
3531 so here we do snprintf regardless. */
3532 snprintf (buf, size, "#0x%" PRIx64, addr);
3533 break;
3534
3535 case AARCH64_OPND_ADDR_PCREL14:
3536 case AARCH64_OPND_ADDR_PCREL19:
3537 case AARCH64_OPND_ADDR_PCREL21:
3538 case AARCH64_OPND_ADDR_PCREL26:
3539 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3540 if (pcrel_p)
3541 *pcrel_p = 1;
3542 if (address)
3543 *address = addr;
3544 /* This is not necessary during the disassembling, as print_address_func
3545 in the disassemble_info will take care of the printing. But some
3546 other callers may be still interested in getting the string in *STR,
3547 so here we do snprintf regardless. */
3548 snprintf (buf, size, "#0x%" PRIx64, addr);
3549 break;
3550
3551 case AARCH64_OPND_ADDR_SIMPLE:
3552 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3553 case AARCH64_OPND_SIMD_ADDR_POST:
3554 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3555 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3556 {
3557 if (opnd->addr.offset.is_reg)
3558 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3559 else
3560 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3561 }
3562 else
3563 snprintf (buf, size, "[%s]", name);
3564 break;
3565
3566 case AARCH64_OPND_ADDR_REGOFF:
3567 case AARCH64_OPND_SVE_ADDR_R:
3568 case AARCH64_OPND_SVE_ADDR_RR:
3569 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3570 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3571 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3572 case AARCH64_OPND_SVE_ADDR_RX:
3573 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3574 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3575 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3576 print_register_offset_address
3577 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3578 get_offset_int_reg_name (opnd));
3579 break;
3580
3581 case AARCH64_OPND_SVE_ADDR_RZ:
3582 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3583 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3584 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3585 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3586 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3587 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3588 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3589 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3590 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3591 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3592 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3593 print_register_offset_address
3594 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3595 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3596 break;
3597
3598 case AARCH64_OPND_ADDR_SIMM7:
3599 case AARCH64_OPND_ADDR_SIMM9:
3600 case AARCH64_OPND_ADDR_SIMM9_2:
3601 case AARCH64_OPND_ADDR_SIMM10:
3602 case AARCH64_OPND_ADDR_SIMM11:
3603 case AARCH64_OPND_ADDR_SIMM13:
3604 case AARCH64_OPND_ADDR_OFFSET:
3605 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3606 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3607 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3608 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3609 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3610 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3611 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3612 case AARCH64_OPND_SVE_ADDR_RI_U6:
3613 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3614 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3615 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3616 print_immediate_offset_address
3617 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3618 break;
3619
3620 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3621 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3622 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3623 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3624 print_immediate_offset_address
3625 (buf, size, opnd,
3626 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3627 break;
3628
3629 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3630 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3631 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3632 print_register_offset_address
3633 (buf, size, opnd,
3634 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3635 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3636 break;
3637
3638 case AARCH64_OPND_ADDR_UIMM12:
3639 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3640 if (opnd->addr.offset.imm)
3641 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3642 else
3643 snprintf (buf, size, "[%s]", name);
3644 break;
3645
3646 case AARCH64_OPND_SYSREG:
3647 for (i = 0; aarch64_sys_regs[i].name; ++i)
3648 {
3649 bfd_boolean exact_match
3650 = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3651 == opnd->sysreg.flags;
3652
3653 /* Try and find an exact match, But if that fails, return the first
3654 partial match that was found. */
3655 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3656 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3657 && (name == NULL || exact_match))
3658 {
3659 name = aarch64_sys_regs[i].name;
3660 if (exact_match)
3661 {
3662 if (notes)
3663 *notes = NULL;
3664 break;
3665 }
3666
3667 /* If we didn't match exactly, that means the presense of a flag
3668 indicates what we didn't want for this instruction. e.g. If
3669 F_REG_READ is there, that means we were looking for a write
3670 register. See aarch64_ext_sysreg. */
3671 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3672 *notes = _("reading from a write-only register");
3673 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3674 *notes = _("writing to a read-only register");
3675 }
3676 }
3677
3678 if (name)
3679 snprintf (buf, size, "%s", name);
3680 else
3681 {
3682 /* Implementation defined system register. */
3683 unsigned int value = opnd->sysreg.value;
3684 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3685 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3686 value & 0x7);
3687 }
3688 break;
3689
3690 case AARCH64_OPND_PSTATEFIELD:
3691 for (i = 0; aarch64_pstatefields[i].name; ++i)
3692 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3693 break;
3694 assert (aarch64_pstatefields[i].name);
3695 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3696 break;
3697
3698 case AARCH64_OPND_SYSREG_AT:
3699 case AARCH64_OPND_SYSREG_DC:
3700 case AARCH64_OPND_SYSREG_IC:
3701 case AARCH64_OPND_SYSREG_TLBI:
3702 case AARCH64_OPND_SYSREG_SR:
3703 snprintf (buf, size, "%s", opnd->sysins_op->name);
3704 break;
3705
3706 case AARCH64_OPND_BARRIER:
3707 snprintf (buf, size, "%s", opnd->barrier->name);
3708 break;
3709
3710 case AARCH64_OPND_BARRIER_ISB:
3711 /* Operand can be omitted, e.g. in DCPS1. */
3712 if (! optional_operand_p (opcode, idx)
3713 || (opnd->barrier->value
3714 != get_optional_operand_default_value (opcode)))
3715 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3716 break;
3717
3718 case AARCH64_OPND_PRFOP:
3719 if (opnd->prfop->name != NULL)
3720 snprintf (buf, size, "%s", opnd->prfop->name);
3721 else
3722 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3723 break;
3724
3725 case AARCH64_OPND_BARRIER_PSB:
3726 case AARCH64_OPND_BTI_TARGET:
3727 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3728 snprintf (buf, size, "%s", opnd->hint_option->name);
3729 break;
3730
3731 default:
3732 assert (0);
3733 }
3734 }
3735 \f
/* Pack the five system-register encoding fields (op0, op1, CRn, CRm, op2)
   into a single value.  After the final ">> 5" the layout is:
   op0 at bits [15:14], op1 at [13:11], CRn at [10:7], CRm at [6:3] and
   op2 at [2:0] -- matching the field extraction used when printing an
   implementation-defined register as "s<op0>_<op1>_c<CRn>_c<CRm>_<op2>".  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Readable names for the CRn/CRm field values used in the tables below.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
3759
3760 /* TODO there is one more issues need to be resolved
3761 1. handle cpu-implementation-defined system registers. */
3762 const aarch64_sys_reg aarch64_sys_regs [] =
3763 {
3764 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3765 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3766 { "elr_el1", CPEN_(0,C0,1), 0 },
3767 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3768 { "sp_el0", CPEN_(0,C1,0), 0 },
3769 { "spsel", CPEN_(0,C2,0), 0 },
3770 { "daif", CPEN_(3,C2,1), 0 },
3771 { "currentel", CPEN_(0,C2,2), F_REG_READ }, /* RO */
3772 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3773 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3774 { "nzcv", CPEN_(3,C2,0), 0 },
3775 { "ssbs", CPEN_(3,C2,6), F_ARCHEXT },
3776 { "fpcr", CPEN_(3,C4,0), 0 },
3777 { "fpsr", CPEN_(3,C4,1), 0 },
3778 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3779 { "dlr_el0", CPEN_(3,C5,1), 0 },
3780 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3781 { "elr_el2", CPEN_(4,C0,1), 0 },
3782 { "sp_el1", CPEN_(4,C1,0), 0 },
3783 { "spsr_irq", CPEN_(4,C3,0), 0 },
3784 { "spsr_abt", CPEN_(4,C3,1), 0 },
3785 { "spsr_und", CPEN_(4,C3,2), 0 },
3786 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3787 { "spsr_el3", CPEN_(6,C0,0), 0 },
3788 { "elr_el3", CPEN_(6,C0,1), 0 },
3789 { "sp_el2", CPEN_(6,C1,0), 0 },
3790 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3791 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3792 { "midr_el1", CPENC(3,0,C0,C0,0), F_REG_READ }, /* RO */
3793 { "ctr_el0", CPENC(3,3,C0,C0,1), F_REG_READ }, /* RO */
3794 { "mpidr_el1", CPENC(3,0,C0,C0,5), F_REG_READ }, /* RO */
3795 { "revidr_el1", CPENC(3,0,C0,C0,6), F_REG_READ }, /* RO */
3796 { "aidr_el1", CPENC(3,1,C0,C0,7), F_REG_READ }, /* RO */
3797 { "dczid_el0", CPENC(3,3,C0,C0,7), F_REG_READ }, /* RO */
3798 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), F_REG_READ }, /* RO */
3799 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), F_REG_READ }, /* RO */
3800 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), F_REG_READ }, /* RO */
3801 { "id_pfr2_el1", CPENC(3,0,C0,C3,4), F_ARCHEXT | F_REG_READ}, /* RO */
3802 { "id_afr0_el1", CPENC(3,0,C0,C1,3), F_REG_READ }, /* RO */
3803 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), F_REG_READ }, /* RO */
3804 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), F_REG_READ }, /* RO */
3805 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), F_REG_READ }, /* RO */
3806 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), F_REG_READ }, /* RO */
3807 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), F_REG_READ }, /* RO */
3808 { "id_isar0_el1", CPENC(3,0,C0,C2,0), F_REG_READ }, /* RO */
3809 { "id_isar1_el1", CPENC(3,0,C0,C2,1), F_REG_READ }, /* RO */
3810 { "id_isar2_el1", CPENC(3,0,C0,C2,2), F_REG_READ }, /* RO */
3811 { "id_isar3_el1", CPENC(3,0,C0,C2,3), F_REG_READ }, /* RO */
3812 { "id_isar4_el1", CPENC(3,0,C0,C2,4), F_REG_READ }, /* RO */
3813 { "id_isar5_el1", CPENC(3,0,C0,C2,5), F_REG_READ }, /* RO */
3814 { "mvfr0_el1", CPENC(3,0,C0,C3,0), F_REG_READ }, /* RO */
3815 { "mvfr1_el1", CPENC(3,0,C0,C3,1), F_REG_READ }, /* RO */
3816 { "mvfr2_el1", CPENC(3,0,C0,C3,2), F_REG_READ }, /* RO */
3817 { "ccsidr_el1", CPENC(3,1,C0,C0,0), F_REG_READ }, /* RO */
3818 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), F_REG_READ }, /* RO */
3819 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), F_REG_READ }, /* RO */
3820 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), F_REG_READ }, /* RO */
3821 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), F_REG_READ }, /* RO */
3822 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), F_REG_READ }, /* RO */
3823 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), F_REG_READ }, /* RO */
3824 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), F_REG_READ }, /* RO */
3825 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), F_REG_READ }, /* RO */
3826 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3827 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), F_REG_READ }, /* RO */
3828 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), F_REG_READ }, /* RO */
3829 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3830 { "clidr_el1", CPENC(3,1,C0,C0,1), F_REG_READ }, /* RO */
3831 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 },
3832 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3833 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3834 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3835 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3836 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3837 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3838 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3839 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3840 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3841 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3842 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3843 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3844 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3845 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3846 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3847 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3848 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3849 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3850 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3851 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3852 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3853 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3854 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3855 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3856 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3857 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3858 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3859 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3860 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3861 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3862 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3863 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3864 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3865 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3866 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3867 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3868 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3869 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3870 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3871 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3872 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3873 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3874 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3875 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3876 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3877 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3878 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3879 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3880 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3881 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3882 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3883 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3884 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3885 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3886 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3887 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3888 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3889 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3890 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3891 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
3892 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3893 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3894 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3895 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3896 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3897 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3898 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3899 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3900 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3901 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3902 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3903 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3904 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3905 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3906 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3907 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3908 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3909 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3910 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3911 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3912 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3913 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3914 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3915 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3916 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3917 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3918 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3919 { "rvbar_el1", CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
3920 { "rvbar_el2", CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
3921 { "rvbar_el3", CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
3922 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3923 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3924 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3925 { "isr_el1", CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
3926 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3927 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3928 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3929 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3930 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3931 { "rndr", CPENC(3,3,C2,C4,0), F_ARCHEXT | F_REG_READ }, /* RO */
3932 { "rndrrs", CPENC(3,3,C2,C4,1), F_ARCHEXT | F_REG_READ }, /* RO */
3933 { "tco", CPENC(3,3,C4,C2,7), F_ARCHEXT },
3934 { "tfsre0_el1", CPENC(3,0,C6,C6,1), F_ARCHEXT },
3935 { "tfsr_el1", CPENC(3,0,C6,C5,0), F_ARCHEXT },
3936 { "tfsr_el2", CPENC(3,4,C6,C5,0), F_ARCHEXT },
3937 { "tfsr_el3", CPENC(3,6,C6,C6,0), F_ARCHEXT },
3938 { "tfsr_el12", CPENC(3,5,C6,C6,0), F_ARCHEXT },
3939 { "rgsr_el1", CPENC(3,0,C1,C0,5), F_ARCHEXT },
3940 { "gcr_el1", CPENC(3,0,C1,C0,6), F_ARCHEXT },
3941 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3942 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
3943 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3944 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3945 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3946 { "scxtnum_el0", CPENC(3,3,C13,C0,7), F_ARCHEXT },
3947 { "scxtnum_el1", CPENC(3,0,C13,C0,7), F_ARCHEXT },
3948 { "scxtnum_el2", CPENC(3,4,C13,C0,7), F_ARCHEXT },
3949 { "scxtnum_el12", CPENC(3,5,C13,C0,7), F_ARCHEXT },
3950 { "scxtnum_el3", CPENC(3,6,C13,C0,7), F_ARCHEXT },
3951 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3952 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RW */
3953 { "cntpct_el0", CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
3954 { "cntvct_el0", CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
3955 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3956 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3957 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3958 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3959 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3960 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3961 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3962 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3963 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3964 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3965 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3966 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3967 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3968 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3969 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3970 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3971 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3972 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3973 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3974 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3975 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3976 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3977 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3978 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3979 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3980 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3981 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3982 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3983 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3984 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3985 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), F_REG_READ }, /* r */
3986 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3987 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3988 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), F_REG_READ }, /* r */
3989 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
3990 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 },
3991 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 },
3992 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3993 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3994 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3995 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3996 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3997 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3998 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3999 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
4000 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
4001 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
4002 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
4003 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
4004 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
4005 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
4006 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
4007 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
4008 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
4009 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
4010 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
4011 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
4012 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
4013 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
4014 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
4015 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
4016 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
4017 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
4018 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
4019 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
4020 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
4021 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
4022 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
4023 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
4024 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
4025 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
4026 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
4027 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
4028 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
4029 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
4030 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
4031 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
4032 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
4033 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
4034 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
4035 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
4036 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
4037 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
4038 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
4039 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
4040 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
4041 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
4042 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
4043 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
4044 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
4045 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
4046 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
4047 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
4048 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
4049 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
4050 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
4051 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
4052 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
4053 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
4054 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
4055 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
4056 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
4057 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
4058 { "mdrar_el1", CPENC(2,0,C1, C0, 0), F_REG_READ }, /* r */
4059 { "oslar_el1", CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
4060 { "oslsr_el1", CPENC(2,0,C1, C1, 4), F_REG_READ }, /* r */
4061 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
4062 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
4063 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
4064 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
4065 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
4066 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
4067 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
4068 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
4069 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
4070 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
4071 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
4072 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
4073 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
4074 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
4075 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
4076 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* rw */
4077 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
4078 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
4079 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
4080 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
4081 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
4082 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
4083 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
4084 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
4085 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), F_REG_READ }, /* r */
4086 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), F_REG_READ }, /* r */
4087 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
4088 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
4089 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
4090 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
4091 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
4092 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
4093 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
4094 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
4095 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
4096 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
4097 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
4098 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
4099 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
4100 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
4101 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
4102 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
4103 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
4104 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
4105 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
4106 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
4107 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
4108 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
4109 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
4110 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
4111 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
4112 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
4113 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
4114 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
4115 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
4116 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
4117 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
4118 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
4119 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
4120 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4121 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4122 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4123 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4124 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4125 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4126 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4127 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4128 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4129 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4130 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4131 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4132 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4133 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4134 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4135 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4136 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4137 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4138 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4139 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4140 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4141 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4142 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4143 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4144 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4145 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4146 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4147 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4148 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4149 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4150 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4151 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4152 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4153 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4154 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4155 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4156 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4157
4158 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4159 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4160 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4161 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4162 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4163 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4164 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4165 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4166 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4167 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4168 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4169 { 0, CPENC(0,0,0,0,0), 0 },
4170 };
4171
4172 bfd_boolean
4173 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4174 {
4175 return (reg->flags & F_DEPRECATED) != 0;
4176 }
4177
/* Return TRUE if the system register REG is available under the feature
   set FEATURES.  Registers without the F_ARCHEXT flag belong to the base
   architecture and are always supported; otherwise REG->value (the
   encoding stored in aarch64_sys_regs) is matched against the encodings
   of the known extension registers and the corresponding feature bit in
   FEATURES is required.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  /* Base-architecture register: always supported.  */
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* SCXTNUM_ELx registers.  */
  if ((reg->value == CPENC (3, 3, C13, C0, 7)
       || reg->value == CPENC (3, 0, C13, C0, 7)
       || reg->value == CPENC (3, 4, C13, C0, 7)
       || reg->value == CPENC (3, 6, C13, C0, 7)
       || reg->value == CPENC (3, 5, C13, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SCXTNUM))
    return FALSE;

  /* ID_PFR2_EL1 register.  */
  if (reg->value == CPENC(3, 0, C0, C3, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_ID_PFR2))
    return FALSE;

  /* SSBS.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(3,C2,6)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
     ERXMISC0_EL1 AND ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  /* ARMv8.3 Pointer authentication keys.  */
  if ((reg->value == CPENC (3, 0, C2, C1, 0)
       || reg->value == CPENC (3, 0, C2, C1, 1)
       || reg->value == CPENC (3, 0, C2, C1, 2)
       || reg->value == CPENC (3, 0, C2, C1, 3)
       || reg->value == CPENC (3, 0, C2, C2, 0)
       || reg->value == CPENC (3, 0, C2, C2, 1)
       || reg->value == CPENC (3, 0, C2, C2, 2)
       || reg->value == CPENC (3, 0, C2, C2, 3)
       || reg->value == CPENC (3, 0, C2, C3, 0)
       || reg->value == CPENC (3, 0, C2, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
    return FALSE;

  /* SVE.  */
  if ((reg->value == CPENC (3, 0, C0, C4, 4)
       || reg->value == CPENC (3, 0, C1, C2, 0)
       || reg->value == CPENC (3, 4, C1, C2, 0)
       || reg->value == CPENC (3, 6, C1, C2, 0)
       || reg->value == CPENC (3, 5, C1, C2, 0)
       || reg->value == CPENC (3, 0, C0, C0, 7))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
    return FALSE;

  /* ARMv8.4 features.  */

  /* PSTATE.DIT.  */
  if (reg->value == CPEN_ (3, C2, 5)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* Virtualization extensions.  */
  if ((reg->value == CPENC(3, 4, C2, C6, 2)
       || reg->value == CPENC(3, 4, C2, C6, 0)
       || reg->value == CPENC(3, 4, C14, C4, 0)
       || reg->value == CPENC(3, 4, C14, C4, 2)
       || reg->value == CPENC(3, 4, C14, C4, 1)
       || reg->value == CPENC(3, 4, C14, C5, 0)
       || reg->value == CPENC(3, 4, C14, C5, 2)
       || reg->value == CPENC(3, 4, C14, C5, 1)
       || reg->value == CPENC(3, 4, C1, C3, 1)
       || reg->value == CPENC(3, 4, C2, C2, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* ARMv8.4 TLB instructions.  */
  if ((reg->value == CPENS (0, C8, C1, 0)
       || reg->value == CPENS (0, C8, C1, 1)
       || reg->value == CPENS (0, C8, C1, 2)
       || reg->value == CPENS (0, C8, C1, 3)
       || reg->value == CPENS (0, C8, C1, 5)
       || reg->value == CPENS (0, C8, C1, 7)
       || reg->value == CPENS (4, C8, C4, 0)
       || reg->value == CPENS (4, C8, C4, 4)
       || reg->value == CPENS (4, C8, C1, 1)
       || reg->value == CPENS (4, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 6)
       || reg->value == CPENS (6, C8, C1, 1)
       || reg->value == CPENS (6, C8, C1, 5)
       || reg->value == CPENS (4, C8, C1, 0)
       || reg->value == CPENS (4, C8, C1, 4)
       || reg->value == CPENS (6, C8, C1, 0)
       || reg->value == CPENS (0, C8, C6, 1)
       || reg->value == CPENS (0, C8, C6, 3)
       || reg->value == CPENS (0, C8, C6, 5)
       || reg->value == CPENS (0, C8, C6, 7)
       || reg->value == CPENS (0, C8, C2, 1)
       || reg->value == CPENS (0, C8, C2, 3)
       || reg->value == CPENS (0, C8, C2, 5)
       || reg->value == CPENS (0, C8, C2, 7)
       || reg->value == CPENS (0, C8, C5, 1)
       || reg->value == CPENS (0, C8, C5, 3)
       || reg->value == CPENS (0, C8, C5, 5)
       || reg->value == CPENS (0, C8, C5, 7)
       || reg->value == CPENS (4, C8, C0, 2)
       || reg->value == CPENS (4, C8, C0, 6)
       || reg->value == CPENS (4, C8, C4, 2)
       || reg->value == CPENS (4, C8, C4, 6)
       || reg->value == CPENS (4, C8, C4, 3)
       || reg->value == CPENS (4, C8, C4, 7)
       || reg->value == CPENS (4, C8, C6, 1)
       || reg->value == CPENS (4, C8, C6, 5)
       || reg->value == CPENS (4, C8, C2, 1)
       || reg->value == CPENS (4, C8, C2, 5)
       || reg->value == CPENS (4, C8, C5, 1)
       || reg->value == CPENS (4, C8, C5, 5)
       || reg->value == CPENS (6, C8, C6, 1)
       || reg->value == CPENS (6, C8, C6, 5)
       || reg->value == CPENS (6, C8, C2, 1)
       || reg->value == CPENS (6, C8, C2, 5)
       || reg->value == CPENS (6, C8, C5, 1)
       || reg->value == CPENS (6, C8, C5, 5))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
    return FALSE;

  /* Random Number Instructions.  For now they are available
     (and optional) only with ARMv8.5-A.  */
  if ((reg->value == CPENC (3, 3, C2, C4, 0)
       || reg->value == CPENC (3, 3, C2, C4, 1))
      && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RNG)
	   && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_5)))
    return FALSE;

  /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG.  */
  if ((reg->value == CPENC (3, 3, C4, C2, 7)
       || reg->value == CPENC (3, 0, C6, C6, 1)
       || reg->value == CPENC (3, 0, C6, C5, 0)
       || reg->value == CPENC (3, 4, C6, C5, 0)
       || reg->value == CPENC (3, 6, C6, C6, 0)
       || reg->value == CPENC (3, 5, C6, C6, 0)
       || reg->value == CPENC (3, 0, C1, C0, 5)
       || reg->value == CPENC (3, 0, C1, C0, 6))
      && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG)))
    return FALSE;

  /* No extension-specific match above vetoed the register.  */
  return TRUE;
}
4418
/* The use of CPENC below is fairly misleading: the fields in this table
   are not in CPENC form but in op2:op1 form.  They are encoded by
   ins_pstatefield, which simply shifts each value by the width of the
   fields in a loop.  So if you used CPENC, only the first field would be
   set and the rest would be masked out to 0.  For example, with op2 = 3
   and op1 = 2, CPENC would produce 0b110000000001000000 (0x30040),
   while the value wanted is 0b011010 (0x1a).  */
/* PSTATE fields accepted by the MSR (immediate) instruction.  The value
   is the op2:op1 encoding described in the comment above; entries with
   F_ARCHEXT are only available with an architecture extension (checked
   by aarch64_pstatefield_supported_p).  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",            0x05,   0 },
  { "daifset",          0x1e,   0 },
  { "daifclr",          0x1f,   0 },
  { "pan",              0x04,   F_ARCHEXT },
  { "uao",              0x03,   F_ARCHEXT },
  { "ssbs",             0x19,   F_ARCHEXT },
  { "dit",              0x1a,   F_ARCHEXT },
  { "tco",              0x1c,   F_ARCHEXT },
  { 0,          CPENC(0,0,0,0,0), 0 },  /* Terminator.  */
};
4438
4439 bfd_boolean
4440 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4441 const aarch64_sys_reg *reg)
4442 {
4443 if (!(reg->flags & F_ARCHEXT))
4444 return TRUE;
4445
4446 /* PAN. Values are from aarch64_pstatefields. */
4447 if (reg->value == 0x04
4448 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4449 return FALSE;
4450
4451 /* UAO. Values are from aarch64_pstatefields. */
4452 if (reg->value == 0x03
4453 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4454 return FALSE;
4455
4456 /* SSBS. Values are from aarch64_pstatefields. */
4457 if (reg->value == 0x19
4458 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4459 return FALSE;
4460
4461 /* DIT. Values are from aarch64_pstatefields. */
4462 if (reg->value == 0x1a
4463 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4464 return FALSE;
4465
4466 /* TCO. Values are from aarch64_pstatefields. */
4467 if (reg->value == 0x1c
4468 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4469 return FALSE;
4470
4471 return TRUE;
4472 }
4473
/* Operands of the IC (instruction cache maintenance) instruction.
   F_HASXT marks operations that take an Xt address operand.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }  /* Terminator.  */
};
4481
/* Operands of the DC (data cache maintenance) instruction.  F_HASXT
   marks operations that take an Xt operand; F_ARCHEXT marks operations
   that need an architecture extension (checked by
   aarch64_sys_ins_reg_supported_p).  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }  /* Terminator.  */
};
4514
/* Operands of the AT (address translation) instruction.  All take an
   Xt operand (F_HASXT); the s1e1rp/s1e1wp entries additionally need an
   architecture extension (F_ARCHEXT).  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }  /* Terminator.  */
};
4533
/* Operands of the TLBI (TLB invalidate) instruction.  F_HASXT marks
   operations that take an Xt operand; F_ARCHEXT marks operations that
   need an architecture extension (checked by
   aarch64_sys_ins_reg_supported_p).  */
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    /* Outer-shareable variants (F_ARCHEXT).  */
    { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",    CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",  CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",   CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",   CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",  CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",    CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",   CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",    CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",   CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",   CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",   CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",   CPENS (6, C8, C1, 0), F_ARCHEXT },

    /* Range-based variants (F_ARCHEXT).  */
    { "rvae1",     CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",    CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",    CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",   CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",   CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",  CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",  CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",   CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",  CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",  CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",  CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",     CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",    CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",   CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",  CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",   CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",  CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",     CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",    CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",   CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",  CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",   CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",  CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0,       CPENS(0,0,0,0), 0 }  /* Terminator.  */
};
4619
/* Operands of the speculation-restriction instructions (CFP/DVP/CPP).  */
const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }  /* Terminator.  */
};
4630
4631 bfd_boolean
4632 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4633 {
4634 return (sys_ins_reg->flags & F_HASXT) != 0;
4635 }
4636
/* Return TRUE if the system instruction operand REG (an entry of the
   aarch64_sys_regs_{ic,dc,at,tlbi,sr} tables) is available under the
   feature set FEATURES.  Operands without F_ARCHEXT belong to the base
   architecture and are always supported.  */
extern bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_ins_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C12, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C13, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return FALSE;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg->value == CPENS (0, C7, C6, 3)
       || reg->value == CPENS (0, C7, C6, 4)
       || reg->value == CPENS (0, C7, C10, 4)
       || reg->value == CPENS (0, C7, C14, 4)
       || reg->value == CPENS (3, C7, C10, 3)
       || reg->value == CPENS (3, C7, C12, 3)
       || reg->value == CPENS (3, C7, C13, 3)
       || reg->value == CPENS (3, C7, C14, 3)
       || reg->value == CPENS (3, C7, C4, 3)
       || reg->value == CPENS (0, C7, C6, 5)
       || reg->value == CPENS (0, C7, C6, 6)
       || reg->value == CPENS (0, C7, C10, 6)
       || reg->value == CPENS (0, C7, C14, 6)
       || reg->value == CPENS (3, C7, C10, 5)
       || reg->value == CPENS (3, C7, C12, 5)
       || reg->value == CPENS (3, C7, C13, 5)
       || reg->value == CPENS (3, C7, C14, 5)
       || reg->value == CPENS (3, C7, C4, 4))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
    return FALSE;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg->value == CPENS (0, C7, C9, 0)
       || reg->value == CPENS (0, C7, C9, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr.  */
  if (reg->value == CPENS (3, C7, C3, 0)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return FALSE;

  /* No extension-specific match above vetoed the operand.  */
  return TRUE;
}
4689
4690 #undef C0
4691 #undef C1
4692 #undef C2
4693 #undef C3
4694 #undef C4
4695 #undef C5
4696 #undef C6
4697 #undef C7
4698 #undef C8
4699 #undef C9
4700 #undef C10
4701 #undef C11
4702 #undef C12
4703 #undef C13
4704 #undef C14
4705 #undef C15
4706
/* Extract the single bit at position BT of instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of INSN.  The mask is built
   from an unsigned literal so that a 31-bit-wide field does not
   left-shift into the sign bit of a signed int (undefined behavior).
   Fields of 32 bits or more are still unsupported, as before.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1u << (((HI) - (LO)) + 1)) - 1))
4709
4710 static enum err_type
4711 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
4712 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
4713 bfd_boolean encoding ATTRIBUTE_UNUSED,
4714 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4715 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4716 {
4717 int t = BITS (insn, 4, 0);
4718 int n = BITS (insn, 9, 5);
4719 int t2 = BITS (insn, 14, 10);
4720
4721 if (BIT (insn, 23))
4722 {
4723 /* Write back enabled. */
4724 if ((t == n || t2 == n) && n != 31)
4725 return ERR_UND;
4726 }
4727
4728 if (BIT (insn, 22))
4729 {
4730 /* Load */
4731 if (t == t2)
4732 return ERR_UND;
4733 }
4734
4735 return ERR_OK;
4736 }
4737
4738 /* Verifier for vector by element 3 operands functions where the
4739 conditions `if sz:L == 11 then UNDEFINED` holds. */
4740
4741 static enum err_type
4742 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
4743 bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
4744 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4745 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4746 {
4747 const aarch64_insn undef_pattern = 0x3;
4748 aarch64_insn value;
4749
4750 assert (inst->opcode);
4751 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
4752 value = encoding ? inst->value : insn;
4753 assert (value);
4754
4755 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
4756 return ERR_UND;
4757
4758 return ERR_OK;
4759 }
4760
4761 /* Initialize an instruction sequence insn_sequence with the instruction INST.
4762 If INST is NULL the given insn_sequence is cleared and the sequence is left
4763 uninitialized. */
4764
4765 void
4766 init_insn_sequence (const struct aarch64_inst *inst,
4767 aarch64_instr_sequence *insn_sequence)
4768 {
4769 int num_req_entries = 0;
4770 insn_sequence->next_insn = 0;
4771 insn_sequence->num_insns = num_req_entries;
4772 if (insn_sequence->instr)
4773 XDELETE (insn_sequence->instr);
4774 insn_sequence->instr = NULL;
4775
4776 if (inst)
4777 {
4778 insn_sequence->instr = XNEW (aarch64_inst);
4779 memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
4780 }
4781
4782 /* Handle all the cases here. May need to think of something smarter than
4783 a giant if/else chain if this grows. At that time, a lookup table may be
4784 best. */
4785 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
4786 num_req_entries = 1;
4787
4788 if (insn_sequence->current_insns)
4789 XDELETEVEC (insn_sequence->current_insns);
4790 insn_sequence->current_insns = NULL;
4791
4792 if (num_req_entries != 0)
4793 {
4794 size_t size = num_req_entries * sizeof (aarch64_inst);
4795 insn_sequence->current_insns
4796 = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
4797 memset (insn_sequence->current_insns, 0, size);
4798 }
4799 }
4800
4801
4802 /* This function verifies that the instruction INST adheres to its specified
4803 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
4804 returned and MISMATCH_DETAIL contains the reason why verification failed.
4805
4806 The function is called both during assembly and disassembly. If assembling
4807 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
4808 and will contain the PC of the current instruction w.r.t to the section.
4809
4810 If ENCODING and PC=0 then you are at a start of a section. The constraints
4811 are verified against the given state insn_sequence which is updated as it
4812 transitions through the verification. */
4813
4814 enum err_type
4815 verify_constraints (const struct aarch64_inst *inst,
4816 const aarch64_insn insn ATTRIBUTE_UNUSED,
4817 bfd_vma pc,
4818 bfd_boolean encoding,
4819 aarch64_operand_error *mismatch_detail,
4820 aarch64_instr_sequence *insn_sequence)
4821 {
4822 assert (inst);
4823 assert (inst->opcode);
4824
4825 const struct aarch64_opcode *opcode = inst->opcode;
4826 if (!opcode->constraints && !insn_sequence->instr)
4827 return ERR_OK;
4828
4829 assert (insn_sequence);
4830
4831 enum err_type res = ERR_OK;
4832
4833 /* This instruction puts a constraint on the insn_sequence. */
4834 if (opcode->flags & F_SCAN)
4835 {
4836 if (insn_sequence->instr)
4837 {
4838 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4839 mismatch_detail->error = _("instruction opens new dependency "
4840 "sequence without ending previous one");
4841 mismatch_detail->index = -1;
4842 mismatch_detail->non_fatal = TRUE;
4843 res = ERR_VFI;
4844 }
4845
4846 init_insn_sequence (inst, insn_sequence);
4847 return res;
4848 }
4849
4850 /* Verify constraints on an existing sequence. */
4851 if (insn_sequence->instr)
4852 {
4853 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
4854 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
4855 closed a previous one that we should have. */
4856 if (!encoding && pc == 0)
4857 {
4858 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4859 mismatch_detail->error = _("previous `movprfx' sequence not closed");
4860 mismatch_detail->index = -1;
4861 mismatch_detail->non_fatal = TRUE;
4862 res = ERR_VFI;
4863 /* Reset the sequence. */
4864 init_insn_sequence (NULL, insn_sequence);
4865 return res;
4866 }
4867
4868 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
4869 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
4870 {
4871 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4872 instruction for better error messages. */
4873 if (!opcode->avariant
4874 || !(*opcode->avariant &
4875 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
4876 {
4877 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4878 mismatch_detail->error = _("SVE instruction expected after "
4879 "`movprfx'");
4880 mismatch_detail->index = -1;
4881 mismatch_detail->non_fatal = TRUE;
4882 res = ERR_VFI;
4883 goto done;
4884 }
4885
4886 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4887 instruction that is allowed to be used with a MOVPRFX. */
4888 if (!(opcode->constraints & C_SCAN_MOVPRFX))
4889 {
4890 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4891 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
4892 "expected");
4893 mismatch_detail->index = -1;
4894 mismatch_detail->non_fatal = TRUE;
4895 res = ERR_VFI;
4896 goto done;
4897 }
4898
4899 /* Next check for usage of the predicate register. */
4900 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
4901 aarch64_opnd_info blk_pred, inst_pred;
4902 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
4903 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
4904 bfd_boolean predicated = FALSE;
4905 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
4906
4907 /* Determine if the movprfx instruction used is predicated or not. */
4908 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
4909 {
4910 predicated = TRUE;
4911 blk_pred = insn_sequence->instr->operands[1];
4912 }
4913
4914 unsigned char max_elem_size = 0;
4915 unsigned char current_elem_size;
4916 int num_op_used = 0, last_op_usage = 0;
4917 int i, inst_pred_idx = -1;
4918 int num_ops = aarch64_num_of_operands (opcode);
4919 for (i = 0; i < num_ops; i++)
4920 {
4921 aarch64_opnd_info inst_op = inst->operands[i];
4922 switch (inst_op.type)
4923 {
4924 case AARCH64_OPND_SVE_Zd:
4925 case AARCH64_OPND_SVE_Zm_5:
4926 case AARCH64_OPND_SVE_Zm_16:
4927 case AARCH64_OPND_SVE_Zn:
4928 case AARCH64_OPND_SVE_Zt:
4929 case AARCH64_OPND_SVE_Vm:
4930 case AARCH64_OPND_SVE_Vn:
4931 case AARCH64_OPND_Va:
4932 case AARCH64_OPND_Vn:
4933 case AARCH64_OPND_Vm:
4934 case AARCH64_OPND_Sn:
4935 case AARCH64_OPND_Sm:
4936 case AARCH64_OPND_Rn:
4937 case AARCH64_OPND_Rm:
4938 case AARCH64_OPND_Rn_SP:
4939 case AARCH64_OPND_Rt_SP:
4940 case AARCH64_OPND_Rm_SP:
4941 if (inst_op.reg.regno == blk_dest.reg.regno)
4942 {
4943 num_op_used++;
4944 last_op_usage = i;
4945 }
4946 current_elem_size
4947 = aarch64_get_qualifier_esize (inst_op.qualifier);
4948 if (current_elem_size > max_elem_size)
4949 max_elem_size = current_elem_size;
4950 break;
4951 case AARCH64_OPND_SVE_Pd:
4952 case AARCH64_OPND_SVE_Pg3:
4953 case AARCH64_OPND_SVE_Pg4_5:
4954 case AARCH64_OPND_SVE_Pg4_10:
4955 case AARCH64_OPND_SVE_Pg4_16:
4956 case AARCH64_OPND_SVE_Pm:
4957 case AARCH64_OPND_SVE_Pn:
4958 case AARCH64_OPND_SVE_Pt:
4959 inst_pred = inst_op;
4960 inst_pred_idx = i;
4961 break;
4962 default:
4963 break;
4964 }
4965 }
4966
4967 assert (max_elem_size != 0);
4968 aarch64_opnd_info inst_dest = inst->operands[0];
4969 /* Determine the size that should be used to compare against the
4970 movprfx size. */
4971 current_elem_size
4972 = opcode->constraints & C_MAX_ELEM
4973 ? max_elem_size
4974 : aarch64_get_qualifier_esize (inst_dest.qualifier);
4975
4976 /* If movprfx is predicated do some extra checks. */
4977 if (predicated)
4978 {
4979 /* The instruction must be predicated. */
4980 if (inst_pred_idx < 0)
4981 {
4982 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4983 mismatch_detail->error = _("predicated instruction expected "
4984 "after `movprfx'");
4985 mismatch_detail->index = -1;
4986 mismatch_detail->non_fatal = TRUE;
4987 res = ERR_VFI;
4988 goto done;
4989 }
4990
4991 /* The instruction must have a merging predicate. */
4992 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
4993 {
4994 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4995 mismatch_detail->error = _("merging predicate expected due "
4996 "to preceding `movprfx'");
4997 mismatch_detail->index = inst_pred_idx;
4998 mismatch_detail->non_fatal = TRUE;
4999 res = ERR_VFI;
5000 goto done;
5001 }
5002
5003 /* The same register must be used in instruction. */
5004 if (blk_pred.reg.regno != inst_pred.reg.regno)
5005 {
5006 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5007 mismatch_detail->error = _("predicate register differs "
5008 "from that in preceding "
5009 "`movprfx'");
5010 mismatch_detail->index = inst_pred_idx;
5011 mismatch_detail->non_fatal = TRUE;
5012 res = ERR_VFI;
5013 goto done;
5014 }
5015 }
5016
5017 /* Destructive operations by definition must allow one usage of the
5018 same register. */
5019 int allowed_usage
5020 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5021
5022 /* Operand is not used at all. */
5023 if (num_op_used == 0)
5024 {
5025 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5026 mismatch_detail->error = _("output register of preceding "
5027 "`movprfx' not used in current "
5028 "instruction");
5029 mismatch_detail->index = 0;
5030 mismatch_detail->non_fatal = TRUE;
5031 res = ERR_VFI;
5032 goto done;
5033 }
5034
5035 /* We now know it's used, now determine exactly where it's used. */
5036 if (blk_dest.reg.regno != inst_dest.reg.regno)
5037 {
5038 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5039 mismatch_detail->error = _("output register of preceding "
5040 "`movprfx' expected as output");
5041 mismatch_detail->index = 0;
5042 mismatch_detail->non_fatal = TRUE;
5043 res = ERR_VFI;
5044 goto done;
5045 }
5046
5047 /* Operand used more than allowed for the specific opcode type. */
5048 if (num_op_used > allowed_usage)
5049 {
5050 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5051 mismatch_detail->error = _("output register of preceding "
5052 "`movprfx' used as input");
5053 mismatch_detail->index = last_op_usage;
5054 mismatch_detail->non_fatal = TRUE;
5055 res = ERR_VFI;
5056 goto done;
5057 }
5058
5059 /* Now the only thing left is the qualifiers checks. The register
5060 must have the same maximum element size. */
5061 if (inst_dest.qualifier
5062 && blk_dest.qualifier
5063 && current_elem_size
5064 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5065 {
5066 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5067 mismatch_detail->error = _("register size not compatible with "
5068 "previous `movprfx'");
5069 mismatch_detail->index = 0;
5070 mismatch_detail->non_fatal = TRUE;
5071 res = ERR_VFI;
5072 goto done;
5073 }
5074 }
5075
5076 done:
5077 /* Add the new instruction to the sequence. */
5078 memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
5079 inst, sizeof (aarch64_inst));
5080
5081 /* Check if sequence is now full. */
5082 if (insn_sequence->next_insn >= insn_sequence->num_insns)
5083 {
5084 /* Sequence is full, but we don't have anything special to do for now,
5085 so clear and reset it. */
5086 init_insn_sequence (NULL, insn_sequence);
5087 }
5088 }
5089
5090 return res;
5091 }
5092
5093
5094 /* Return true if VALUE cannot be moved into an SVE register using DUP
5095 (with any element size, not just ESIZE) and if using DUPM would
5096 therefore be OK. ESIZE is the number of bytes in the immediate. */
5097
5098 bfd_boolean
5099 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
5100 {
5101 int64_t svalue = uvalue;
5102 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
5103
5104 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
5105 return FALSE;
5106 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
5107 {
5108 svalue = (int32_t) uvalue;
5109 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
5110 {
5111 svalue = (int16_t) uvalue;
5112 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
5113 return FALSE;
5114 }
5115 }
5116 if ((svalue & 0xff) == 0)
5117 svalue /= 256;
5118 return svalue < -128 || svalue >= 128;
5119 }
5120
5121 /* Include the opcode description table as well as the operand description
5122 table. */
5123 #define VERIFIER(x) verify_##x
5124 #include "aarch64-tbl.h"
This page took 0.166915 seconds and 5 git commands to generate.