[AArch64][SVE 26/32] Add SVE MUL VL addressing modes
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* Run-time switch: non-zero enables the verbose qualifier-matching dumps
   emitted by the DEBUG_TRACE/dump_* helpers below.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  A null entry indicates a reserved meaning.
   Entries 14-28 are reserved in the architecture; only the named
   patterns may be assembled or printed.  */
const char *const aarch64_sve_pattern_array[32] = {
  /* 0-7.  */
  "pow2",
  "vl1",
  "vl2",
  "vl3",
  "vl4",
  "vl5",
  "vl6",
  "vl7",
  /* 8-15.  */
  "vl8",
  "vl16",
  "vl32",
  "vl64",
  "vl128",
  "vl256",
  0,
  0,
  /* 16-23.  */
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  0,
  /* 24-31.  */
  0,
  0,
  0,
  0,
  0,
  "mul4",
  "mul3",
  "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  A null entry indicates a reserved meaning.
   Values 6-7 and 14-15 are reserved.  */
const char *const aarch64_sve_prfop_array[16] = {
  /* 0-7.  */
  "pldl1keep",
  "pldl1strm",
  "pldl2keep",
  "pldl2strm",
  "pldl3keep",
  "pldl3strm",
  0,
  0,
  /* 8-15.  */
  "pstl1keep",
  "pstl1strm",
  "pstl2keep",
  "pstl2strm",
  "pstl3keep",
  "pstl3strm",
  0,
  0
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Shape of the qualifier sequence of an AdvSIMD instruction, used to pick
   which operand carries the decisive size:Q information.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* Operand index (into the instruction's operand list) that determines the
   size:Q encoding, indexed by enum data_pattern.  */
static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
/* Given a sequence of qualifiers in QUALIFIERS, determine and return the
   data pattern (see enum data_pattern above).

   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.  The checks are ordered:
   3SAME is tested before LONG/WIDE, so the first matching shape wins.  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  if (vector_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. v.4s, v.4s, v.4s
	 or v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* e.g. v.8h, v.8b, v.8b.
	 or v.4s, v.4h, v.h[2].
	 or v.8h, v.16b.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* e.g. v.8h, v.8h, v.8b.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Bitfield descriptor table: each entry is { lsb, width } within the 32-bit
   instruction word.  NOTE(review): entries appear to be indexed by the FLD_*
   enumerators declared in aarch64-opc.h, so the order here must stay in sync
   with that enum — confirm before reordering.  */
const aarch64_field fields[] =
{
    { 0,  0 },	/* NIL.  */
    { 0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    { 0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    { 5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16, 3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    { 5, 19 },	/* imm19: e.g. in CBZ.  */
    { 5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29, 2 },	/* immlo: e.g. in ADRP.  */
    { 22, 2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10, 2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29, 1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30, 1 },	/* Q: in most AdvSIMD instructions.  */
    { 0,  5 },	/* Rt: in load/store instructions.  */
    { 0,  5 },	/* Rd: in many integer instructions.  */
    { 5,  5 },	/* Rn: in many integer instructions.  */
    { 10, 5 },	/* Rt2: in load/store pair instructions.  */
    { 10, 5 },	/* Ra: in fp instructions.  */
    { 5,  3 },	/* op2: in the system instructions.  */
    { 8,  4 },	/* CRm: in the system instructions.  */
    { 12, 4 },	/* CRn: in the system instructions.  */
    { 16, 3 },	/* op1: in the system instructions.  */
    { 19, 2 },	/* op0: in the system instructions.  */
    { 10, 3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12, 4 },	/* cond: condition flags as a source operand.  */
    { 12, 4 },	/* opcode: in advsimd load/store instructions.  */
    { 12, 4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13, 3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13, 2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16, 5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16, 5 },	/* Rs: in load/store exclusive instructions.  */
    { 13, 3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12, 1 },	/* S: in load/store reg offset instructions.  */
    { 21, 2 },	/* hw: in move wide constant instructions.  */
    { 22, 2 },	/* opc: in load/store reg offset instructions.  */
    { 23, 1 },	/* opc1: in load/store reg offset instructions.  */
    { 22, 2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22, 2 },	/* type: floating point type field in fp data inst.  */
    { 30, 2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10, 6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11, 4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16, 5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15, 7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13, 8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12, 9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    { 5, 14 },	/* imm14: in test bit and branch instructions.  */
    { 5, 16 },	/* imm16: in exception instructions.  */
    { 0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10, 6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16, 6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16, 3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19, 4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22, 1 },	/* N: in logical (immediate) instructions.  */
    { 11, 1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24, 1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31, 1 },	/* sf: in integer data processing instructions.  */
    { 30, 1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11, 1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21, 1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20, 1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31, 1 },	/* b5: in the test bit and branch instructions.  */
    { 19, 5 },	/* b40: in the test bit and branch instructions.  */
    { 10, 6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
    { 0, 4 },	/* SVE_Pd: p0-p15, bits [3,0].  */
    { 10, 3 },	/* SVE_Pg3: p0-p7, bits [12,10].  */
    { 5, 4 },	/* SVE_Pg4_5: p0-p15, bits [8,5].  */
    { 10, 4 },	/* SVE_Pg4_10: p0-p15, bits [13,10].  */
    { 16, 4 },	/* SVE_Pg4_16: p0-p15, bits [19,16].  */
    { 16, 4 },	/* SVE_Pm: p0-p15, bits [19,16].  */
    { 5, 4 },	/* SVE_Pn: p0-p15, bits [8,5].  */
    { 0, 4 },	/* SVE_Pt: p0-p15, bits [3,0].  */
    { 5, 5 },	/* SVE_Za_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Za_16: SVE vector register, bits [20,16].  */
    { 0, 5 },	/* SVE_Zd: SVE vector register. bits [4,0].  */
    { 5, 5 },	/* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16, 5 },	/* SVE_Zm_16: SVE vector register, bits [20,16]. */
    { 5, 5 },	/* SVE_Zn: SVE vector register, bits [9,5].  */
    { 0, 5 },	/* SVE_Zt: SVE vector register, bits [4,0].  */
    { 16, 4 },	/* SVE_imm4: 4-bit immediate field.  */
    { 16, 6 },	/* SVE_imm6: 6-bit immediate field.  */
    { 10, 2 },	/* SVE_msz: 2-bit shift amount for ADR.  */
    { 5, 5 },	/* SVE_pattern: vector pattern enumeration.  */
    { 0, 4 },	/* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
    { 22, 2 },	/* SVE_tszh: triangular size select high, bits [23,22].  */
    { 14, 1 },	/* SVE_xs_14: UXTW/SXTW select (bit 14).  */
    { 22, 1 }	/* SVE_xs_22: UXTW/SXTW select (bit 22).  */
};
291
292 enum aarch64_operand_class
293 aarch64_get_operand_class (enum aarch64_opnd type)
294 {
295 return aarch64_operands[type].op_class;
296 }
297
298 const char *
299 aarch64_get_operand_name (enum aarch64_opnd type)
300 {
301 return aarch64_operands[type].name;
302 }
303
304 /* Get operand description string.
305 This is usually for the diagnosis purpose. */
306 const char *
307 aarch64_get_operand_desc (enum aarch64_opnd type)
308 {
309 return aarch64_operands[type].desc;
310 }
311
/* Table of all conditional affixes, indexed by the 4-bit condition code.
   The first name in each entry is the canonical spelling; the rest are
   accepted aliases (e.g. "hs" for "cs").  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq"}, 0x0},
  {{"ne"}, 0x1},
  {{"cs", "hs"}, 0x2},
  {{"cc", "lo", "ul"}, 0x3},
  {{"mi"}, 0x4},
  {{"pl"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi"}, 0x8},
  {{"ls"}, 0x9},
  {{"ge"}, 0xa},
  {{"lt"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
332
333 const aarch64_cond *
334 get_cond_from_value (aarch64_insn value)
335 {
336 assert (value < 16);
337 return &aarch64_conds[(unsigned int) value];
338 }
339
340 const aarch64_cond *
341 get_inverted_cond (const aarch64_cond *cond)
342 {
343 return &aarch64_conds[cond->value ^ 0x1];
344 }
345
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.
   NOTE(review): aarch64_get_operand_modifier_from_value relies on the
   relative order of the "lsl".."sxtx" rows — keep it fixed.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
    {"none", 0x0},
    {"msl",  0x0},
    {"ror",  0x3},
    {"asr",  0x2},
    {"lsr",  0x1},
    {"lsl",  0x0},
    {"uxtb", 0x0},
    {"uxth", 0x1},
    {"uxtw", 0x2},
    {"uxtx", 0x3},
    {"sxtb", 0x4},
    {"sxth", 0x5},
    {"sxtw", 0x6},
    {"sxtx", 0x7},
    {"mul", 0x0},
    {"mul vl", 0x0},
    {NULL, 0},
};
371
372 enum aarch64_modifier_kind
373 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
374 {
375 return desc - aarch64_operand_modifiers;
376 }
377
378 aarch64_insn
379 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
380 {
381 return aarch64_operand_modifiers[kind].value;
382 }
383
384 enum aarch64_modifier_kind
385 aarch64_get_operand_modifier_from_value (aarch64_insn value,
386 bfd_boolean extend_p)
387 {
388 if (extend_p == TRUE)
389 return AARCH64_MOD_UXTB + value;
390 else
391 return AARCH64_MOD_LSL - value;
392 }
393
394 bfd_boolean
395 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
396 {
397 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
398 ? TRUE : FALSE;
399 }
400
401 static inline bfd_boolean
402 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
403 {
404 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
405 ? TRUE : FALSE;
406 }
407
/* DMB/DSB/ISB barrier option names, indexed by the 4-bit CRm value.
   Reserved encodings print as their raw "#0xNN" form.  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
    { "#0x00", 0x0 },
    { "oshld", 0x1 },
    { "oshst", 0x2 },
    { "osh",   0x3 },
    { "#0x04", 0x4 },
    { "nshld", 0x5 },
    { "nshst", 0x6 },
    { "nsh",   0x7 },
    { "#0x08", 0x8 },
    { "ishld", 0x9 },
    { "ishst", 0xa },
    { "ish",   0xb },
    { "#0x0c", 0xc },
    { "ld",    0xd },
    { "st",    0xe },
    { "sy",    0xf },
};
427
/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
440
/* PRFM prefetch operation names, indexed by the 5-bit prfop encoding.
   op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1
   A NULL name marks a reserved encoding, printed as an immediate.  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
481 \f
482 /* Utilities on value constraint. */
483
/* Return 1 iff VALUE lies in the inclusive range [LOW, HIGH].  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return value >= low && value <= high;
}
489
/* Return true if VALUE is a multiple of ALIGN.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  return value % align == 0 ? 1 : 0;
}
496
/* Return 1 iff VALUE is representable as a WIDTH-bit two's-complement
   signed field, i.e. -2^(WIDTH-1) <= VALUE < 2^(WIDTH-1).  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (int64_t) 1 << (width - 1);
    return -limit <= value && value < limit;
  }
}
510
/* Return 1 iff VALUE is representable as a WIDTH-bit unsigned field,
   i.e. 0 <= VALUE < 2^WIDTH.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    int64_t limit = (int64_t) 1 << width;
    return 0 <= value && value < limit;
  }
}
524
525 /* Return 1 if OPERAND is SP or WSP. */
526 int
527 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
528 {
529 return ((aarch64_get_operand_class (operand->type)
530 == AARCH64_OPND_CLASS_INT_REG)
531 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
532 && operand->reg.regno == 31);
533 }
534
535 /* Return 1 if OPERAND is XZR or WZP. */
536 int
537 aarch64_zero_register_p (const aarch64_opnd_info *operand)
538 {
539 return ((aarch64_get_operand_class (operand->type)
540 == AARCH64_OPND_CLASS_INT_REG)
541 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
542 && operand->reg.regno == 31);
543 }
544
545 /* Return true if the operand *OPERAND that has the operand code
546 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
547 qualified by the qualifier TARGET. */
548
549 static inline int
550 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
551 aarch64_opnd_qualifier_t target)
552 {
553 switch (operand->qualifier)
554 {
555 case AARCH64_OPND_QLF_W:
556 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
557 return 1;
558 break;
559 case AARCH64_OPND_QLF_X:
560 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
561 return 1;
562 break;
563 case AARCH64_OPND_QLF_WSP:
564 if (target == AARCH64_OPND_QLF_W
565 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
566 return 1;
567 break;
568 case AARCH64_OPND_QLF_SP:
569 if (target == AARCH64_OPND_QLF_X
570 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
571 return 1;
572 break;
573 default:
574 break;
575 }
576
577 return 0;
578 }
579
580 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
581 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
582
583 Return NIL if more than one expected qualifiers are found. */
584
585 aarch64_opnd_qualifier_t
586 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
587 int idx,
588 const aarch64_opnd_qualifier_t known_qlf,
589 int known_idx)
590 {
591 int i, saved_i;
592
593 /* Special case.
594
595 When the known qualifier is NIL, we have to assume that there is only
596 one qualifier sequence in the *QSEQ_LIST and return the corresponding
597 qualifier directly. One scenario is that for instruction
598 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
599 which has only one possible valid qualifier sequence
600 NIL, S_D
601 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
602 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
603
604 Because the qualifier NIL has dual roles in the qualifier sequence:
605 it can mean no qualifier for the operand, or the qualifer sequence is
606 not in use (when all qualifiers in the sequence are NILs), we have to
607 handle this special case here. */
608 if (known_qlf == AARCH64_OPND_NIL)
609 {
610 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
611 return qseq_list[0][idx];
612 }
613
614 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
615 {
616 if (qseq_list[i][known_idx] == known_qlf)
617 {
618 if (saved_i != -1)
619 /* More than one sequences are found to have KNOWN_QLF at
620 KNOWN_IDX. */
621 return AARCH64_OPND_NIL;
622 saved_i = i;
623 }
624 }
625
626 return qseq_list[saved_i][idx];
627 }
628
/* Category of an operand qualifier; selects how the three data fields of
   struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size / element count / standard
     encoding value; for OQK_VALUE_IN_RANGE they are lower bound / upper
     bound / unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
649
/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  /* SVE predication qualifiers (zeroing/merging); sizes unused.  */
  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): the kind field here is literal 0 (== OQK_NIL) rather than
     OQK_MISC — looks intentional but worth confirming against users of
     OQK_MISC.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
704
705 static inline bfd_boolean
706 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
707 {
708 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
709 ? TRUE : FALSE;
710 }
711
712 static inline bfd_boolean
713 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
714 {
715 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
716 ? TRUE : FALSE;
717 }
718
719 const char*
720 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
721 {
722 return aarch64_opnd_qualifiers[qualifier].desc;
723 }
724
725 /* Given an operand qualifier, return the expected data element size
726 of a qualified operand. */
727 unsigned char
728 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
729 {
730 assert (operand_variant_qualifier_p (qualifier) == TRUE);
731 return aarch64_opnd_qualifiers[qualifier].data0;
732 }
733
734 unsigned char
735 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
736 {
737 assert (operand_variant_qualifier_p (qualifier) == TRUE);
738 return aarch64_opnd_qualifiers[qualifier].data1;
739 }
740
741 aarch64_insn
742 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
743 {
744 assert (operand_variant_qualifier_p (qualifier) == TRUE);
745 return aarch64_opnd_qualifiers[qualifier].data2;
746 }
747
748 static int
749 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
750 {
751 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
752 return aarch64_opnd_qualifiers[qualifier].data0;
753 }
754
755 static int
756 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
757 {
758 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
759 return aarch64_opnd_qualifiers[qualifier].data1;
760 }
761
#ifdef DEBUG_AARCH64
/* printf-style debug output, prefixed with "#### " and newline-terminated.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Dump the AARCH64_MAX_OPND_NUM qualifiers starting at QUALIFIER on one
   line, comma-separated.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently held by the operands OPND side by side
   with the candidate sequence QUALIFIER being matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
799
800 /* TODO improve this, we can have an extra field at the runtime to
801 store the number of operands rather than calculating it every time. */
802
803 int
804 aarch64_num_of_operands (const aarch64_opcode *opcode)
805 {
806 int i = 0;
807 const enum aarch64_opnd *opnds = opcode->operands;
808 while (opnds[i++] != AARCH64_OPND_NIL)
809 ;
810 --i;
811 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
812 return i;
813 }
814
815 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
816 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
817
818 N.B. on the entry, it is very likely that only some operands in *INST
819 have had their qualifiers been established.
820
821 If STOP_AT is not -1, the function will only try to match
822 the qualifier sequence for operands before and including the operand
823 of index STOP_AT; and on success *RET will only be filled with the first
824 (STOP_AT+1) qualifiers.
825
826 A couple examples of the matching algorithm:
827
828 X,W,NIL should match
829 X,W,NIL
830
831 NIL,NIL should match
832 X ,NIL
833
834 Apart from serving the main encoding routine, this can also be called
835 during or after the operand decoding. */
836
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  /* An all-NIL first sequence means "no qualifiers in use": only a
	     success when it is the very first pattern.  */
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      /* QUALIFIERS_LIST still points at the matched sequence (the loop above
	 breaks before advancing past it).  */
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Only the first (STOP_AT+1) qualifiers are filled; the rest are
	 cleared to NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
939
940 /* Operand qualifier matching and resolving.
941
942 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
943 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
944
945 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
946 succeeds. */
947
static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
{
  int i, nops;
  aarch64_opnd_qualifier_seq_t qualifiers;

  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
			       qualifiers))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  if (inst->opcode->flags & F_STRICT)
    {
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])
	  /* NOTE(review): FALSE (== 0) returned from an int-returning
	     function — harmless but inconsistent with the 0/1 style used
	     elsewhere in this function.  */
	  return FALSE;
    }

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
986
987 /* Return TRUE if VALUE is a wide constant that can be moved into a general
988 register by MOVZ.
989
990 IS32 indicates whether value is a 32-bit immediate or not.
991 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
992 amount will be returned in *SHIFT_AMOUNT. */
993
bfd_boolean
aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
{
  int amount;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);

  if (is32)
    {
      /* Allow all zeros or all ones in top 32-bits, so that
	 32-bit constant expressions like ~0x80000000 are
	 permitted.  */
      uint64_t ext = value;
      if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
	/* Immediate out of range.  */
	return FALSE;
      value &= (int64_t) 0xffffffff;
    }

  /* first, try movz then movn */
  /* MOVZ can materialize the constant iff it is a 16-bit chunk at one of
     the four (two, for 32-bit) halfword positions.  */
  amount = -1;
  if ((value & ((int64_t) 0xffff << 0)) == value)
    amount = 0;
  else if ((value & ((int64_t) 0xffff << 16)) == value)
    amount = 16;
  else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
    amount = 32;
  else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
    amount = 48;

  if (amount == -1)
    {
      DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
      return FALSE;
    }

  if (shift_amount != NULL)
    *shift_amount = amount;

  DEBUG_TRACE ("exit TRUE with amount %d", amount);

  return TRUE;
}
1037
1038 /* Build the accepted values for immediate logical SIMD instructions.
1039
1040 The standard encodings of the immediate value are:
1041 N imms immr SIMD size R S
1042 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1043 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1044 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1045 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1046 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1047 0 11110s 00000r 2 UInt(r) UInt(s)
1048 where all-ones value of S is reserved.
1049
1050 Let's call E the SIMD size.
1051
1052 The immediate value is: S+1 bits '1' rotated to the right by R.
1053
1054 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1055 (remember S != E - 1). */
1056
1057 #define TOTAL_IMM_NB 5334
1058
/* One entry of the bitmask-immediate lookup table: the expanded 64-bit
   immediate value and its standard 13-bit encoding (see the table above
   encode_immediate_bitfield).  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all TOTAL_IMM_NB valid bitmask immediates, filled in and
   sorted (by immediate value) by build_immediate_table, then searched
   with bsearch by aarch64_logical_immediate_p.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1066
1067 static int
1068 simd_imm_encoding_cmp(const void *i1, const void *i2)
1069 {
1070 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1071 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1072
1073 if (imm1->imm < imm2->imm)
1074 return -1;
1075 if (imm1->imm > imm2->imm)
1076 return +1;
1077 return 0;
1078 }
1079
/* Pack the standard encoding of a bitmask immediate into a 13-bit value:

   imm13<12> imm13<5:0> imm13<11:6> SIMD size R             S
   1         ssssss     rrrrrr       64       rrrrrr        ssssss
   0         0sssss     0rrrrr       32       rrrrr         sssss
   0         10ssss     00rrrr       16       rrrr          ssss
   0         110sss     000rrr       8        rrr           sss
   0         1110ss     0000rr       4        rr            ss
   0         11110s     00000r       2        r             s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;
  encoding |= (int) (r << 6);
  encoding |= is64 << 12;
  return encoding;
}
1093
/* Fill simd_immediates[] with every valid bitmask immediate and its
   standard encoding, then sort the table by immediate value so that
   aarch64_logical_immediate_p can look values up with bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  /* Iterate over the element sizes 2, 4, 8, 16, 32 and 64 bits.  */
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size. */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000 */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      /* S = E - 1 (all-ones within the element) is a reserved encoding,
	 hence s < e - 1.  */
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* The fall-throughs below are deliberate: an element of size
	       2^log_e must be doubled repeatedly until it fills 64 bits.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  /* Sort by immediate value so the table can be binary-searched.  */
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1154
1155 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1156 be accepted by logical (immediate) instructions
1157 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1158
1159 ESIZE is the number of bytes in the decoded immediate value.
1160 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1161 VALUE will be returned in *ENCODING. */
1162
1163 bfd_boolean
1164 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1165 {
1166 simd_imm_encoding imm_enc;
1167 const simd_imm_encoding *imm_encoding;
1168 static bfd_boolean initialized = FALSE;
1169 uint64_t upper;
1170 int i;
1171
1172 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1173 value, is32);
1174
1175 if (initialized == FALSE)
1176 {
1177 build_immediate_table ();
1178 initialized = TRUE;
1179 }
1180
1181 /* Allow all zeros or all ones in top bits, so that
1182 constant expressions like ~1 are permitted. */
1183 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1184 if ((value & ~upper) != value && (value | upper) != value)
1185 return FALSE;
1186
1187 /* Replicate to a full 64-bit value. */
1188 value &= ~upper;
1189 for (i = esize * 8; i < 64; i *= 2)
1190 value |= (value << i);
1191
1192 imm_enc.imm = value;
1193 imm_encoding = (const simd_imm_encoding *)
1194 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1195 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1196 if (imm_encoding == NULL)
1197 {
1198 DEBUG_TRACE ("exit with FALSE");
1199 return FALSE;
1200 }
1201 if (encoding != NULL)
1202 *encoding = imm_encoding->encoding;
1203 DEBUG_TRACE ("exit with TRUE");
1204 return TRUE;
1205 }
1206
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, result;

  result = 0;
  for (i = 0; i < 8; i++)
    {
      uint32_t byte = (uint32_t) (imm >> (8 * i)) & 0xff;
      switch (byte)
	{
	case 0xff:
	  /* An all-ones byte contributes a 1 bit at position i.  */
	  result |= 1 << i;
	  break;
	case 0x00:
	  break;
	default:
	  /* A byte that is neither all-ones nor all-zeros does not fit
	     the expanded-imm8 format.  */
	  return -1;
	}
    }
  return result;
}
1228
1229 /* Utility inline functions for operand_general_constraint_met_p. */
1230
/* Record an error of kind KIND for operand IDX in *MISMATCH_DETAIL,
   together with an optional static message ERROR (ERROR may be NULL when
   the error kind plus any data fields set by the caller is descriptive
   enough).  Do nothing if MISMATCH_DETAIL is NULL, i.e. when the caller
   does not want an error report (e.g. during disassembly).  */
static inline void
set_error (aarch64_operand_error *mismatch_detail,
	   enum aarch64_operand_error_kind kind, int idx,
	   const char* error)
{
  if (mismatch_detail == NULL)
    return;
  mismatch_detail->kind = kind;
  mismatch_detail->index = idx;
  mismatch_detail->error = error;
}
1242
1243 static inline void
1244 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1245 const char* error)
1246 {
1247 if (mismatch_detail == NULL)
1248 return;
1249 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1250 }
1251
1252 static inline void
1253 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1254 int idx, int lower_bound, int upper_bound,
1255 const char* error)
1256 {
1257 if (mismatch_detail == NULL)
1258 return;
1259 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1260 mismatch_detail->data[0] = lower_bound;
1261 mismatch_detail->data[1] = upper_bound;
1262 }
1263
1264 static inline void
1265 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1266 int idx, int lower_bound, int upper_bound)
1267 {
1268 if (mismatch_detail == NULL)
1269 return;
1270 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1271 _("immediate value"));
1272 }
1273
1274 static inline void
1275 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1276 int idx, int lower_bound, int upper_bound)
1277 {
1278 if (mismatch_detail == NULL)
1279 return;
1280 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1281 _("immediate offset"));
1282 }
1283
1284 static inline void
1285 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1286 int idx, int lower_bound, int upper_bound)
1287 {
1288 if (mismatch_detail == NULL)
1289 return;
1290 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1291 _("register number"));
1292 }
1293
1294 static inline void
1295 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1296 int idx, int lower_bound, int upper_bound)
1297 {
1298 if (mismatch_detail == NULL)
1299 return;
1300 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1301 _("register element index"));
1302 }
1303
1304 static inline void
1305 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1306 int idx, int lower_bound, int upper_bound)
1307 {
1308 if (mismatch_detail == NULL)
1309 return;
1310 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1311 _("shift amount"));
1312 }
1313
1314 /* Report that the MUL modifier in operand IDX should be in the range
1315 [LOWER_BOUND, UPPER_BOUND]. */
1316 static inline void
1317 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1318 int idx, int lower_bound, int upper_bound)
1319 {
1320 if (mismatch_detail == NULL)
1321 return;
1322 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1323 _("multiplier"));
1324 }
1325
1326 static inline void
1327 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1328 int alignment)
1329 {
1330 if (mismatch_detail == NULL)
1331 return;
1332 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1333 mismatch_detail->data[0] = alignment;
1334 }
1335
1336 static inline void
1337 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1338 int expected_num)
1339 {
1340 if (mismatch_detail == NULL)
1341 return;
1342 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1343 mismatch_detail->data[0] = expected_num;
1344 }
1345
1346 static inline void
1347 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1348 const char* error)
1349 {
1350 if (mismatch_detail == NULL)
1351 return;
1352 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1353 }
1354
1355 /* General constraint checking based on operand code.
1356
1357 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1358 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1359
1360 This function has to be called after the qualifiers for all operands
1361 have been resolved.
1362
1363 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1364 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1365 of error message during the disassembling where error message is not
1366 wanted. We avoid the dynamic construction of strings of error messages
1367 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1368 use a combination of error code, static string and some integer data to
1369 represent an error. */
1370
1371 static int
1372 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1373 enum aarch64_opnd type,
1374 const aarch64_opcode *opcode,
1375 aarch64_operand_error *mismatch_detail)
1376 {
1377 unsigned num, modifiers;
1378 unsigned char size;
1379 int64_t imm, min_value, max_value;
1380 const aarch64_opnd_info *opnd = opnds + idx;
1381 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1382
1383 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1384
1385 switch (aarch64_operands[type].op_class)
1386 {
1387 case AARCH64_OPND_CLASS_INT_REG:
1388 /* Check pair reg constraints for cas* instructions. */
1389 if (type == AARCH64_OPND_PAIRREG)
1390 {
1391 assert (idx == 1 || idx == 3);
1392 if (opnds[idx - 1].reg.regno % 2 != 0)
1393 {
1394 set_syntax_error (mismatch_detail, idx - 1,
1395 _("reg pair must start from even reg"));
1396 return 0;
1397 }
1398 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1399 {
1400 set_syntax_error (mismatch_detail, idx,
1401 _("reg pair must be contiguous"));
1402 return 0;
1403 }
1404 break;
1405 }
1406
1407 /* <Xt> may be optional in some IC and TLBI instructions. */
1408 if (type == AARCH64_OPND_Rt_SYS)
1409 {
1410 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1411 == AARCH64_OPND_CLASS_SYSTEM));
1412 if (opnds[1].present
1413 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1414 {
1415 set_other_error (mismatch_detail, idx, _("extraneous register"));
1416 return 0;
1417 }
1418 if (!opnds[1].present
1419 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1420 {
1421 set_other_error (mismatch_detail, idx, _("missing register"));
1422 return 0;
1423 }
1424 }
1425 switch (qualifier)
1426 {
1427 case AARCH64_OPND_QLF_WSP:
1428 case AARCH64_OPND_QLF_SP:
1429 if (!aarch64_stack_pointer_p (opnd))
1430 {
1431 set_other_error (mismatch_detail, idx,
1432 _("stack pointer register expected"));
1433 return 0;
1434 }
1435 break;
1436 default:
1437 break;
1438 }
1439 break;
1440
1441 case AARCH64_OPND_CLASS_SVE_REG:
1442 switch (type)
1443 {
1444 case AARCH64_OPND_SVE_Zn_INDEX:
1445 size = aarch64_get_qualifier_esize (opnd->qualifier);
1446 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1447 {
1448 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1449 0, 64 / size - 1);
1450 return 0;
1451 }
1452 break;
1453
1454 case AARCH64_OPND_SVE_ZnxN:
1455 case AARCH64_OPND_SVE_ZtxN:
1456 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1457 {
1458 set_other_error (mismatch_detail, idx,
1459 _("invalid register list"));
1460 return 0;
1461 }
1462 break;
1463
1464 default:
1465 break;
1466 }
1467 break;
1468
1469 case AARCH64_OPND_CLASS_PRED_REG:
1470 if (opnd->reg.regno >= 8
1471 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1472 {
1473 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1474 return 0;
1475 }
1476 break;
1477
1478 case AARCH64_OPND_CLASS_COND:
1479 if (type == AARCH64_OPND_COND1
1480 && (opnds[idx].cond->value & 0xe) == 0xe)
1481 {
1482 /* Not allow AL or NV. */
1483 set_syntax_error (mismatch_detail, idx, NULL);
1484 }
1485 break;
1486
1487 case AARCH64_OPND_CLASS_ADDRESS:
1488 /* Check writeback. */
1489 switch (opcode->iclass)
1490 {
1491 case ldst_pos:
1492 case ldst_unscaled:
1493 case ldstnapair_offs:
1494 case ldstpair_off:
1495 case ldst_unpriv:
1496 if (opnd->addr.writeback == 1)
1497 {
1498 set_syntax_error (mismatch_detail, idx,
1499 _("unexpected address writeback"));
1500 return 0;
1501 }
1502 break;
1503 case ldst_imm9:
1504 case ldstpair_indexed:
1505 case asisdlsep:
1506 case asisdlsop:
1507 if (opnd->addr.writeback == 0)
1508 {
1509 set_syntax_error (mismatch_detail, idx,
1510 _("address writeback expected"));
1511 return 0;
1512 }
1513 break;
1514 default:
1515 assert (opnd->addr.writeback == 0);
1516 break;
1517 }
1518 switch (type)
1519 {
1520 case AARCH64_OPND_ADDR_SIMM7:
1521 /* Scaled signed 7 bits immediate offset. */
1522 /* Get the size of the data element that is accessed, which may be
1523 different from that of the source register size,
1524 e.g. in strb/ldrb. */
1525 size = aarch64_get_qualifier_esize (opnd->qualifier);
1526 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1527 {
1528 set_offset_out_of_range_error (mismatch_detail, idx,
1529 -64 * size, 63 * size);
1530 return 0;
1531 }
1532 if (!value_aligned_p (opnd->addr.offset.imm, size))
1533 {
1534 set_unaligned_error (mismatch_detail, idx, size);
1535 return 0;
1536 }
1537 break;
1538 case AARCH64_OPND_ADDR_SIMM9:
1539 /* Unscaled signed 9 bits immediate offset. */
1540 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1541 {
1542 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1543 return 0;
1544 }
1545 break;
1546
1547 case AARCH64_OPND_ADDR_SIMM9_2:
1548 /* Unscaled signed 9 bits immediate offset, which has to be negative
1549 or unaligned. */
1550 size = aarch64_get_qualifier_esize (qualifier);
1551 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1552 && !value_aligned_p (opnd->addr.offset.imm, size))
1553 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1554 return 1;
1555 set_other_error (mismatch_detail, idx,
1556 _("negative or unaligned offset expected"));
1557 return 0;
1558
1559 case AARCH64_OPND_SIMD_ADDR_POST:
1560 /* AdvSIMD load/store multiple structures, post-index. */
1561 assert (idx == 1);
1562 if (opnd->addr.offset.is_reg)
1563 {
1564 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1565 return 1;
1566 else
1567 {
1568 set_other_error (mismatch_detail, idx,
1569 _("invalid register offset"));
1570 return 0;
1571 }
1572 }
1573 else
1574 {
1575 const aarch64_opnd_info *prev = &opnds[idx-1];
1576 unsigned num_bytes; /* total number of bytes transferred. */
1577 /* The opcode dependent area stores the number of elements in
1578 each structure to be loaded/stored. */
1579 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1580 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1581 /* Special handling of loading single structure to all lane. */
1582 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1583 * aarch64_get_qualifier_esize (prev->qualifier);
1584 else
1585 num_bytes = prev->reglist.num_regs
1586 * aarch64_get_qualifier_esize (prev->qualifier)
1587 * aarch64_get_qualifier_nelem (prev->qualifier);
1588 if ((int) num_bytes != opnd->addr.offset.imm)
1589 {
1590 set_other_error (mismatch_detail, idx,
1591 _("invalid post-increment amount"));
1592 return 0;
1593 }
1594 }
1595 break;
1596
1597 case AARCH64_OPND_ADDR_REGOFF:
1598 /* Get the size of the data element that is accessed, which may be
1599 different from that of the source register size,
1600 e.g. in strb/ldrb. */
1601 size = aarch64_get_qualifier_esize (opnd->qualifier);
1602 /* It is either no shift or shift by the binary logarithm of SIZE. */
1603 if (opnd->shifter.amount != 0
1604 && opnd->shifter.amount != (int)get_logsz (size))
1605 {
1606 set_other_error (mismatch_detail, idx,
1607 _("invalid shift amount"));
1608 return 0;
1609 }
1610 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1611 operators. */
1612 switch (opnd->shifter.kind)
1613 {
1614 case AARCH64_MOD_UXTW:
1615 case AARCH64_MOD_LSL:
1616 case AARCH64_MOD_SXTW:
1617 case AARCH64_MOD_SXTX: break;
1618 default:
1619 set_other_error (mismatch_detail, idx,
1620 _("invalid extend/shift operator"));
1621 return 0;
1622 }
1623 break;
1624
1625 case AARCH64_OPND_ADDR_UIMM12:
1626 imm = opnd->addr.offset.imm;
1627 /* Get the size of the data element that is accessed, which may be
1628 different from that of the source register size,
1629 e.g. in strb/ldrb. */
1630 size = aarch64_get_qualifier_esize (qualifier);
1631 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1632 {
1633 set_offset_out_of_range_error (mismatch_detail, idx,
1634 0, 4095 * size);
1635 return 0;
1636 }
1637 if (!value_aligned_p (opnd->addr.offset.imm, size))
1638 {
1639 set_unaligned_error (mismatch_detail, idx, size);
1640 return 0;
1641 }
1642 break;
1643
1644 case AARCH64_OPND_ADDR_PCREL14:
1645 case AARCH64_OPND_ADDR_PCREL19:
1646 case AARCH64_OPND_ADDR_PCREL21:
1647 case AARCH64_OPND_ADDR_PCREL26:
1648 imm = opnd->imm.value;
1649 if (operand_need_shift_by_two (get_operand_from_code (type)))
1650 {
1651 /* The offset value in a PC-relative branch instruction is alway
1652 4-byte aligned and is encoded without the lowest 2 bits. */
1653 if (!value_aligned_p (imm, 4))
1654 {
1655 set_unaligned_error (mismatch_detail, idx, 4);
1656 return 0;
1657 }
1658 /* Right shift by 2 so that we can carry out the following check
1659 canonically. */
1660 imm >>= 2;
1661 }
1662 size = get_operand_fields_width (get_operand_from_code (type));
1663 if (!value_fit_signed_field_p (imm, size))
1664 {
1665 set_other_error (mismatch_detail, idx,
1666 _("immediate out of range"));
1667 return 0;
1668 }
1669 break;
1670
1671 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1672 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1673 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1674 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1675 min_value = -8;
1676 max_value = 7;
1677 sve_imm_offset_vl:
1678 assert (!opnd->addr.offset.is_reg);
1679 assert (opnd->addr.preind);
1680 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1681 min_value *= num;
1682 max_value *= num;
1683 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1684 || (opnd->shifter.operator_present
1685 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1686 {
1687 set_other_error (mismatch_detail, idx,
1688 _("invalid addressing mode"));
1689 return 0;
1690 }
1691 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1692 {
1693 set_offset_out_of_range_error (mismatch_detail, idx,
1694 min_value, max_value);
1695 return 0;
1696 }
1697 if (!value_aligned_p (opnd->addr.offset.imm, num))
1698 {
1699 set_unaligned_error (mismatch_detail, idx, num);
1700 return 0;
1701 }
1702 break;
1703
1704 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1705 min_value = -32;
1706 max_value = 31;
1707 goto sve_imm_offset_vl;
1708
1709 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1710 min_value = -256;
1711 max_value = 255;
1712 goto sve_imm_offset_vl;
1713
1714 case AARCH64_OPND_SVE_ADDR_RI_U6:
1715 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1716 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1717 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1718 min_value = 0;
1719 max_value = 63;
1720 sve_imm_offset:
1721 assert (!opnd->addr.offset.is_reg);
1722 assert (opnd->addr.preind);
1723 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1724 min_value *= num;
1725 max_value *= num;
1726 if (opnd->shifter.operator_present
1727 || opnd->shifter.amount_present)
1728 {
1729 set_other_error (mismatch_detail, idx,
1730 _("invalid addressing mode"));
1731 return 0;
1732 }
1733 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1734 {
1735 set_offset_out_of_range_error (mismatch_detail, idx,
1736 min_value, max_value);
1737 return 0;
1738 }
1739 if (!value_aligned_p (opnd->addr.offset.imm, num))
1740 {
1741 set_unaligned_error (mismatch_detail, idx, num);
1742 return 0;
1743 }
1744 break;
1745
1746 case AARCH64_OPND_SVE_ADDR_RR:
1747 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1748 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1749 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1750 case AARCH64_OPND_SVE_ADDR_RX:
1751 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1752 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1753 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1754 case AARCH64_OPND_SVE_ADDR_RZ:
1755 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1756 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1757 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1758 modifiers = 1 << AARCH64_MOD_LSL;
1759 sve_rr_operand:
1760 assert (opnd->addr.offset.is_reg);
1761 assert (opnd->addr.preind);
1762 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1763 && opnd->addr.offset.regno == 31)
1764 {
1765 set_other_error (mismatch_detail, idx,
1766 _("index register xzr is not allowed"));
1767 return 0;
1768 }
1769 if (((1 << opnd->shifter.kind) & modifiers) == 0
1770 || (opnd->shifter.amount
1771 != get_operand_specific_data (&aarch64_operands[type])))
1772 {
1773 set_other_error (mismatch_detail, idx,
1774 _("invalid addressing mode"));
1775 return 0;
1776 }
1777 break;
1778
1779 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1780 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1781 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1782 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1783 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1784 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1785 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1786 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1787 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1788 goto sve_rr_operand;
1789
1790 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1791 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1792 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1793 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1794 min_value = 0;
1795 max_value = 31;
1796 goto sve_imm_offset;
1797
1798 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1799 modifiers = 1 << AARCH64_MOD_LSL;
1800 sve_zz_operand:
1801 assert (opnd->addr.offset.is_reg);
1802 assert (opnd->addr.preind);
1803 if (((1 << opnd->shifter.kind) & modifiers) == 0
1804 || opnd->shifter.amount < 0
1805 || opnd->shifter.amount > 3)
1806 {
1807 set_other_error (mismatch_detail, idx,
1808 _("invalid addressing mode"));
1809 return 0;
1810 }
1811 break;
1812
1813 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1814 modifiers = (1 << AARCH64_MOD_SXTW);
1815 goto sve_zz_operand;
1816
1817 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1818 modifiers = 1 << AARCH64_MOD_UXTW;
1819 goto sve_zz_operand;
1820
1821 default:
1822 break;
1823 }
1824 break;
1825
1826 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1827 if (type == AARCH64_OPND_LEt)
1828 {
1829 /* Get the upper bound for the element index. */
1830 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1831 if (!value_in_range_p (opnd->reglist.index, 0, num))
1832 {
1833 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1834 return 0;
1835 }
1836 }
1837 /* The opcode dependent area stores the number of elements in
1838 each structure to be loaded/stored. */
1839 num = get_opcode_dependent_value (opcode);
1840 switch (type)
1841 {
1842 case AARCH64_OPND_LVt:
1843 assert (num >= 1 && num <= 4);
1844 /* Unless LD1/ST1, the number of registers should be equal to that
1845 of the structure elements. */
1846 if (num != 1 && opnd->reglist.num_regs != num)
1847 {
1848 set_reg_list_error (mismatch_detail, idx, num);
1849 return 0;
1850 }
1851 break;
1852 case AARCH64_OPND_LVt_AL:
1853 case AARCH64_OPND_LEt:
1854 assert (num >= 1 && num <= 4);
1855 /* The number of registers should be equal to that of the structure
1856 elements. */
1857 if (opnd->reglist.num_regs != num)
1858 {
1859 set_reg_list_error (mismatch_detail, idx, num);
1860 return 0;
1861 }
1862 break;
1863 default:
1864 break;
1865 }
1866 break;
1867
1868 case AARCH64_OPND_CLASS_IMMEDIATE:
1869 /* Constraint check on immediate operand. */
1870 imm = opnd->imm.value;
1871 /* E.g. imm_0_31 constrains value to be 0..31. */
1872 if (qualifier_value_in_range_constraint_p (qualifier)
1873 && !value_in_range_p (imm, get_lower_bound (qualifier),
1874 get_upper_bound (qualifier)))
1875 {
1876 set_imm_out_of_range_error (mismatch_detail, idx,
1877 get_lower_bound (qualifier),
1878 get_upper_bound (qualifier));
1879 return 0;
1880 }
1881
1882 switch (type)
1883 {
1884 case AARCH64_OPND_AIMM:
1885 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1886 {
1887 set_other_error (mismatch_detail, idx,
1888 _("invalid shift operator"));
1889 return 0;
1890 }
1891 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1892 {
1893 set_other_error (mismatch_detail, idx,
1894 _("shift amount expected to be 0 or 12"));
1895 return 0;
1896 }
1897 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1898 {
1899 set_other_error (mismatch_detail, idx,
1900 _("immediate out of range"));
1901 return 0;
1902 }
1903 break;
1904
1905 case AARCH64_OPND_HALF:
1906 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1907 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1908 {
1909 set_other_error (mismatch_detail, idx,
1910 _("invalid shift operator"));
1911 return 0;
1912 }
1913 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1914 if (!value_aligned_p (opnd->shifter.amount, 16))
1915 {
1916 set_other_error (mismatch_detail, idx,
1917 _("shift amount should be a multiple of 16"));
1918 return 0;
1919 }
1920 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1921 {
1922 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1923 0, size * 8 - 16);
1924 return 0;
1925 }
1926 if (opnd->imm.value < 0)
1927 {
1928 set_other_error (mismatch_detail, idx,
1929 _("negative immediate value not allowed"));
1930 return 0;
1931 }
1932 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1933 {
1934 set_other_error (mismatch_detail, idx,
1935 _("immediate out of range"));
1936 return 0;
1937 }
1938 break;
1939
1940 case AARCH64_OPND_IMM_MOV:
1941 {
1942 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1943 imm = opnd->imm.value;
1944 assert (idx == 1);
1945 switch (opcode->op)
1946 {
1947 case OP_MOV_IMM_WIDEN:
1948 imm = ~imm;
1949 /* Fall through... */
1950 case OP_MOV_IMM_WIDE:
1951 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1952 {
1953 set_other_error (mismatch_detail, idx,
1954 _("immediate out of range"));
1955 return 0;
1956 }
1957 break;
1958 case OP_MOV_IMM_LOG:
1959 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1960 {
1961 set_other_error (mismatch_detail, idx,
1962 _("immediate out of range"));
1963 return 0;
1964 }
1965 break;
1966 default:
1967 assert (0);
1968 return 0;
1969 }
1970 }
1971 break;
1972
1973 case AARCH64_OPND_NZCV:
1974 case AARCH64_OPND_CCMP_IMM:
1975 case AARCH64_OPND_EXCEPTION:
1976 case AARCH64_OPND_UIMM4:
1977 case AARCH64_OPND_UIMM7:
1978 case AARCH64_OPND_UIMM3_OP1:
1979 case AARCH64_OPND_UIMM3_OP2:
1980 size = get_operand_fields_width (get_operand_from_code (type));
1981 assert (size < 32);
1982 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1983 {
1984 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1985 (1 << size) - 1);
1986 return 0;
1987 }
1988 break;
1989
1990 case AARCH64_OPND_WIDTH:
1991 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
1992 && opnds[0].type == AARCH64_OPND_Rd);
1993 size = get_upper_bound (qualifier);
1994 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1995 /* lsb+width <= reg.size */
1996 {
1997 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1998 size - opnds[idx-1].imm.value);
1999 return 0;
2000 }
2001 break;
2002
2003 case AARCH64_OPND_LIMM:
2004 {
2005 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2006 uint64_t uimm = opnd->imm.value;
2007 if (opcode->op == OP_BIC)
2008 uimm = ~uimm;
2009 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2010 {
2011 set_other_error (mismatch_detail, idx,
2012 _("immediate out of range"));
2013 return 0;
2014 }
2015 }
2016 break;
2017
2018 case AARCH64_OPND_IMM0:
2019 case AARCH64_OPND_FPIMM0:
2020 if (opnd->imm.value != 0)
2021 {
2022 set_other_error (mismatch_detail, idx,
2023 _("immediate zero expected"));
2024 return 0;
2025 }
2026 break;
2027
2028 case AARCH64_OPND_SHLL_IMM:
2029 assert (idx == 2);
2030 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2031 if (opnd->imm.value != size)
2032 {
2033 set_other_error (mismatch_detail, idx,
2034 _("invalid shift amount"));
2035 return 0;
2036 }
2037 break;
2038
2039 case AARCH64_OPND_IMM_VLSL:
2040 size = aarch64_get_qualifier_esize (qualifier);
2041 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2042 {
2043 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2044 size * 8 - 1);
2045 return 0;
2046 }
2047 break;
2048
2049 case AARCH64_OPND_IMM_VLSR:
2050 size = aarch64_get_qualifier_esize (qualifier);
2051 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2052 {
2053 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2054 return 0;
2055 }
2056 break;
2057
2058 case AARCH64_OPND_SIMD_IMM:
2059 case AARCH64_OPND_SIMD_IMM_SFT:
2060 /* Qualifier check. */
2061 switch (qualifier)
2062 {
2063 case AARCH64_OPND_QLF_LSL:
2064 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2065 {
2066 set_other_error (mismatch_detail, idx,
2067 _("invalid shift operator"));
2068 return 0;
2069 }
2070 break;
2071 case AARCH64_OPND_QLF_MSL:
2072 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2073 {
2074 set_other_error (mismatch_detail, idx,
2075 _("invalid shift operator"));
2076 return 0;
2077 }
2078 break;
2079 case AARCH64_OPND_QLF_NIL:
2080 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2081 {
2082 set_other_error (mismatch_detail, idx,
2083 _("shift is not permitted"));
2084 return 0;
2085 }
2086 break;
2087 default:
2088 assert (0);
2089 return 0;
2090 }
2091 /* Is the immediate valid? */
2092 assert (idx == 1);
2093 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2094 {
2095 /* uimm8 or simm8 */
2096 if (!value_in_range_p (opnd->imm.value, -128, 255))
2097 {
2098 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2099 return 0;
2100 }
2101 }
2102 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2103 {
2104 /* uimm64 is not
2105 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2106 ffffffffgggggggghhhhhhhh'. */
2107 set_other_error (mismatch_detail, idx,
2108 _("invalid value for immediate"));
2109 return 0;
2110 }
2111 /* Is the shift amount valid? */
2112 switch (opnd->shifter.kind)
2113 {
2114 case AARCH64_MOD_LSL:
2115 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2116 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2117 {
2118 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2119 (size - 1) * 8);
2120 return 0;
2121 }
2122 if (!value_aligned_p (opnd->shifter.amount, 8))
2123 {
2124 set_unaligned_error (mismatch_detail, idx, 8);
2125 return 0;
2126 }
2127 break;
2128 case AARCH64_MOD_MSL:
	  /* Only 8 and 16 are valid shift amounts. */
2130 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2131 {
2132 set_other_error (mismatch_detail, idx,
2133 _("shift amount expected to be 0 or 16"));
2134 return 0;
2135 }
2136 break;
2137 default:
2138 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2139 {
2140 set_other_error (mismatch_detail, idx,
2141 _("invalid shift operator"));
2142 return 0;
2143 }
2144 break;
2145 }
2146 break;
2147
2148 case AARCH64_OPND_FPIMM:
2149 case AARCH64_OPND_SIMD_FPIMM:
2150 if (opnd->imm.is_fp == 0)
2151 {
2152 set_other_error (mismatch_detail, idx,
2153 _("floating-point immediate expected"));
2154 return 0;
2155 }
2156 /* The value is expected to be an 8-bit floating-point constant with
2157 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2158 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2159 instruction). */
2160 if (!value_in_range_p (opnd->imm.value, 0, 255))
2161 {
2162 set_other_error (mismatch_detail, idx,
2163 _("immediate out of range"));
2164 return 0;
2165 }
2166 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2167 {
2168 set_other_error (mismatch_detail, idx,
2169 _("invalid shift operator"));
2170 return 0;
2171 }
2172 break;
2173
2174 case AARCH64_OPND_SVE_PATTERN_SCALED:
2175 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2176 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2177 {
2178 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2179 return 0;
2180 }
2181 break;
2182
2183 default:
2184 break;
2185 }
2186 break;
2187
2188 case AARCH64_OPND_CLASS_CP_REG:
2189 /* Cn or Cm: 4-bit opcode field named for historical reasons.
2190 valid range: C0 - C15. */
2191 if (opnd->reg.regno > 15)
2192 {
2193 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2194 return 0;
2195 }
2196 break;
2197
2198 case AARCH64_OPND_CLASS_SYSTEM:
2199 switch (type)
2200 {
2201 case AARCH64_OPND_PSTATEFIELD:
2202 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2203 /* MSR UAO, #uimm4
2204 MSR PAN, #uimm4
2205 The immediate must be #0 or #1. */
2206 if ((opnd->pstatefield == 0x03 /* UAO. */
2207 || opnd->pstatefield == 0x04) /* PAN. */
2208 && opnds[1].imm.value > 1)
2209 {
2210 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2211 return 0;
2212 }
2213 /* MSR SPSel, #uimm4
2214 Uses uimm4 as a control value to select the stack pointer: if
2215 bit 0 is set it selects the current exception level's stack
2216 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
2217 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2218 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2219 {
2220 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2221 return 0;
2222 }
2223 break;
2224 default:
2225 break;
2226 }
2227 break;
2228
2229 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2230 /* Get the upper bound for the element index. */
2231 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2232 /* Index out-of-range. */
2233 if (!value_in_range_p (opnd->reglane.index, 0, num))
2234 {
2235 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2236 return 0;
2237 }
2238 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2239 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2240 number is encoded in "size:M:Rm":
2241 size <Vm>
2242 00 RESERVED
2243 01 0:Rm
2244 10 M:Rm
2245 11 RESERVED */
2246 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2247 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2248 {
2249 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2250 return 0;
2251 }
2252 break;
2253
2254 case AARCH64_OPND_CLASS_MODIFIED_REG:
2255 assert (idx == 1 || idx == 2);
2256 switch (type)
2257 {
2258 case AARCH64_OPND_Rm_EXT:
2259 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2260 && opnd->shifter.kind != AARCH64_MOD_LSL)
2261 {
2262 set_other_error (mismatch_detail, idx,
2263 _("extend operator expected"));
2264 return 0;
2265 }
2266 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2267 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2268 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2269 case. */
2270 if (!aarch64_stack_pointer_p (opnds + 0)
2271 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2272 {
2273 if (!opnd->shifter.operator_present)
2274 {
2275 set_other_error (mismatch_detail, idx,
2276 _("missing extend operator"));
2277 return 0;
2278 }
2279 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2280 {
2281 set_other_error (mismatch_detail, idx,
2282 _("'LSL' operator not allowed"));
2283 return 0;
2284 }
2285 }
2286 assert (opnd->shifter.operator_present /* Default to LSL. */
2287 || opnd->shifter.kind == AARCH64_MOD_LSL);
2288 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2289 {
2290 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2291 return 0;
2292 }
2293 /* In the 64-bit form, the final register operand is written as Wm
2294 for all but the (possibly omitted) UXTX/LSL and SXTX
2295 operators.
2296 N.B. GAS allows X register to be used with any operator as a
2297 programming convenience. */
2298 if (qualifier == AARCH64_OPND_QLF_X
2299 && opnd->shifter.kind != AARCH64_MOD_LSL
2300 && opnd->shifter.kind != AARCH64_MOD_UXTX
2301 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2302 {
2303 set_other_error (mismatch_detail, idx, _("W register expected"));
2304 return 0;
2305 }
2306 break;
2307
2308 case AARCH64_OPND_Rm_SFT:
2309 /* ROR is not available to the shifted register operand in
2310 arithmetic instructions. */
2311 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2312 {
2313 set_other_error (mismatch_detail, idx,
2314 _("shift operator expected"));
2315 return 0;
2316 }
2317 if (opnd->shifter.kind == AARCH64_MOD_ROR
2318 && opcode->iclass != log_shift)
2319 {
2320 set_other_error (mismatch_detail, idx,
2321 _("'ROR' operator not allowed"));
2322 return 0;
2323 }
2324 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2325 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2326 {
2327 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2328 return 0;
2329 }
2330 break;
2331
2332 default:
2333 break;
2334 }
2335 break;
2336
2337 default:
2338 break;
2339 }
2340
2341 return 1;
2342 }
2343
2344 /* Main entrypoint for the operand constraint checking.
2345
2346 Return 1 if operands of *INST meet the constraint applied by the operand
2347 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2348 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2349 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2350 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2351 error kind when it is notified that an instruction does not pass the check).
2352
2353 Un-determined operand qualifiers may get established during the process. */
2354
2355 int
2356 aarch64_match_operands_constraint (aarch64_inst *inst,
2357 aarch64_operand_error *mismatch_detail)
2358 {
2359 int i;
2360
2361 DEBUG_TRACE ("enter");
2362
2363 /* Check for cases where a source register needs to be the same as the
2364 destination register. Do this before matching qualifiers since if
2365 an instruction has both invalid tying and invalid qualifiers,
2366 the error about qualifiers would suggest several alternative
2367 instructions that also have invalid tying. */
2368 i = inst->opcode->tied_operand;
2369 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2370 {
2371 if (mismatch_detail)
2372 {
2373 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2374 mismatch_detail->index = i;
2375 mismatch_detail->error = NULL;
2376 }
2377 return 0;
2378 }
2379
2380 /* Match operands' qualifier.
2381 *INST has already had qualifier establish for some, if not all, of
2382 its operands; we need to find out whether these established
2383 qualifiers match one of the qualifier sequence in
2384 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2385 with the corresponding qualifier in such a sequence.
2386 Only basic operand constraint checking is done here; the more thorough
2387 constraint checking will carried out by operand_general_constraint_met_p,
2388 which has be to called after this in order to get all of the operands'
2389 qualifiers established. */
2390 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2391 {
2392 DEBUG_TRACE ("FAIL on operand qualifier matching");
2393 if (mismatch_detail)
2394 {
2395 /* Return an error type to indicate that it is the qualifier
2396 matching failure; we don't care about which operand as there
2397 are enough information in the opcode table to reproduce it. */
2398 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2399 mismatch_detail->index = -1;
2400 mismatch_detail->error = NULL;
2401 }
2402 return 0;
2403 }
2404
2405 /* Match operands' constraint. */
2406 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2407 {
2408 enum aarch64_opnd type = inst->opcode->operands[i];
2409 if (type == AARCH64_OPND_NIL)
2410 break;
2411 if (inst->operands[i].skip)
2412 {
2413 DEBUG_TRACE ("skip the incomplete operand %d", i);
2414 continue;
2415 }
2416 if (operand_general_constraint_met_p (inst->operands, i, type,
2417 inst->opcode, mismatch_detail) == 0)
2418 {
2419 DEBUG_TRACE ("FAIL on operand %d", i);
2420 return 0;
2421 }
2422 }
2423
2424 DEBUG_TRACE ("PASS");
2425
2426 return 1;
2427 }
2428
2429 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2430 Also updates the TYPE of each INST->OPERANDS with the corresponding
2431 value of OPCODE->OPERANDS.
2432
2433 Note that some operand qualifiers may need to be manually cleared by
2434 the caller before it further calls the aarch64_opcode_encode; by
2435 doing this, it helps the qualifier matching facilities work
2436 properly. */
2437
2438 const aarch64_opcode*
2439 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2440 {
2441 int i;
2442 const aarch64_opcode *old = inst->opcode;
2443
2444 inst->opcode = opcode;
2445
2446 /* Update the operand types. */
2447 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2448 {
2449 inst->operands[i].type = opcode->operands[i];
2450 if (opcode->operands[i] == AARCH64_OPND_NIL)
2451 break;
2452 }
2453
2454 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2455
2456 return old;
2457 }
2458
2459 int
2460 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2461 {
2462 int i;
2463 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2464 if (operands[i] == operand)
2465 return i;
2466 else if (operands[i] == AARCH64_OPND_NIL)
2467 break;
2468 return -1;
2469 }
2470 \f
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
    R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
/* Names of the general-purpose registers, indexed as
   int_reg[has_zr][is_64][regno] (see get_int_reg_name):
   [0][0] 32-bit integer regs with sp   Wn
   [0][1] 64-bit integer regs with sp   Xn  sf=1
   [1][0] 32-bit integer regs with #0   Wn
   [1][1] 64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZD
#undef ZS
};
#undef BANK
2501
2502 /* Return the integer register name.
2503 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2504
2505 static inline const char *
2506 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2507 {
2508 const int has_zr = sp_reg_p ? 0 : 1;
2509 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2510 return int_reg[has_zr][is_64][regno];
2511 }
2512
2513 /* Like get_int_reg_name, but IS_64 is always 1. */
2514
2515 static inline const char *
2516 get_64bit_int_reg_name (int regno, int sp_reg_p)
2517 {
2518 const int has_zr = sp_reg_p ? 0 : 1;
2519 return int_reg[has_zr][1][regno];
2520 }
2521
2522 /* Get the name of the integer offset register in OPND, using the shift type
2523 to decide whether it's a word or doubleword. */
2524
2525 static inline const char *
2526 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2527 {
2528 switch (opnd->shifter.kind)
2529 {
2530 case AARCH64_MOD_UXTW:
2531 case AARCH64_MOD_SXTW:
2532 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2533
2534 case AARCH64_MOD_LSL:
2535 case AARCH64_MOD_SXTX:
2536 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2537
2538 default:
2539 abort ();
2540 }
2541 }
2542
2543 /* Get the name of the SVE vector offset register in OPND, using the operand
2544 qualifier to decide whether the suffix should be .S or .D. */
2545
2546 static inline const char *
2547 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2548 {
2549 assert (qualifier == AARCH64_OPND_QLF_S_S
2550 || qualifier == AARCH64_OPND_QLF_S_D);
2551 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2552 }
2553
/* Types for expanding an encoded 8-bit value to a floating-point value.
   The integer member receives the bit pattern; the FP member reinterprets
   it for printing.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* Half-precision immediates are expanded to single precision by
   expand_fp_imm, so the container here is 32 bits wide, the same as
   single_conv_t.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2573
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Zero-initialize so that the function returns a defined value even if
     the size check at the bottom is compiled out by NDEBUG.  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Build the high 32 bits of the double-precision pattern, then
	 shift them into place; the low 32 bits are all zero.  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	  | ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)  */
	  | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	  | (imm8_6 << (56-32)) | (imm8_6 << (55-32))	/* Replicate(imm8<6>,7) */
	  | (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)	/* imm8<7> */
	  | ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	  | (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	  | (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2617
2618 /* Produce the string representation of the register list operand *OPND
2619 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2620 the register name that comes before the register number, such as "v". */
2621 static void
2622 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2623 const char *prefix)
2624 {
2625 const int num_regs = opnd->reglist.num_regs;
2626 const int first_reg = opnd->reglist.first_regno;
2627 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2628 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2629 char tb[8]; /* Temporary buffer. */
2630
2631 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2632 assert (num_regs >= 1 && num_regs <= 4);
2633
2634 /* Prepare the index if any. */
2635 if (opnd->reglist.has_index)
2636 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2637 else
2638 tb[0] = '\0';
2639
2640 /* The hyphenated form is preferred for disassembly if there are
2641 more than two registers in the list, and the register numbers
2642 are monotonically increasing in increments of one. */
2643 if (num_regs > 2 && last_reg > first_reg)
2644 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2645 prefix, last_reg, qlf_name, tb);
2646 else
2647 {
2648 const int reg0 = first_reg;
2649 const int reg1 = (first_reg + 1) & 0x1f;
2650 const int reg2 = (first_reg + 2) & 0x1f;
2651 const int reg3 = (first_reg + 3) & 0x1f;
2652
2653 switch (num_regs)
2654 {
2655 case 1:
2656 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2657 break;
2658 case 2:
2659 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2660 prefix, reg1, qlf_name, tb);
2661 break;
2662 case 3:
2663 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2664 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2665 prefix, reg2, qlf_name, tb);
2666 break;
2667 case 4:
2668 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2669 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2670 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2671 break;
2672 }
2673 }
2674 }
2675
2676 /* Print the register+immediate address in OPND to BUF, which has SIZE
2677 characters. BASE is the name of the base register. */
2678
2679 static void
2680 print_immediate_offset_address (char *buf, size_t size,
2681 const aarch64_opnd_info *opnd,
2682 const char *base)
2683 {
2684 if (opnd->addr.writeback)
2685 {
2686 if (opnd->addr.preind)
2687 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2688 else
2689 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
2690 }
2691 else
2692 {
2693 if (opnd->shifter.operator_present)
2694 {
2695 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2696 snprintf (buf, size, "[%s,#%d,mul vl]",
2697 base, opnd->addr.offset.imm);
2698 }
2699 else if (opnd->addr.offset.imm)
2700 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2701 else
2702 snprintf (buf, size, "[%s]", base);
2703 }
2704 }
2705
2706 /* Produce the string representation of the register offset address operand
2707 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2708 the names of the base and offset registers. */
2709 static void
2710 print_register_offset_address (char *buf, size_t size,
2711 const aarch64_opnd_info *opnd,
2712 const char *base, const char *offset)
2713 {
2714 char tb[16]; /* Temporary buffer. */
2715 bfd_boolean print_extend_p = TRUE;
2716 bfd_boolean print_amount_p = TRUE;
2717 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2718
2719 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2720 || !opnd->shifter.amount_present))
2721 {
2722 /* Not print the shift/extend amount when the amount is zero and
2723 when it is not the special case of 8-bit load/store instruction. */
2724 print_amount_p = FALSE;
2725 /* Likewise, no need to print the shift operator LSL in such a
2726 situation. */
2727 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2728 print_extend_p = FALSE;
2729 }
2730
2731 /* Prepare for the extend/shift. */
2732 if (print_extend_p)
2733 {
2734 if (print_amount_p)
2735 snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name,
2736 opnd->shifter.amount);
2737 else
2738 snprintf (tb, sizeof (tb), ",%s", shift_name);
2739 }
2740 else
2741 tb[0] = '\0';
2742
2743 snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
2744 }
2745
2746 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2747 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2748 PC, PCREL_P and ADDRESS are used to pass in and return information about
2749 the PC-relative address calculation, where the PC value is passed in
2750 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
2751 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2752 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2753
2754 The function serves both the disassembler and the assembler diagnostics
2755 issuer, which is the reason why it lives in this file. */
2756
2757 void
2758 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2759 const aarch64_opcode *opcode,
2760 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2761 bfd_vma *address)
2762 {
2763 int i;
2764 const char *name = NULL;
2765 const aarch64_opnd_info *opnd = opnds + idx;
2766 enum aarch64_modifier_kind kind;
2767 uint64_t addr, enum_value;
2768
2769 buf[0] = '\0';
2770 if (pcrel_p)
2771 *pcrel_p = 0;
2772
2773 switch (opnd->type)
2774 {
2775 case AARCH64_OPND_Rd:
2776 case AARCH64_OPND_Rn:
2777 case AARCH64_OPND_Rm:
2778 case AARCH64_OPND_Rt:
2779 case AARCH64_OPND_Rt2:
2780 case AARCH64_OPND_Rs:
2781 case AARCH64_OPND_Ra:
2782 case AARCH64_OPND_Rt_SYS:
2783 case AARCH64_OPND_PAIRREG:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information. */
2787 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2788 break;
2789 /* Omit the operand, e.g. RET. */
2790 if (optional_operand_p (opcode, idx)
2791 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2792 break;
2793 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2794 || opnd->qualifier == AARCH64_OPND_QLF_X);
2795 snprintf (buf, size, "%s",
2796 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2797 break;
2798
2799 case AARCH64_OPND_Rd_SP:
2800 case AARCH64_OPND_Rn_SP:
2801 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2802 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2803 || opnd->qualifier == AARCH64_OPND_QLF_X
2804 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2805 snprintf (buf, size, "%s",
2806 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2807 break;
2808
2809 case AARCH64_OPND_Rm_EXT:
2810 kind = opnd->shifter.kind;
2811 assert (idx == 1 || idx == 2);
2812 if ((aarch64_stack_pointer_p (opnds)
2813 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2814 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2815 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2816 && kind == AARCH64_MOD_UXTW)
2817 || (opnd->qualifier == AARCH64_OPND_QLF_X
2818 && kind == AARCH64_MOD_UXTX)))
2819 {
2820 /* 'LSL' is the preferred form in this case. */
2821 kind = AARCH64_MOD_LSL;
2822 if (opnd->shifter.amount == 0)
2823 {
2824 /* Shifter omitted. */
2825 snprintf (buf, size, "%s",
2826 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2827 break;
2828 }
2829 }
2830 if (opnd->shifter.amount)
2831 snprintf (buf, size, "%s, %s #%" PRIi64,
2832 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2833 aarch64_operand_modifiers[kind].name,
2834 opnd->shifter.amount);
2835 else
2836 snprintf (buf, size, "%s, %s",
2837 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2838 aarch64_operand_modifiers[kind].name);
2839 break;
2840
2841 case AARCH64_OPND_Rm_SFT:
2842 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2843 || opnd->qualifier == AARCH64_OPND_QLF_X);
2844 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2845 snprintf (buf, size, "%s",
2846 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2847 else
2848 snprintf (buf, size, "%s, %s #%" PRIi64,
2849 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2850 aarch64_operand_modifiers[opnd->shifter.kind].name,
2851 opnd->shifter.amount);
2852 break;
2853
2854 case AARCH64_OPND_Fd:
2855 case AARCH64_OPND_Fn:
2856 case AARCH64_OPND_Fm:
2857 case AARCH64_OPND_Fa:
2858 case AARCH64_OPND_Ft:
2859 case AARCH64_OPND_Ft2:
2860 case AARCH64_OPND_Sd:
2861 case AARCH64_OPND_Sn:
2862 case AARCH64_OPND_Sm:
2863 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
2864 opnd->reg.regno);
2865 break;
2866
2867 case AARCH64_OPND_Vd:
2868 case AARCH64_OPND_Vn:
2869 case AARCH64_OPND_Vm:
2870 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2871 aarch64_get_qualifier_name (opnd->qualifier));
2872 break;
2873
2874 case AARCH64_OPND_Ed:
2875 case AARCH64_OPND_En:
2876 case AARCH64_OPND_Em:
2877 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
2878 aarch64_get_qualifier_name (opnd->qualifier),
2879 opnd->reglane.index);
2880 break;
2881
2882 case AARCH64_OPND_VdD1:
2883 case AARCH64_OPND_VnD1:
2884 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
2885 break;
2886
2887 case AARCH64_OPND_LVn:
2888 case AARCH64_OPND_LVt:
2889 case AARCH64_OPND_LVt_AL:
2890 case AARCH64_OPND_LEt:
2891 print_register_list (buf, size, opnd, "v");
2892 break;
2893
2894 case AARCH64_OPND_SVE_Pd:
2895 case AARCH64_OPND_SVE_Pg3:
2896 case AARCH64_OPND_SVE_Pg4_5:
2897 case AARCH64_OPND_SVE_Pg4_10:
2898 case AARCH64_OPND_SVE_Pg4_16:
2899 case AARCH64_OPND_SVE_Pm:
2900 case AARCH64_OPND_SVE_Pn:
2901 case AARCH64_OPND_SVE_Pt:
2902 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
2903 snprintf (buf, size, "p%d", opnd->reg.regno);
2904 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
2905 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
2906 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
2907 aarch64_get_qualifier_name (opnd->qualifier));
2908 else
2909 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
2910 aarch64_get_qualifier_name (opnd->qualifier));
2911 break;
2912
2913 case AARCH64_OPND_SVE_Za_5:
2914 case AARCH64_OPND_SVE_Za_16:
2915 case AARCH64_OPND_SVE_Zd:
2916 case AARCH64_OPND_SVE_Zm_5:
2917 case AARCH64_OPND_SVE_Zm_16:
2918 case AARCH64_OPND_SVE_Zn:
2919 case AARCH64_OPND_SVE_Zt:
2920 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
2921 snprintf (buf, size, "z%d", opnd->reg.regno);
2922 else
2923 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
2924 aarch64_get_qualifier_name (opnd->qualifier));
2925 break;
2926
2927 case AARCH64_OPND_SVE_ZnxN:
2928 case AARCH64_OPND_SVE_ZtxN:
2929 print_register_list (buf, size, opnd, "z");
2930 break;
2931
2932 case AARCH64_OPND_SVE_Zn_INDEX:
2933 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
2934 aarch64_get_qualifier_name (opnd->qualifier),
2935 opnd->reglane.index);
2936 break;
2937
2938 case AARCH64_OPND_Cn:
2939 case AARCH64_OPND_Cm:
2940 snprintf (buf, size, "C%d", opnd->reg.regno);
2941 break;
2942
2943 case AARCH64_OPND_IDX:
2944 case AARCH64_OPND_IMM:
2945 case AARCH64_OPND_WIDTH:
2946 case AARCH64_OPND_UIMM3_OP1:
2947 case AARCH64_OPND_UIMM3_OP2:
2948 case AARCH64_OPND_BIT_NUM:
2949 case AARCH64_OPND_IMM_VLSL:
2950 case AARCH64_OPND_IMM_VLSR:
2951 case AARCH64_OPND_SHLL_IMM:
2952 case AARCH64_OPND_IMM0:
2953 case AARCH64_OPND_IMMR:
2954 case AARCH64_OPND_IMMS:
2955 case AARCH64_OPND_FBITS:
2956 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2957 break;
2958
2959 case AARCH64_OPND_SVE_PATTERN:
2960 if (optional_operand_p (opcode, idx)
2961 && opnd->imm.value == get_optional_operand_default_value (opcode))
2962 break;
2963 enum_value = opnd->imm.value;
2964 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
2965 if (aarch64_sve_pattern_array[enum_value])
2966 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
2967 else
2968 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2969 break;
2970
2971 case AARCH64_OPND_SVE_PATTERN_SCALED:
2972 if (optional_operand_p (opcode, idx)
2973 && !opnd->shifter.operator_present
2974 && opnd->imm.value == get_optional_operand_default_value (opcode))
2975 break;
2976 enum_value = opnd->imm.value;
2977 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
2978 if (aarch64_sve_pattern_array[opnd->imm.value])
2979 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
2980 else
2981 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2982 if (opnd->shifter.operator_present)
2983 {
2984 size_t len = strlen (buf);
2985 snprintf (buf + len, size - len, ", %s #%" PRIi64,
2986 aarch64_operand_modifiers[opnd->shifter.kind].name,
2987 opnd->shifter.amount);
2988 }
2989 break;
2990
2991 case AARCH64_OPND_SVE_PRFOP:
2992 enum_value = opnd->imm.value;
2993 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
2994 if (aarch64_sve_prfop_array[enum_value])
2995 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
2996 else
2997 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2998 break;
2999
3000 case AARCH64_OPND_IMM_MOV:
3001 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3002 {
3003 case 4: /* e.g. MOV Wd, #<imm32>. */
3004 {
3005 int imm32 = opnd->imm.value;
3006 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3007 }
3008 break;
3009 case 8: /* e.g. MOV Xd, #<imm64>. */
3010 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3011 opnd->imm.value, opnd->imm.value);
3012 break;
3013 default: assert (0);
3014 }
3015 break;
3016
3017 case AARCH64_OPND_FPIMM0:
3018 snprintf (buf, size, "#0.0");
3019 break;
3020
3021 case AARCH64_OPND_LIMM:
3022 case AARCH64_OPND_AIMM:
3023 case AARCH64_OPND_HALF:
3024 if (opnd->shifter.amount)
3025 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3026 opnd->shifter.amount);
3027 else
3028 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3029 break;
3030
3031 case AARCH64_OPND_SIMD_IMM:
3032 case AARCH64_OPND_SIMD_IMM_SFT:
3033 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3034 || opnd->shifter.kind == AARCH64_MOD_NONE)
3035 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3036 else
3037 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3038 aarch64_operand_modifiers[opnd->shifter.kind].name,
3039 opnd->shifter.amount);
3040 break;
3041
3042 case AARCH64_OPND_FPIMM:
3043 case AARCH64_OPND_SIMD_FPIMM:
3044 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3045 {
3046 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3047 {
3048 half_conv_t c;
3049 c.i = expand_fp_imm (2, opnd->imm.value);
3050 snprintf (buf, size, "#%.18e", c.f);
3051 }
3052 break;
3053 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3054 {
3055 single_conv_t c;
3056 c.i = expand_fp_imm (4, opnd->imm.value);
3057 snprintf (buf, size, "#%.18e", c.f);
3058 }
3059 break;
	case 8: /* e.g. FMOV <Dd>, #<imm>. */
3061 {
3062 double_conv_t c;
3063 c.i = expand_fp_imm (8, opnd->imm.value);
3064 snprintf (buf, size, "#%.18e", c.d);
3065 }
3066 break;
3067 default: assert (0);
3068 }
3069 break;
3070
3071 case AARCH64_OPND_CCMP_IMM:
3072 case AARCH64_OPND_NZCV:
3073 case AARCH64_OPND_EXCEPTION:
3074 case AARCH64_OPND_UIMM4:
3075 case AARCH64_OPND_UIMM7:
3076 if (optional_operand_p (opcode, idx) == TRUE
3077 && (opnd->imm.value ==
3078 (int64_t) get_optional_operand_default_value (opcode)))
3079 /* Omit the operand, e.g. DCPS1. */
3080 break;
3081 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3082 break;
3083
3084 case AARCH64_OPND_COND:
3085 case AARCH64_OPND_COND1:
3086 snprintf (buf, size, "%s", opnd->cond->names[0]);
3087 break;
3088
3089 case AARCH64_OPND_ADDR_ADRP:
3090 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3091 + opnd->imm.value;
3092 if (pcrel_p)
3093 *pcrel_p = 1;
3094 if (address)
3095 *address = addr;
3096 /* This is not necessary during the disassembling, as print_address_func
3097 in the disassemble_info will take care of the printing. But some
3098 other callers may be still interested in getting the string in *STR,
3099 so here we do snprintf regardless. */
3100 snprintf (buf, size, "#0x%" PRIx64, addr);
3101 break;
3102
3103 case AARCH64_OPND_ADDR_PCREL14:
3104 case AARCH64_OPND_ADDR_PCREL19:
3105 case AARCH64_OPND_ADDR_PCREL21:
3106 case AARCH64_OPND_ADDR_PCREL26:
3107 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3108 if (pcrel_p)
3109 *pcrel_p = 1;
3110 if (address)
3111 *address = addr;
3112 /* This is not necessary during the disassembling, as print_address_func
3113 in the disassemble_info will take care of the printing. But some
3114 other callers may be still interested in getting the string in *STR,
3115 so here we do snprintf regardless. */
3116 snprintf (buf, size, "#0x%" PRIx64, addr);
3117 break;
3118
3119 case AARCH64_OPND_ADDR_SIMPLE:
3120 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3121 case AARCH64_OPND_SIMD_ADDR_POST:
3122 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3123 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3124 {
3125 if (opnd->addr.offset.is_reg)
3126 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3127 else
3128 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3129 }
3130 else
3131 snprintf (buf, size, "[%s]", name);
3132 break;
3133
3134 case AARCH64_OPND_ADDR_REGOFF:
3135 case AARCH64_OPND_SVE_ADDR_RR:
3136 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3137 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3138 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3139 case AARCH64_OPND_SVE_ADDR_RX:
3140 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3141 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3142 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3143 print_register_offset_address
3144 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3145 get_offset_int_reg_name (opnd));
3146 break;
3147
3148 case AARCH64_OPND_SVE_ADDR_RZ:
3149 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3150 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3151 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3152 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3153 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3154 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3155 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3156 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3157 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3158 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3159 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3160 print_register_offset_address
3161 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3162 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3163 break;
3164
3165 case AARCH64_OPND_ADDR_SIMM7:
3166 case AARCH64_OPND_ADDR_SIMM9:
3167 case AARCH64_OPND_ADDR_SIMM9_2:
3168 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3169 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3170 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3171 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3172 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3173 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3174 case AARCH64_OPND_SVE_ADDR_RI_U6:
3175 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3176 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3177 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3178 print_immediate_offset_address
3179 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3180 break;
3181
3182 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3183 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3184 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3185 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3186 print_immediate_offset_address
3187 (buf, size, opnd,
3188 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3189 break;
3190
3191 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3192 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3193 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3194 print_register_offset_address
3195 (buf, size, opnd,
3196 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3197 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3198 break;
3199
3200 case AARCH64_OPND_ADDR_UIMM12:
3201 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3202 if (opnd->addr.offset.imm)
3203 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
3204 else
3205 snprintf (buf, size, "[%s]", name);
3206 break;
3207
3208 case AARCH64_OPND_SYSREG:
3209 for (i = 0; aarch64_sys_regs[i].name; ++i)
3210 if (aarch64_sys_regs[i].value == opnd->sysreg
3211 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3212 break;
3213 if (aarch64_sys_regs[i].name)
3214 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3215 else
3216 {
3217 /* Implementation defined system register. */
3218 unsigned int value = opnd->sysreg;
3219 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3220 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3221 value & 0x7);
3222 }
3223 break;
3224
3225 case AARCH64_OPND_PSTATEFIELD:
3226 for (i = 0; aarch64_pstatefields[i].name; ++i)
3227 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3228 break;
3229 assert (aarch64_pstatefields[i].name);
3230 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3231 break;
3232
3233 case AARCH64_OPND_SYSREG_AT:
3234 case AARCH64_OPND_SYSREG_DC:
3235 case AARCH64_OPND_SYSREG_IC:
3236 case AARCH64_OPND_SYSREG_TLBI:
3237 snprintf (buf, size, "%s", opnd->sysins_op->name);
3238 break;
3239
3240 case AARCH64_OPND_BARRIER:
3241 snprintf (buf, size, "%s", opnd->barrier->name);
3242 break;
3243
3244 case AARCH64_OPND_BARRIER_ISB:
3245 /* Operand can be omitted, e.g. in DCPS1. */
3246 if (! optional_operand_p (opcode, idx)
3247 || (opnd->barrier->value
3248 != get_optional_operand_default_value (opcode)))
3249 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3250 break;
3251
3252 case AARCH64_OPND_PRFOP:
3253 if (opnd->prfop->name != NULL)
3254 snprintf (buf, size, "%s", opnd->prfop->name);
3255 else
3256 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3257 break;
3258
3259 case AARCH64_OPND_BARRIER_PSB:
3260 snprintf (buf, size, "%s", opnd->hint_option->name);
3261 break;
3262
3263 default:
3264 assert (0);
3265 }
3266 }
3267 \f
/* Pack a system register's op0:op1:CRn:CRm:op2 encoding into a single
   integer key.  The fields are first placed at their architectural bit
   positions within an MRS/MSR instruction and then shifted down by 5,
   giving op0 at bits 14-15, op1 at 11-13, CRn at 7-10, CRm at 3-6 and
   op2 at 0-2 (this is the layout decoded when printing an
   implementation-defined register as "s<op0>_<op1>_c<CRn>_c<CRm>_<op2>").  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* Shorthand for 3.9.3 Instructions for Accessing Special Purpose Registers:
   op0 == 3 and CRn == 4 are fixed.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
/* Shorthand for 3.9.10 System Instructions: op0 == 1 is fixed.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Symbolic names for the CRn/CRm register numbers, so that the tables
   below read like the architecture documentation.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

/* Flags for the third field of aarch64_sys_reg / aarch64_sys_ins_reg
   entries.  Undefine first in case a system header defines a macro of
   the same name.  */
#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED 0x1 /* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT 0x2 /* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT 0x4 /* System instruction register <Xt>
			  operand.  */
3307
3308
/* TODO: two more issues still need to be resolved:
   1. handle read-only and write-only system registers;
   2. handle CPU-implementation-defined system registers.  */
/* Table of system registers accepted by MRS/MSR: the assembler name,
   the op0:op1:CRn:CRm:op2 encoding (see CPENC above), and flags.
   F_DEPRECATED marks accepted-but-deprecated aliases; F_ARCHEXT marks
   registers that are only valid with an architecture extension, which
   is checked by aarch64_sys_reg_supported_p.  "RO"/"r"/"w" comments
   record the architectural access restriction; it is not yet enforced
   (see the TODO above).  The table is terminated by a null name.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  { "spsr_el1",         CPEN_(0,C0,0),	0 }, /* = spsr_svc */
  { "spsr_el12",	CPEN_ (5, C0, 0), F_ARCHEXT },
  { "elr_el1",          CPEN_(0,C0,1),	0 },
  { "elr_el12",	CPEN_ (5, C0, 1), F_ARCHEXT },
  { "sp_el0",           CPEN_(0,C1,0),	0 },
  { "spsel",            CPEN_(0,C2,0),	0 },
  { "daif",             CPEN_(3,C2,1),	0 },
  { "currentel",        CPEN_(0,C2,2),	0 }, /* RO */
  { "pan",		CPEN_(0,C2,3),	F_ARCHEXT },
  { "uao",		CPEN_ (0, C2, 4), F_ARCHEXT },
  { "nzcv",             CPEN_(3,C2,0),	0 },
  { "fpcr",             CPEN_(3,C4,0),	0 },
  { "fpsr",             CPEN_(3,C4,1),	0 },
  { "dspsr_el0",        CPEN_(3,C5,0),	0 },
  { "dlr_el0",          CPEN_(3,C5,1),	0 },
  { "spsr_el2",         CPEN_(4,C0,0),	0 }, /* = spsr_hyp */
  { "elr_el2",          CPEN_(4,C0,1),	0 },
  { "sp_el1",           CPEN_(4,C1,0),	0 },
  { "spsr_irq",         CPEN_(4,C3,0),	0 },
  { "spsr_abt",         CPEN_(4,C3,1),	0 },
  { "spsr_und",         CPEN_(4,C3,2),	0 },
  { "spsr_fiq",         CPEN_(4,C3,3),	0 },
  { "spsr_el3",         CPEN_(6,C0,0),	0 },
  { "elr_el3",          CPEN_(6,C0,1),	0 },
  { "sp_el2",           CPEN_(6,C1,0),	0 },
  { "spsr_svc",         CPEN_(0,C0,0),	F_DEPRECATED }, /* = spsr_el1 */
  { "spsr_hyp",         CPEN_(4,C0,0),	F_DEPRECATED }, /* = spsr_el2 */
  /* Identification and cache-type registers (read-only).  */
  { "midr_el1",         CPENC(3,0,C0,C0,0),	0 }, /* RO */
  { "ctr_el0",          CPENC(3,3,C0,C0,1),	0 }, /* RO */
  { "mpidr_el1",        CPENC(3,0,C0,C0,5),	0 }, /* RO */
  { "revidr_el1",       CPENC(3,0,C0,C0,6),	0 }, /* RO */
  { "aidr_el1",         CPENC(3,1,C0,C0,7),	0 }, /* RO */
  { "dczid_el0",        CPENC(3,3,C0,C0,7),	0 }, /* RO */
  { "id_dfr0_el1",      CPENC(3,0,C0,C1,2),	0 }, /* RO */
  { "id_pfr0_el1",      CPENC(3,0,C0,C1,0),	0 }, /* RO */
  { "id_pfr1_el1",      CPENC(3,0,C0,C1,1),	0 }, /* RO */
  { "id_afr0_el1",      CPENC(3,0,C0,C1,3),	0 }, /* RO */
  { "id_mmfr0_el1",     CPENC(3,0,C0,C1,4),	0 }, /* RO */
  { "id_mmfr1_el1",     CPENC(3,0,C0,C1,5),	0 }, /* RO */
  { "id_mmfr2_el1",     CPENC(3,0,C0,C1,6),	0 }, /* RO */
  { "id_mmfr3_el1",     CPENC(3,0,C0,C1,7),	0 }, /* RO */
  { "id_mmfr4_el1",     CPENC(3,0,C0,C2,6),	0 }, /* RO */
  { "id_isar0_el1",     CPENC(3,0,C0,C2,0),	0 }, /* RO */
  { "id_isar1_el1",     CPENC(3,0,C0,C2,1),	0 }, /* RO */
  { "id_isar2_el1",     CPENC(3,0,C0,C2,2),	0 }, /* RO */
  { "id_isar3_el1",     CPENC(3,0,C0,C2,3),	0 }, /* RO */
  { "id_isar4_el1",     CPENC(3,0,C0,C2,4),	0 }, /* RO */
  { "id_isar5_el1",     CPENC(3,0,C0,C2,5),	0 }, /* RO */
  { "mvfr0_el1",        CPENC(3,0,C0,C3,0),	0 }, /* RO */
  { "mvfr1_el1",        CPENC(3,0,C0,C3,1),	0 }, /* RO */
  { "mvfr2_el1",        CPENC(3,0,C0,C3,2),	0 }, /* RO */
  { "ccsidr_el1",       CPENC(3,1,C0,C0,0),	0 }, /* RO */
  { "id_aa64pfr0_el1",  CPENC(3,0,C0,C4,0),	0 }, /* RO */
  { "id_aa64pfr1_el1",  CPENC(3,0,C0,C4,1),	0 }, /* RO */
  { "id_aa64dfr0_el1",  CPENC(3,0,C0,C5,0),	0 }, /* RO */
  { "id_aa64dfr1_el1",  CPENC(3,0,C0,C5,1),	0 }, /* RO */
  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0),	0 }, /* RO */
  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1),	0 }, /* RO */
  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0),	0 }, /* RO */
  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1),	0 }, /* RO */
  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
  { "id_aa64afr0_el1",  CPENC(3,0,C0,C5,4),	0 }, /* RO */
  { "id_aa64afr1_el1",  CPENC(3,0,C0,C5,5),	0 }, /* RO */
  { "clidr_el1",        CPENC(3,1,C0,C0,1),	0 }, /* RO */
  { "csselr_el1",       CPENC(3,2,C0,C0,0),	0 }, /* RO */
  { "vpidr_el2",        CPENC(3,4,C0,C0,0),	0 },
  { "vmpidr_el2",       CPENC(3,4,C0,C0,5),	0 },
  { "sctlr_el1",        CPENC(3,0,C1,C0,0),	0 },
  { "sctlr_el2",        CPENC(3,4,C1,C0,0),	0 },
  { "sctlr_el3",        CPENC(3,6,C1,C0,0),	0 },
  { "sctlr_el12",	CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
  { "actlr_el1",        CPENC(3,0,C1,C0,1),	0 },
  { "actlr_el2",        CPENC(3,4,C1,C0,1),	0 },
  { "actlr_el3",        CPENC(3,6,C1,C0,1),	0 },
  { "cpacr_el1",        CPENC(3,0,C1,C0,2),	0 },
  { "cpacr_el12",	CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
  { "cptr_el2",         CPENC(3,4,C1,C1,2),	0 },
  { "cptr_el3",         CPENC(3,6,C1,C1,2),	0 },
  { "scr_el3",          CPENC(3,6,C1,C1,0),	0 },
  { "hcr_el2",          CPENC(3,4,C1,C1,0),	0 },
  { "mdcr_el2",         CPENC(3,4,C1,C1,1),	0 },
  { "mdcr_el3",         CPENC(3,6,C1,C3,1),	0 },
  { "hstr_el2",         CPENC(3,4,C1,C1,3),	0 },
  { "hacr_el2",         CPENC(3,4,C1,C1,7),	0 },
  { "ttbr0_el1",        CPENC(3,0,C2,C0,0),	0 },
  { "ttbr1_el1",        CPENC(3,0,C2,C0,1),	0 },
  { "ttbr0_el2",        CPENC(3,4,C2,C0,0),	0 },
  { "ttbr1_el2",	CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
  { "ttbr0_el3",        CPENC(3,6,C2,C0,0),	0 },
  { "ttbr0_el12",	CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
  { "ttbr1_el12",	CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
  { "vttbr_el2",        CPENC(3,4,C2,C1,0),	0 },
  { "tcr_el1",          CPENC(3,0,C2,C0,2),	0 },
  { "tcr_el2",          CPENC(3,4,C2,C0,2),	0 },
  { "tcr_el3",          CPENC(3,6,C2,C0,2),	0 },
  { "tcr_el12",		CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
  { "vtcr_el2",         CPENC(3,4,C2,C1,2),	0 },
  { "afsr0_el1",        CPENC(3,0,C5,C1,0),	0 },
  { "afsr1_el1",        CPENC(3,0,C5,C1,1),	0 },
  { "afsr0_el2",        CPENC(3,4,C5,C1,0),	0 },
  { "afsr1_el2",        CPENC(3,4,C5,C1,1),	0 },
  { "afsr0_el3",        CPENC(3,6,C5,C1,0),	0 },
  { "afsr0_el12",	CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
  { "afsr1_el3",        CPENC(3,6,C5,C1,1),	0 },
  { "afsr1_el12",	CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
  { "esr_el1",          CPENC(3,0,C5,C2,0),	0 },
  { "esr_el2",          CPENC(3,4,C5,C2,0),	0 },
  { "esr_el3",          CPENC(3,6,C5,C2,0),	0 },
  { "esr_el12",		CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
  { "vsesr_el2",	CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
  { "fpexc32_el2",      CPENC(3,4,C5,C3,0),	0 },
  { "erridr_el1",	CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
  { "errselr_el1",	CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
  { "erxfr_el1",	CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
  { "erxctlr_el1",	CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
  { "erxstatus_el1",	CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
  { "erxaddr_el1",	CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
  { "erxmisc0_el1",	CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
  { "erxmisc1_el1",	CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
  { "far_el1",          CPENC(3,0,C6,C0,0),	0 },
  { "far_el2",          CPENC(3,4,C6,C0,0),	0 },
  { "far_el3",          CPENC(3,6,C6,C0,0),	0 },
  { "far_el12",		CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
  { "hpfar_el2",        CPENC(3,4,C6,C0,4),	0 },
  { "par_el1",          CPENC(3,0,C7,C4,0),	0 },
  { "mair_el1",         CPENC(3,0,C10,C2,0),	0 },
  { "mair_el2",         CPENC(3,4,C10,C2,0),	0 },
  { "mair_el3",         CPENC(3,6,C10,C2,0),	0 },
  { "mair_el12",	CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
  { "amair_el1",        CPENC(3,0,C10,C3,0),	0 },
  { "amair_el2",        CPENC(3,4,C10,C3,0),	0 },
  { "amair_el3",        CPENC(3,6,C10,C3,0),	0 },
  { "amair_el12",	CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
  { "vbar_el1",         CPENC(3,0,C12,C0,0),	0 },
  { "vbar_el2",         CPENC(3,4,C12,C0,0),	0 },
  { "vbar_el3",         CPENC(3,6,C12,C0,0),	0 },
  { "vbar_el12",	CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
  { "rvbar_el1",        CPENC(3,0,C12,C0,1),	0 }, /* RO */
  { "rvbar_el2",        CPENC(3,4,C12,C0,1),	0 }, /* RO */
  { "rvbar_el3",        CPENC(3,6,C12,C0,1),	0 }, /* RO */
  { "rmr_el1",          CPENC(3,0,C12,C0,2),	0 },
  { "rmr_el2",          CPENC(3,4,C12,C0,2),	0 },
  { "rmr_el3",          CPENC(3,6,C12,C0,2),	0 },
  { "isr_el1",          CPENC(3,0,C12,C1,0),	0 }, /* RO */
  { "disr_el1",		CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
  { "vdisr_el2",	CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
  { "contextidr_el1",   CPENC(3,0,C13,C0,1),	0 },
  { "contextidr_el2",	CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
  { "contextidr_el12",	CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
  { "tpidr_el0",        CPENC(3,3,C13,C0,2),	0 },
  { "tpidrro_el0",      CPENC(3,3,C13,C0,3),	0 }, /* RO */
  { "tpidr_el1",        CPENC(3,0,C13,C0,4),	0 },
  { "tpidr_el2",        CPENC(3,4,C13,C0,2),	0 },
  { "tpidr_el3",        CPENC(3,6,C13,C0,2),	0 },
  { "teecr32_el1",      CPENC(2,2,C0, C0,0),	0 }, /* See section 3.9.7.1 */
  /* Generic timer registers.  */
  { "cntfrq_el0",       CPENC(3,3,C14,C0,0),	0 }, /* RO */
  { "cntpct_el0",       CPENC(3,3,C14,C0,1),	0 }, /* RO */
  { "cntvct_el0",       CPENC(3,3,C14,C0,2),	0 }, /* RO */
  { "cntvoff_el2",      CPENC(3,4,C14,C0,3),	0 },
  { "cntkctl_el1",      CPENC(3,0,C14,C1,0),	0 },
  { "cntkctl_el12",	CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
  { "cnthctl_el2",      CPENC(3,4,C14,C1,0),	0 },
  { "cntp_tval_el0",    CPENC(3,3,C14,C2,0),	0 },
  { "cntp_tval_el02",	CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
  { "cntp_ctl_el0",     CPENC(3,3,C14,C2,1),	0 },
  { "cntp_ctl_el02",	CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
  { "cntp_cval_el0",    CPENC(3,3,C14,C2,2),	0 },
  { "cntp_cval_el02",	CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
  { "cntv_tval_el0",    CPENC(3,3,C14,C3,0),	0 },
  { "cntv_tval_el02",	CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
  { "cntv_ctl_el0",     CPENC(3,3,C14,C3,1),	0 },
  { "cntv_ctl_el02",	CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
  { "cntv_cval_el0",    CPENC(3,3,C14,C3,2),	0 },
  { "cntv_cval_el02",	CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
  { "cnthp_tval_el2",   CPENC(3,4,C14,C2,0),	0 },
  { "cnthp_ctl_el2",    CPENC(3,4,C14,C2,1),	0 },
  { "cnthp_cval_el2",   CPENC(3,4,C14,C2,2),	0 },
  { "cntps_tval_el1",   CPENC(3,7,C14,C2,0),	0 },
  { "cntps_ctl_el1",    CPENC(3,7,C14,C2,1),	0 },
  { "cntps_cval_el1",   CPENC(3,7,C14,C2,2),	0 },
  { "cnthv_tval_el2",	CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
  { "cnthv_ctl_el2",	CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
  { "cnthv_cval_el2",	CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
  { "dacr32_el2",       CPENC(3,4,C3,C0,0),	0 },
  { "ifsr32_el2",       CPENC(3,4,C5,C0,1),	0 },
  { "teehbr32_el1",     CPENC(2,2,C1,C0,0),	0 },
  { "sder32_el3",       CPENC(3,6,C1,C1,1),	0 },
  /* Debug registers (op0 == 2).  */
  { "mdscr_el1",         CPENC(2,0,C0, C2, 2),	0 },
  { "mdccsr_el0",        CPENC(2,3,C0, C1, 0),	0 },  /* r */
  { "mdccint_el1",       CPENC(2,0,C0, C2, 0),	0 },
  { "dbgdtr_el0",        CPENC(2,3,C0, C4, 0),	0 },
  { "dbgdtrrx_el0",      CPENC(2,3,C0, C5, 0),	0 },  /* r */
  { "dbgdtrtx_el0",      CPENC(2,3,C0, C5, 0),	0 },  /* w */
  { "osdtrrx_el1",       CPENC(2,0,C0, C0, 2),	0 },  /* r */
  { "osdtrtx_el1",       CPENC(2,0,C0, C3, 2),	0 },  /* w */
  { "oseccr_el1",        CPENC(2,0,C0, C6, 2),	0 },
  { "dbgvcr32_el2",      CPENC(2,4,C0, C7, 0),	0 },
  { "dbgbvr0_el1",       CPENC(2,0,C0, C0, 4),	0 },
  { "dbgbvr1_el1",       CPENC(2,0,C0, C1, 4),	0 },
  { "dbgbvr2_el1",       CPENC(2,0,C0, C2, 4),	0 },
  { "dbgbvr3_el1",       CPENC(2,0,C0, C3, 4),	0 },
  { "dbgbvr4_el1",       CPENC(2,0,C0, C4, 4),	0 },
  { "dbgbvr5_el1",       CPENC(2,0,C0, C5, 4),	0 },
  { "dbgbvr6_el1",       CPENC(2,0,C0, C6, 4),	0 },
  { "dbgbvr7_el1",       CPENC(2,0,C0, C7, 4),	0 },
  { "dbgbvr8_el1",       CPENC(2,0,C0, C8, 4),	0 },
  { "dbgbvr9_el1",       CPENC(2,0,C0, C9, 4),	0 },
  { "dbgbvr10_el1",      CPENC(2,0,C0, C10,4),	0 },
  { "dbgbvr11_el1",      CPENC(2,0,C0, C11,4),	0 },
  { "dbgbvr12_el1",      CPENC(2,0,C0, C12,4),	0 },
  { "dbgbvr13_el1",      CPENC(2,0,C0, C13,4),	0 },
  { "dbgbvr14_el1",      CPENC(2,0,C0, C14,4),	0 },
  { "dbgbvr15_el1",      CPENC(2,0,C0, C15,4),	0 },
  { "dbgbcr0_el1",       CPENC(2,0,C0, C0, 5),	0 },
  { "dbgbcr1_el1",       CPENC(2,0,C0, C1, 5),	0 },
  { "dbgbcr2_el1",       CPENC(2,0,C0, C2, 5),	0 },
  { "dbgbcr3_el1",       CPENC(2,0,C0, C3, 5),	0 },
  { "dbgbcr4_el1",       CPENC(2,0,C0, C4, 5),	0 },
  { "dbgbcr5_el1",       CPENC(2,0,C0, C5, 5),	0 },
  { "dbgbcr6_el1",       CPENC(2,0,C0, C6, 5),	0 },
  { "dbgbcr7_el1",       CPENC(2,0,C0, C7, 5),	0 },
  { "dbgbcr8_el1",       CPENC(2,0,C0, C8, 5),	0 },
  { "dbgbcr9_el1",       CPENC(2,0,C0, C9, 5),	0 },
  { "dbgbcr10_el1",      CPENC(2,0,C0, C10,5),	0 },
  { "dbgbcr11_el1",      CPENC(2,0,C0, C11,5),	0 },
  { "dbgbcr12_el1",      CPENC(2,0,C0, C12,5),	0 },
  { "dbgbcr13_el1",      CPENC(2,0,C0, C13,5),	0 },
  { "dbgbcr14_el1",      CPENC(2,0,C0, C14,5),	0 },
  { "dbgbcr15_el1",      CPENC(2,0,C0, C15,5),	0 },
  { "dbgwvr0_el1",       CPENC(2,0,C0, C0, 6),	0 },
  { "dbgwvr1_el1",       CPENC(2,0,C0, C1, 6),	0 },
  { "dbgwvr2_el1",       CPENC(2,0,C0, C2, 6),	0 },
  { "dbgwvr3_el1",       CPENC(2,0,C0, C3, 6),	0 },
  { "dbgwvr4_el1",       CPENC(2,0,C0, C4, 6),	0 },
  { "dbgwvr5_el1",       CPENC(2,0,C0, C5, 6),	0 },
  { "dbgwvr6_el1",       CPENC(2,0,C0, C6, 6),	0 },
  { "dbgwvr7_el1",       CPENC(2,0,C0, C7, 6),	0 },
  { "dbgwvr8_el1",       CPENC(2,0,C0, C8, 6),	0 },
  { "dbgwvr9_el1",       CPENC(2,0,C0, C9, 6),	0 },
  { "dbgwvr10_el1",      CPENC(2,0,C0, C10,6),	0 },
  { "dbgwvr11_el1",      CPENC(2,0,C0, C11,6),	0 },
  { "dbgwvr12_el1",      CPENC(2,0,C0, C12,6),	0 },
  { "dbgwvr13_el1",      CPENC(2,0,C0, C13,6),	0 },
  { "dbgwvr14_el1",      CPENC(2,0,C0, C14,6),	0 },
  { "dbgwvr15_el1",      CPENC(2,0,C0, C15,6),	0 },
  { "dbgwcr0_el1",       CPENC(2,0,C0, C0, 7),	0 },
  { "dbgwcr1_el1",       CPENC(2,0,C0, C1, 7),	0 },
  { "dbgwcr2_el1",       CPENC(2,0,C0, C2, 7),	0 },
  { "dbgwcr3_el1",       CPENC(2,0,C0, C3, 7),	0 },
  { "dbgwcr4_el1",       CPENC(2,0,C0, C4, 7),	0 },
  { "dbgwcr5_el1",       CPENC(2,0,C0, C5, 7),	0 },
  { "dbgwcr6_el1",       CPENC(2,0,C0, C6, 7),	0 },
  { "dbgwcr7_el1",       CPENC(2,0,C0, C7, 7),	0 },
  { "dbgwcr8_el1",       CPENC(2,0,C0, C8, 7),	0 },
  { "dbgwcr9_el1",       CPENC(2,0,C0, C9, 7),	0 },
  { "dbgwcr10_el1",      CPENC(2,0,C0, C10,7),	0 },
  { "dbgwcr11_el1",      CPENC(2,0,C0, C11,7),	0 },
  { "dbgwcr12_el1",      CPENC(2,0,C0, C12,7),	0 },
  { "dbgwcr13_el1",      CPENC(2,0,C0, C13,7),	0 },
  { "dbgwcr14_el1",      CPENC(2,0,C0, C14,7),	0 },
  { "dbgwcr15_el1",      CPENC(2,0,C0, C15,7),	0 },
  { "mdrar_el1",         CPENC(2,0,C1, C0, 0),	0 },  /* r */
  { "oslar_el1",         CPENC(2,0,C1, C0, 4),	0 },  /* w */
  { "oslsr_el1",         CPENC(2,0,C1, C1, 4),	0 },  /* r */
  { "osdlr_el1",         CPENC(2,0,C1, C3, 4),	0 },
  { "dbgprcr_el1",       CPENC(2,0,C1, C4, 4),	0 },
  { "dbgclaimset_el1",   CPENC(2,0,C7, C8, 6),	0 },
  { "dbgclaimclr_el1",   CPENC(2,0,C7, C9, 6),	0 },
  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6),	0 },  /* r */
  /* Statistical Profiling Extension registers (F_ARCHEXT; gated on
     AARCH64_FEATURE_PROFILE in aarch64_sys_reg_supported_p).  */
  { "pmblimitr_el1",	 CPENC (3, 0, C9, C10, 0), F_ARCHEXT },  /* rw */
  { "pmbptr_el1",	 CPENC (3, 0, C9, C10, 1), F_ARCHEXT },  /* rw */
  { "pmbsr_el1",	 CPENC (3, 0, C9, C10, 3), F_ARCHEXT },  /* rw */
  { "pmbidr_el1",	 CPENC (3, 0, C9, C10, 7), F_ARCHEXT },  /* ro */
  { "pmscr_el1",	 CPENC (3, 0, C9, C9, 0),  F_ARCHEXT },  /* rw */
  { "pmsicr_el1",	 CPENC (3, 0, C9, C9, 2),  F_ARCHEXT },  /* rw */
  { "pmsirr_el1",	 CPENC (3, 0, C9, C9, 3),  F_ARCHEXT },  /* rw */
  { "pmsfcr_el1",	 CPENC (3, 0, C9, C9, 4),  F_ARCHEXT },  /* rw */
  { "pmsevfr_el1",	 CPENC (3, 0, C9, C9, 5),  F_ARCHEXT },  /* rw */
  { "pmslatfr_el1",	 CPENC (3, 0, C9, C9, 6),  F_ARCHEXT },  /* rw */
  { "pmsidr_el1",	 CPENC (3, 0, C9, C9, 7),  F_ARCHEXT },  /* ro */
  { "pmscr_el2",	 CPENC (3, 4, C9, C9, 0),  F_ARCHEXT },  /* rw */
  { "pmscr_el12",	 CPENC (3, 5, C9, C9, 0),  F_ARCHEXT },  /* rw */
  /* Performance Monitors registers.  */
  { "pmcr_el0",          CPENC(3,3,C9,C12, 0),	0 },
  { "pmcntenset_el0",    CPENC(3,3,C9,C12, 1),	0 },
  { "pmcntenclr_el0",    CPENC(3,3,C9,C12, 2),	0 },
  { "pmovsclr_el0",      CPENC(3,3,C9,C12, 3),	0 },
  { "pmswinc_el0",       CPENC(3,3,C9,C12, 4),	0 },  /* w */
  { "pmselr_el0",        CPENC(3,3,C9,C12, 5),	0 },
  { "pmceid0_el0",       CPENC(3,3,C9,C12, 6),	0 },  /* r */
  { "pmceid1_el0",       CPENC(3,3,C9,C12, 7),	0 },  /* r */
  { "pmccntr_el0",       CPENC(3,3,C9,C13, 0),	0 },
  { "pmxevtyper_el0",    CPENC(3,3,C9,C13, 1),	0 },
  { "pmxevcntr_el0",     CPENC(3,3,C9,C13, 2),	0 },
  { "pmuserenr_el0",     CPENC(3,3,C9,C14, 0),	0 },
  { "pmintenset_el1",    CPENC(3,0,C9,C14, 1),	0 },
  { "pmintenclr_el1",    CPENC(3,0,C9,C14, 2),	0 },
  { "pmovsset_el0",      CPENC(3,3,C9,C14, 3),	0 },
  { "pmevcntr0_el0",     CPENC(3,3,C14,C8, 0),	0 },
  { "pmevcntr1_el0",     CPENC(3,3,C14,C8, 1),	0 },
  { "pmevcntr2_el0",     CPENC(3,3,C14,C8, 2),	0 },
  { "pmevcntr3_el0",     CPENC(3,3,C14,C8, 3),	0 },
  { "pmevcntr4_el0",     CPENC(3,3,C14,C8, 4),	0 },
  { "pmevcntr5_el0",     CPENC(3,3,C14,C8, 5),	0 },
  { "pmevcntr6_el0",     CPENC(3,3,C14,C8, 6),	0 },
  { "pmevcntr7_el0",     CPENC(3,3,C14,C8, 7),	0 },
  { "pmevcntr8_el0",     CPENC(3,3,C14,C9, 0),	0 },
  { "pmevcntr9_el0",     CPENC(3,3,C14,C9, 1),	0 },
  { "pmevcntr10_el0",    CPENC(3,3,C14,C9, 2),	0 },
  { "pmevcntr11_el0",    CPENC(3,3,C14,C9, 3),	0 },
  { "pmevcntr12_el0",    CPENC(3,3,C14,C9, 4),	0 },
  { "pmevcntr13_el0",    CPENC(3,3,C14,C9, 5),	0 },
  { "pmevcntr14_el0",    CPENC(3,3,C14,C9, 6),	0 },
  { "pmevcntr15_el0",    CPENC(3,3,C14,C9, 7),	0 },
  { "pmevcntr16_el0",    CPENC(3,3,C14,C10,0),	0 },
  { "pmevcntr17_el0",    CPENC(3,3,C14,C10,1),	0 },
  { "pmevcntr18_el0",    CPENC(3,3,C14,C10,2),	0 },
  { "pmevcntr19_el0",    CPENC(3,3,C14,C10,3),	0 },
  { "pmevcntr20_el0",    CPENC(3,3,C14,C10,4),	0 },
  { "pmevcntr21_el0",    CPENC(3,3,C14,C10,5),	0 },
  { "pmevcntr22_el0",    CPENC(3,3,C14,C10,6),	0 },
  { "pmevcntr23_el0",    CPENC(3,3,C14,C10,7),	0 },
  { "pmevcntr24_el0",    CPENC(3,3,C14,C11,0),	0 },
  { "pmevcntr25_el0",    CPENC(3,3,C14,C11,1),	0 },
  { "pmevcntr26_el0",    CPENC(3,3,C14,C11,2),	0 },
  { "pmevcntr27_el0",    CPENC(3,3,C14,C11,3),	0 },
  { "pmevcntr28_el0",    CPENC(3,3,C14,C11,4),	0 },
  { "pmevcntr29_el0",    CPENC(3,3,C14,C11,5),	0 },
  { "pmevcntr30_el0",    CPENC(3,3,C14,C11,6),	0 },
  { "pmevtyper0_el0",    CPENC(3,3,C14,C12,0),	0 },
  { "pmevtyper1_el0",    CPENC(3,3,C14,C12,1),	0 },
  { "pmevtyper2_el0",    CPENC(3,3,C14,C12,2),	0 },
  { "pmevtyper3_el0",    CPENC(3,3,C14,C12,3),	0 },
  { "pmevtyper4_el0",    CPENC(3,3,C14,C12,4),	0 },
  { "pmevtyper5_el0",    CPENC(3,3,C14,C12,5),	0 },
  { "pmevtyper6_el0",    CPENC(3,3,C14,C12,6),	0 },
  { "pmevtyper7_el0",    CPENC(3,3,C14,C12,7),	0 },
  { "pmevtyper8_el0",    CPENC(3,3,C14,C13,0),	0 },
  { "pmevtyper9_el0",    CPENC(3,3,C14,C13,1),	0 },
  { "pmevtyper10_el0",   CPENC(3,3,C14,C13,2),	0 },
  { "pmevtyper11_el0",   CPENC(3,3,C14,C13,3),	0 },
  { "pmevtyper12_el0",   CPENC(3,3,C14,C13,4),	0 },
  { "pmevtyper13_el0",   CPENC(3,3,C14,C13,5),	0 },
  { "pmevtyper14_el0",   CPENC(3,3,C14,C13,6),	0 },
  { "pmevtyper15_el0",   CPENC(3,3,C14,C13,7),	0 },
  { "pmevtyper16_el0",   CPENC(3,3,C14,C14,0),	0 },
  { "pmevtyper17_el0",   CPENC(3,3,C14,C14,1),	0 },
  { "pmevtyper18_el0",   CPENC(3,3,C14,C14,2),	0 },
  { "pmevtyper19_el0",   CPENC(3,3,C14,C14,3),	0 },
  { "pmevtyper20_el0",   CPENC(3,3,C14,C14,4),	0 },
  { "pmevtyper21_el0",   CPENC(3,3,C14,C14,5),	0 },
  { "pmevtyper22_el0",   CPENC(3,3,C14,C14,6),	0 },
  { "pmevtyper23_el0",   CPENC(3,3,C14,C14,7),	0 },
  { "pmevtyper24_el0",   CPENC(3,3,C14,C15,0),	0 },
  { "pmevtyper25_el0",   CPENC(3,3,C14,C15,1),	0 },
  { "pmevtyper26_el0",   CPENC(3,3,C14,C15,2),	0 },
  { "pmevtyper27_el0",   CPENC(3,3,C14,C15,3),	0 },
  { "pmevtyper28_el0",   CPENC(3,3,C14,C15,4),	0 },
  { "pmevtyper29_el0",   CPENC(3,3,C14,C15,5),	0 },
  { "pmevtyper30_el0",   CPENC(3,3,C14,C15,6),	0 },
  { "pmccfiltr_el0",     CPENC(3,3,C14,C15,7),	0 },
  { 0,          CPENC(0,0,0,0,0),	0 },
};
3676
3677 bfd_boolean
3678 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3679 {
3680 return (reg->flags & F_DEPRECATED) != 0;
3681 }
3682
/* Return TRUE if the system register described by REG is accessible
   with the feature set FEATURES.  Registers without F_ARCHEXT belong to
   the base architecture and are always supported; the rest are matched
   by encoding against the architecture extension that introduced them.
   The CPENC/CPEN_ values below mirror the aarch64_sys_regs entries.  */
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* PSTATE.UAO.  */
  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C4, 0)
       || reg->value == CPENC (3, 0, C5, C4, 1)
       || reg->value == CPENC (3, 0, C5, C4, 2)
       || reg->value == CPENC (3, 0, C5, C4, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  return TRUE;
}
3789
/* PSTATE fields accepted by MSR (immediate): name, encoding, and flags.
   F_ARCHEXT entries are only valid with an architecture extension,
   checked by aarch64_pstatefield_supported_p.  Terminated by a null
   name.  */
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",            0x05,	0 },
  { "daifset",          0x1e,	0 },
  { "daifclr",          0x1f,	0 },
  { "pan",		0x04,	F_ARCHEXT },
  { "uao",		0x03,	F_ARCHEXT },
  { 0,          CPENC(0,0,0,0,0), 0 },
};
3799
3800 bfd_boolean
3801 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3802 const aarch64_sys_reg *reg)
3803 {
3804 if (!(reg->flags & F_ARCHEXT))
3805 return TRUE;
3806
3807 /* PAN. Values are from aarch64_pstatefields. */
3808 if (reg->value == 0x04
3809 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3810 return FALSE;
3811
3812 /* UAO. Values are from aarch64_pstatefields. */
3813 if (reg->value == 0x03
3814 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3815 return FALSE;
3816
3817 return TRUE;
3818 }
3819
/* Operands of the IC (instruction cache maintenance) system
   instruction: operation name, CPENS encoding, and flags.  F_HASXT
   marks operations that take an <Xt> register operand.  Terminated by
   a null name.  */
const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
3827
/* Operands of the DC (data cache maintenance) system instruction.
   All take an <Xt> operand (F_HASXT); "cvap" additionally carries
   F_ARCHEXT, i.e. it is gated on an architecture extension.
   Terminated by a null name.  */
const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
{
    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
3841
/* Operands of the AT (address translation) system instruction.  All
   take an <Xt> operand (F_HASXT); "s1e1rp"/"s1e1wp" additionally carry
   F_ARCHEXT, i.e. they are gated on an architecture extension.
   Terminated by a null name.  */
const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
{
    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0,       CPENS(0,0,0,0), 0 }
};
3860
3861 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3862 {
3863 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3864 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
3865 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
3866 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
3867 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3868 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
3869 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
3870 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
3871 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
3872 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
3873 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
3874 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
3875 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
3876 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
3877 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3878 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3879 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
3880 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
3881 { "alle2", CPENS(4,C8,C7,0), 0 },
3882 { "alle2is", CPENS(4,C8,C3,0), 0 },
3883 { "alle1", CPENS(4,C8,C7,4), 0 },
3884 { "alle1is", CPENS(4,C8,C3,4), 0 },
3885 { "alle3", CPENS(6,C8,C7,0), 0 },
3886 { "alle3is", CPENS(6,C8,C3,0), 0 },
3887 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
3888 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
3889 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
3890 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
3891 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
3892 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
3893 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
3894 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
3895 { 0, CPENS(0,0,0,0), 0 }
3896 };
3897
3898 bfd_boolean
3899 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
3900 {
3901 return (sys_ins_reg->flags & F_HASXT) != 0;
3902 }
3903
3904 extern bfd_boolean
3905 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
3906 const aarch64_sys_ins_reg *reg)
3907 {
3908 if (!(reg->flags & F_ARCHEXT))
3909 return TRUE;
3910
3911 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3912 if (reg->value == CPENS (3, C7, C12, 1)
3913 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3914 return FALSE;
3915
3916 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3917 if ((reg->value == CPENS (0, C7, C9, 0)
3918 || reg->value == CPENS (0, C7, C9, 1))
3919 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3920 return FALSE;
3921
3922 return TRUE;
3923 }
3924
/* The C0..C15 aliases were only needed while building the system register
   tables above; retract them so they do not leak into later code.  */
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

/* Extract bit number BT of instruction word INSN.  */
#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field HI..LO of INSN.
   NOTE(review): the mask is built with a plain int shift, so this assumes
   HI - LO + 1 < 32; a full 32-bit field would shift by the width of int,
   which is undefined behaviour -- confirm all callers use narrow fields
   (the user below extracts at most 5 bits).  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
3944
3945 static bfd_boolean
3946 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
3947 const aarch64_insn insn)
3948 {
3949 int t = BITS (insn, 4, 0);
3950 int n = BITS (insn, 9, 5);
3951 int t2 = BITS (insn, 14, 10);
3952
3953 if (BIT (insn, 23))
3954 {
3955 /* Write back enabled. */
3956 if ((t == n || t2 == n) && n != 31)
3957 return FALSE;
3958 }
3959
3960 if (BIT (insn, 22))
3961 {
3962 /* Load */
3963 if (t == t2)
3964 return FALSE;
3965 }
3966
3967 return TRUE;
3968 }
3969
3970 /* Include the opcode description table as well as the operand description
3971 table. */
3972 #define VERIFIER(x) verify_##x
3973 #include "aarch64-tbl.h"
This page took 0.251606 seconds and 5 git commands to generate.