326b94e71e956980554b22bdaab1247832f2b3c6
[deliverable/binutils-gdb.git] / opcodes / aarch64-opc.c
1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
#ifdef DEBUG_AARCH64
/* When non-zero, the debug dump helpers below emit verbose tracing.  */
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
37
/* The enumeration strings associated with each value of a 5-bit SVE
   pattern operand.  Indices not listed are reserved and stay null.  */
const char *const aarch64_sve_pattern_array[32] = {
  [0] = "pow2",
  [1] = "vl1",
  [2] = "vl2",
  [3] = "vl3",
  [4] = "vl4",
  [5] = "vl5",
  [6] = "vl6",
  [7] = "vl7",
  [8] = "vl8",
  [9] = "vl16",
  [10] = "vl32",
  [11] = "vl64",
  [12] = "vl128",
  [13] = "vl256",
  [29] = "mul4",
  [30] = "mul3",
  [31] = "all"
};
78
/* The enumeration strings associated with each value of a 4-bit SVE
   prefetch operand.  Indices not listed are reserved and stay null.  */
const char *const aarch64_sve_prfop_array[16] = {
  [0] = "pldl1keep",
  [1] = "pldl1strm",
  [2] = "pldl2keep",
  [3] = "pldl2strm",
  [4] = "pldl3keep",
  [5] = "pldl3strm",
  [8] = "pstl1keep",
  [9] = "pstl1strm",
  [10] = "pstl2keep",
  [11] = "pstl2strm",
  [12] = "pstl3keep",
  [13] = "pstl3strm"
};
101
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
/* Classification of the qualifier pattern across an instruction's
   operands; used below to pick the operand that drives the size:Q
   encoding.  */
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

/* Indexed by enum data_pattern; gives the index of the operand whose
   qualifier determines the size:Q fields for that pattern.  */
static const char significant_operand_index [] =
{
  0, /* DP_UNKNOWN, by default using operand 0.  */
  0, /* DP_VECTOR_3SAME */
  1, /* DP_VECTOR_LONG */
  2, /* DP_VECTOR_WIDE */
  1, /* DP_VECTOR_ACROSS_LANES */
};
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
201 \f
/* Descriptions of the bit fields used to encode AArch64 instruction
   operands.  NOTE(review): each entry appears to be { lsb, width } —
   confirm against the aarch64_field definition in aarch64-opc.h.  */
const aarch64_field fields[] =
{
  { 0, 0 }, /* NIL.  */
  { 0, 4 }, /* cond2: condition in truly conditional-executed inst.  */
  { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
  { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
  { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate.  */
  { 5, 19 }, /* imm19: e.g. in CBZ.  */
  { 5, 19 }, /* immhi: e.g. in ADRP.  */
  { 29, 2 }, /* immlo: e.g. in ADRP.  */
  { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions.  */
  { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst.  */
  { 29, 1 }, /* op: in AdvSIMD modified immediate instructions.  */
  { 30, 1 }, /* Q: in most AdvSIMD instructions.  */
  { 0, 5 }, /* Rt: in load/store instructions.  */
  { 0, 5 }, /* Rd: in many integer instructions.  */
  { 5, 5 }, /* Rn: in many integer instructions.  */
  { 10, 5 }, /* Rt2: in load/store pair instructions.  */
  { 10, 5 }, /* Ra: in fp instructions.  */
  { 5, 3 }, /* op2: in the system instructions.  */
  { 8, 4 }, /* CRm: in the system instructions.  */
  { 12, 4 }, /* CRn: in the system instructions.  */
  { 16, 3 }, /* op1: in the system instructions.  */
  { 19, 2 }, /* op0: in the system instructions.  */
  { 10, 3 }, /* imm3: in add/sub extended reg instructions.  */
  { 12, 4 }, /* cond: condition flags as a source operand.  */
  { 12, 4 }, /* opcode: in advsimd load/store instructions.  */
  { 12, 4 }, /* cmode: in advsimd modified immediate instructions.  */
  { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element.  */
  { 13, 2 }, /* len: in advsimd tbl/tbx instructions.  */
  { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst.  */
  { 16, 5 }, /* Rs: in load/store exclusive instructions.  */
  { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst.  */
  { 12, 1 }, /* S: in load/store reg offset instructions.  */
  { 21, 2 }, /* hw: in move wide constant instructions.  */
  { 22, 2 }, /* opc: in load/store reg offset instructions.  */
  { 23, 1 }, /* opc1: in load/store reg offset instructions.  */
  { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions.  */
  { 22, 2 }, /* type: floating point type field in fp data inst.  */
  { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst.  */
  { 10, 6 }, /* imm6: in add/sub reg shifted instructions.  */
  { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions.  */
  { 16, 5 }, /* imm5: in conditional compare (immediate) instructions.  */
  { 15, 7 }, /* imm7: in load/store pair pre/post index instructions.  */
  { 13, 8 }, /* imm8: in floating-point scalar move immediate inst.  */
  { 12, 9 }, /* imm9: in load/store pre/post index instructions.  */
  { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
  { 5, 14 }, /* imm14: in test bit and branch instructions.  */
  { 5, 16 }, /* imm16: in exception instructions.  */
  { 0, 26 }, /* imm26: in unconditional branch instructions.  */
  { 10, 6 }, /* imms: in bitfield and logical immediate instructions.  */
  { 16, 6 }, /* immr: in bitfield and logical immediate instructions.  */
  { 16, 3 }, /* immb: in advsimd shift by immediate instructions.  */
  { 19, 4 }, /* immh: in advsimd shift by immediate instructions.  */
  { 22, 1 }, /* N: in logical (immediate) instructions.  */
  { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index.  */
  { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index.  */
  { 31, 1 }, /* sf: in integer data processing instructions.  */
  { 30, 1 }, /* lse_size: in LSE extension atomic instructions.  */
  { 11, 1 }, /* H: in advsimd scalar x indexed element instructions.  */
  { 21, 1 }, /* L: in advsimd scalar x indexed element instructions.  */
  { 20, 1 }, /* M: in advsimd scalar x indexed element instructions.  */
  { 31, 1 }, /* b5: in the test bit and branch instructions.  */
  { 19, 5 }, /* b40: in the test bit and branch instructions.  */
  { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst.  */
  { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0].  */
  { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10].  */
  { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5].  */
  { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10].  */
  { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16].  */
  { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16].  */
  { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5].  */
  { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0].  */
  { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5].  */
  { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16].  */
  { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0].  */
  { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5].  */
  { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16].  */
  { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5].  */
  { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
  { 16, 4 }, /* SVE_imm4: 4-bit immediate field.  */
  { 5, 5 }, /* SVE_pattern: vector pattern enumeration.  */
  { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
  { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
};
287
288 enum aarch64_operand_class
289 aarch64_get_operand_class (enum aarch64_opnd type)
290 {
291 return aarch64_operands[type].op_class;
292 }
293
294 const char *
295 aarch64_get_operand_name (enum aarch64_opnd type)
296 {
297 return aarch64_operands[type].name;
298 }
299
300 /* Get operand description string.
301 This is usually for the diagnosis purpose. */
302 const char *
303 aarch64_get_operand_desc (enum aarch64_opnd type)
304 {
305 return aarch64_operands[type].desc;
306 }
307
/* Table of all conditional affixes, indexed by the 4-bit condition
   code.  Each entry holds the accepted spellings (including aliases,
   e.g. "hs" for "cs") and the encoded value.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"eq"}, 0x0},
  {{"ne"}, 0x1},
  {{"cs", "hs"}, 0x2},
  {{"cc", "lo", "ul"}, 0x3},
  {{"mi"}, 0x4},
  {{"pl"}, 0x5},
  {{"vs"}, 0x6},
  {{"vc"}, 0x7},
  {{"hi"}, 0x8},
  {{"ls"}, 0x9},
  {{"ge"}, 0xa},
  {{"lt"}, 0xb},
  {{"gt"}, 0xc},
  {{"le"}, 0xd},
  {{"al"}, 0xe},
  {{"nv"}, 0xf},
};
328
329 const aarch64_cond *
330 get_cond_from_value (aarch64_insn value)
331 {
332 assert (value < 16);
333 return &aarch64_conds[(unsigned int) value];
334 }
335
336 const aarch64_cond *
337 get_inverted_cond (const aarch64_cond *cond)
338 {
339 return &aarch64_conds[cond->value ^ 0x1];
340 }
341
342 /* Table describing the operand extension/shifting operators; indexed by
343 enum aarch64_modifier_kind.
344
345 The value column provides the most common values for encoding modifiers,
346 which enables table-driven encoding/decoding for the modifiers. */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{
  /* Order must match enum aarch64_modifier_kind: entries are looked up
     by index and the kind recovered by pointer subtraction.  */
  {"none", 0x0},
  {"msl", 0x0},
  {"ror", 0x3},
  {"asr", 0x2},
  {"lsr", 0x1},
  {"lsl", 0x0},
  {"uxtb", 0x0},
  {"uxth", 0x1},
  {"uxtw", 0x2},
  {"uxtx", 0x3},
  {"sxtb", 0x4},
  {"sxth", 0x5},
  {"sxtw", 0x6},
  {"sxtx", 0x7},
  {"mul", 0x0},
  {NULL, 0},	/* Terminator.  */
};
366
367 enum aarch64_modifier_kind
368 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
369 {
370 return desc - aarch64_operand_modifiers;
371 }
372
373 aarch64_insn
374 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
375 {
376 return aarch64_operand_modifiers[kind].value;
377 }
378
379 enum aarch64_modifier_kind
380 aarch64_get_operand_modifier_from_value (aarch64_insn value,
381 bfd_boolean extend_p)
382 {
383 if (extend_p == TRUE)
384 return AARCH64_MOD_UXTB + value;
385 else
386 return AARCH64_MOD_LSL - value;
387 }
388
389 bfd_boolean
390 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
391 {
392 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
393 ? TRUE : FALSE;
394 }
395
396 static inline bfd_boolean
397 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
398 {
399 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
400 ? TRUE : FALSE;
401 }
402
/* Barrier option names indexed by their 4-bit encoded value.  Values
   with no symbolic name are spelled as their raw immediate (#0xNN).  */
const struct aarch64_name_value_pair aarch64_barrier_options[16] =
{
  { "#0x00", 0x0 },
  { "oshld", 0x1 },
  { "oshst", 0x2 },
  { "osh", 0x3 },
  { "#0x04", 0x4 },
  { "nshld", 0x5 },
  { "nshst", 0x6 },
  { "nsh", 0x7 },
  { "#0x08", 0x8 },
  { "ishld", 0x9 },
  { "ishst", 0xa },
  { "ish", 0xb },
  { "#0x0c", 0xc },
  { "ld", 0xd },
  { "st", 0xe },
  { "sy", 0xf },
};
422
423 /* Table describing the operands supported by the aliases of the HINT
424 instruction.
425
426 The name column is the operand that is accepted for the alias. The value
427 column is the hint number of the alias. The list of operands is terminated
428 by NULL in the name column. */
429
const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },	/* PSB CSYNC.  */
  { NULL, 0x0 },	/* Terminator (NULL name ends the list).  */
};
435
/* op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
/* PRFM operation names indexed by their 5-bit encoding; NULL marks an
   encoding with no mnemonic.  */
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { NULL, 0x06 },
  { NULL, 0x07 },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { NULL, 0x0e },
  { NULL, 0x0f },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
  { NULL, 0x16 },
  { NULL, 0x17 },
  { NULL, 0x18 },
  { NULL, 0x19 },
  { NULL, 0x1a },
  { NULL, 0x1b },
  { NULL, 0x1c },
  { NULL, 0x1d },
  { NULL, 0x1e },
  { NULL, 0x1f },
};
#undef B
476 \f
477 /* Utilities on value constraint. */
478
/* Return 1 iff LOW <= VALUE <= HIGH (inclusive on both ends).  */
static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return low <= value && value <= high;
}
484
/* Return 1 iff VALUE is a multiple of ALIGN.  ALIGN is expected to be
   a power of two, so the check reduces to testing the low bits.  */
static inline int
value_aligned_p (int64_t value, int align)
{
  int64_t low_bits = value & (align - 1);
  return low_bits == 0;
}
490
491 /* A signed value fits in a field. */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    /* A WIDTH-bit two's-complement field holds [-2^(W-1), 2^(W-1)).  */
    int64_t half_range = (int64_t) 1 << (width - 1);
    return -half_range <= value && value < half_range;
  }
}
504
505 /* An unsigned value fits in a field. */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  assert (width < 32);
  if (width >= sizeof (value) * 8)
    return 0;
  {
    /* A WIDTH-bit unsigned field holds [0, 2^W).  */
    int64_t limit = (int64_t) 1 << width;
    return 0 <= value && value < limit;
  }
}
518
519 /* Return 1 if OPERAND is SP or WSP. */
520 int
521 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
522 {
523 return ((aarch64_get_operand_class (operand->type)
524 == AARCH64_OPND_CLASS_INT_REG)
525 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
526 && operand->reg.regno == 31);
527 }
528
529 /* Return 1 if OPERAND is XZR or WZP. */
530 int
531 aarch64_zero_register_p (const aarch64_opnd_info *operand)
532 {
533 return ((aarch64_get_operand_class (operand->type)
534 == AARCH64_OPND_CLASS_INT_REG)
535 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
536 && operand->reg.regno == 31);
537 }
538
539 /* Return true if the operand *OPERAND that has the operand code
540 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
541 qualified by the qualifier TARGET. */
542
543 static inline int
544 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
545 aarch64_opnd_qualifier_t target)
546 {
547 switch (operand->qualifier)
548 {
549 case AARCH64_OPND_QLF_W:
550 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
551 return 1;
552 break;
553 case AARCH64_OPND_QLF_X:
554 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
555 return 1;
556 break;
557 case AARCH64_OPND_QLF_WSP:
558 if (target == AARCH64_OPND_QLF_W
559 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
560 return 1;
561 break;
562 case AARCH64_OPND_QLF_SP:
563 if (target == AARCH64_OPND_QLF_X
564 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
565 return 1;
566 break;
567 default:
568 break;
569 }
570
571 return 0;
572 }
573
574 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
575 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
576
577 Return NIL if more than one expected qualifiers are found. */
578
579 aarch64_opnd_qualifier_t
580 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
581 int idx,
582 const aarch64_opnd_qualifier_t known_qlf,
583 int known_idx)
584 {
585 int i, saved_i;
586
587 /* Special case.
588
589 When the known qualifier is NIL, we have to assume that there is only
590 one qualifier sequence in the *QSEQ_LIST and return the corresponding
591 qualifier directly. One scenario is that for instruction
592 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
593 which has only one possible valid qualifier sequence
594 NIL, S_D
595 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
596 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
597
598 Because the qualifier NIL has dual roles in the qualifier sequence:
599 it can mean no qualifier for the operand, or the qualifer sequence is
600 not in use (when all qualifiers in the sequence are NILs), we have to
601 handle this special case here. */
602 if (known_qlf == AARCH64_OPND_NIL)
603 {
604 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
605 return qseq_list[0][idx];
606 }
607
608 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
609 {
610 if (qseq_list[i][known_idx] == known_qlf)
611 {
612 if (saved_i != -1)
613 /* More than one sequences are found to have KNOWN_QLF at
614 KNOWN_IDX. */
615 return AARCH64_OPND_NIL;
616 saved_i = i;
617 }
618 }
619
620 return qseq_list[saved_i][idx];
621 }
622
/* Kinds of operand qualifier; the kind determines how the three data
   fields of struct operand_qualifier_data are interpreted.  */
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind:
     for OQK_OPD_VARIANT they are element size, number of elements and
     the common encoding value; for OQK_VALUE_IN_RANGE they are the
     lower bound, the upper bound, and unused.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Kind.  */
  enum operand_qualifier_kind kind;
};
643
644 /* Indexed by the operand qualifier enumerators. */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     First 3 fields:
     element size, number of elements and common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  {0, 0, 0, "z", OQK_OPD_VARIANT},
  {0, 0, 0, "m", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, higher bound, unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purpose.
     First 3 fields:
     unused, unused and unused.
     NOTE(review): the kind field below is 0 (== OQK_NIL) rather than
     OQK_MISC; harmless since only OQK_OPD_VARIANT and
     OQK_VALUE_IN_RANGE are ever tested, but OQK_MISC may have been
     intended — confirm.  */

  {0, 0, 0, "lsl", 0},
  {0, 0, 0, "msl", 0},

  {0, 0, 0, "retrieving", 0},
};
698
699 static inline bfd_boolean
700 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
701 {
702 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
703 ? TRUE : FALSE;
704 }
705
706 static inline bfd_boolean
707 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
708 {
709 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
710 ? TRUE : FALSE;
711 }
712
713 const char*
714 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
715 {
716 return aarch64_opnd_qualifiers[qualifier].desc;
717 }
718
719 /* Given an operand qualifier, return the expected data element size
720 of a qualified operand. */
721 unsigned char
722 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
723 {
724 assert (operand_variant_qualifier_p (qualifier) == TRUE);
725 return aarch64_opnd_qualifiers[qualifier].data0;
726 }
727
728 unsigned char
729 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
730 {
731 assert (operand_variant_qualifier_p (qualifier) == TRUE);
732 return aarch64_opnd_qualifiers[qualifier].data1;
733 }
734
735 aarch64_insn
736 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
737 {
738 assert (operand_variant_qualifier_p (qualifier) == TRUE);
739 return aarch64_opnd_qualifiers[qualifier].data2;
740 }
741
742 static int
743 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
744 {
745 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
746 return aarch64_opnd_qualifiers[qualifier].data0;
747 }
748
749 static int
750 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
751 {
752 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
753 return aarch64_opnd_qualifiers[qualifier].data1;
754 }
755
#ifdef DEBUG_AARCH64
/* printf-style debug helper: prints "#### " followed by the formatted
   message and a newline to stdout.  */
void
aarch64_verbose (const char *str, ...)
{
  va_list ap;
  va_start (ap, str);
  printf ("#### ");
  vprintf (str, ap);
  printf ("\n");
  va_end (ap);
}

/* Print the AARCH64_MAX_OPND_NUM qualifier names starting at QUALIFIER
   on one comma-separated line.  */
static inline void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  printf ("#### \t");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
  printf ("\n");
}

/* Dump the qualifiers currently set on the operands OPND alongside the
   candidate sequence QUALIFIER they are being matched against.  */
static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
793
794 /* TODO improve this, we can have an extra field at the runtime to
795 store the number of operands rather than calculating it every time. */
796
797 int
798 aarch64_num_of_operands (const aarch64_opcode *opcode)
799 {
800 int i = 0;
801 const enum aarch64_opnd *opnds = opcode->operands;
802 while (opnds[i++] != AARCH64_OPND_NIL)
803 ;
804 --i;
805 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
806 return i;
807 }
808
809 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
810 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
811
812 N.B. on the entry, it is very likely that only some operands in *INST
813 have had their qualifiers been established.
814
815 If STOP_AT is not -1, the function will only try to match
816 the qualifier sequence for operands before and including the operand
817 of index STOP_AT; and on success *RET will only be filled with the first
818 (STOP_AT+1) qualifiers.
819
820 A couple examples of the matching algorithm:
821
822 X,W,NIL should match
823 X,W,NIL
824
825 NIL,NIL should match
826 X ,NIL
827
828 Apart from serving the main encoding routine, this can also be called
829 during or after the operand decoding. */
830
int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  /* Clamp STOP_AT to the last operand index.  */
  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.
     Note QUALIFIERS_LIST is advanced together with I, so that after a
     successful break *QUALIFIERS_LIST is the matching sequence.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes has much fewer patterns in the list.
	 First NIL qualifier indicates the end in the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  /* An all-NIL first sequence means "no qualifiers at all" and
	     matches trivially; an all-NIL later sequence just ends the
	     list without a match.  */
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  QUALIFIERS_LIST was left pointing at
	 the sequence that matched.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      /* Copy the first STOP_AT+1 qualifiers and pad the rest of *RET
	 with NIL.  */
      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
933
934 /* Operand qualifier matching and resolving.
935
936 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
937 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
938
939 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
940 succeeds. */
941
static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
{
  int i, nops;
  aarch64_opnd_qualifier_seq_t qualifiers;

  /* Find the qualifier sequence that best matches the (possibly
     partially established) operand qualifiers of INST.  */
  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
			       qualifiers))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  if (inst->opcode->flags & F_STRICT)
    {
      /* Require an exact qualifier match, even for NIL qualifiers.  */
      nops = aarch64_num_of_operands (inst->opcode);
      for (i = 0; i < nops; ++i)
	if (inst->operands[i].qualifier != qualifiers[i])
	  return FALSE;
    }

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
980
981 /* Return TRUE if VALUE is a wide constant that can be moved into a general
982 register by MOVZ.
983
984 IS32 indicates whether value is a 32-bit immediate or not.
985 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
986 amount will be returned in *SHIFT_AMOUNT. */
987
bfd_boolean
aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
{
  int amount;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);

  if (is32)
    {
      /* Allow all zeros or all ones in top 32-bits, so that
	 32-bit constant expressions like ~0x80000000 are
	 permitted.  */
      uint64_t ext = value;
      if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
	/* Immediate out of range.  */
	return FALSE;
      /* Reduce to the low 32 bits before testing the shift positions.  */
      value &= (int64_t) 0xffffffff;
    }

  /* first, try movz then movn */
  /* The constant is representable iff it is a 16-bit chunk at one of
     the four (two, for 32-bit) 16-bit-aligned positions.  */
  amount = -1;
  if ((value & ((int64_t) 0xffff << 0)) == value)
    amount = 0;
  else if ((value & ((int64_t) 0xffff << 16)) == value)
    amount = 16;
  else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
    amount = 32;
  else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
    amount = 48;

  if (amount == -1)
    {
      DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
      return FALSE;
    }

  if (shift_amount != NULL)
    *shift_amount = amount;

  DEBUG_TRACE ("exit TRUE with amount %d", amount);

  return TRUE;
}
1031
1032 /* Build the accepted values for immediate logical SIMD instructions.
1033
1034 The standard encodings of the immediate value are:
1035 N imms immr SIMD size R S
1036 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1037 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1038 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1039 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1040 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1041 0 11110s 00000r 2 UInt(r) UInt(s)
1042 where all-ones value of S is reserved.
1043
1044 Let's call E the SIMD size.
1045
1046 The immediate value is: S+1 bits '1' rotated to the right by R.
1047
1048 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1049 (remember S != E - 1). */
1050
#define TOTAL_IMM_NB 5334

/* One entry of the logical-immediate lookup table: the decoded (replicated)
   64-bit value and its standard "imm13" field encoding.  */
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

/* Table of all valid logical immediates, filled in and sorted by IMM by
   build_immediate_table so that aarch64_logical_immediate_p can bsearch
   it.  */
static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1060
1061 static int
1062 simd_imm_encoding_cmp(const void *i1, const void *i2)
1063 {
1064 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1065 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1066
1067 if (imm1->imm < imm2->imm)
1068 return -1;
1069 if (imm1->imm > imm2->imm)
1070 return +1;
1071 return 0;
1072 }
1073
/* Pack the standard encoding of an immediate bitfield from its parts:
   imm13<12>   imm13<5:0>  imm13<11:6>  SIMD size  R             S
   1           ssssss      rrrrrr       64         rrrrrr        ssssss
   0           0sssss      0rrrrr       32         rrrrr         sssss
   0           10ssss      00rrrr       16         rrrr          ssss
   0           110sss      000rrr       8          rrr           sss
   0           1110ss      0000rr       4          rr            ss
   0           11110s      00000r       2          r             s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  int encoding = s;

  encoding |= r << 6;
  encoding |= is64 << 12;
  return encoding;
}
1087
/* Enumerate every valid logical-immediate (value, encoding) pair into
   simd_immediates[] and sort the table by value for later bsearch.  */
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int nb_imms;
  int is64;

  nb_imms = 0;
  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63) */
	    imm = (1ull << (s + 1)) - 1;
	    /* rotate right by r */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* replicate the constant depending on SIMD size */
	    /* Each case deliberately falls through, doubling the pattern
	       until the full 64 bits are filled.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm << 2) | imm;
		/* Fall through.  */
	      case 2: imm = (imm << 4) | imm;
		/* Fall through.  */
	      case 3: imm = (imm << 8) | imm;
		/* Fall through.  */
	      case 4: imm = (imm << 16) | imm;
		/* Fall through.  */
	      case 5: imm = (imm << 32) | imm;
		/* Fall through.  */
	      case 6: break;
	      default: abort ();
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield(is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort(simd_immediates, nb_imms,
	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
}
1148
1149 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1150 be accepted by logical (immediate) instructions
1151 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1152
1153 ESIZE is the number of bytes in the decoded immediate value.
1154 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1155 VALUE will be returned in *ENCODING. */
1156
1157 bfd_boolean
1158 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1159 {
1160 simd_imm_encoding imm_enc;
1161 const simd_imm_encoding *imm_encoding;
1162 static bfd_boolean initialized = FALSE;
1163 uint64_t upper;
1164 int i;
1165
1166 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1167 value, is32);
1168
1169 if (initialized == FALSE)
1170 {
1171 build_immediate_table ();
1172 initialized = TRUE;
1173 }
1174
1175 /* Allow all zeros or all ones in top bits, so that
1176 constant expressions like ~1 are permitted. */
1177 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1178 if ((value & ~upper) != value && (value | upper) != value)
1179 return FALSE;
1180
1181 /* Replicate to a full 64-bit value. */
1182 value &= ~upper;
1183 for (i = esize * 8; i < 64; i *= 2)
1184 value |= (value << i);
1185
1186 imm_enc.imm = value;
1187 imm_encoding = (const simd_imm_encoding *)
1188 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1189 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1190 if (imm_encoding == NULL)
1191 {
1192 DEBUG_TRACE ("exit with FALSE");
1193 return FALSE;
1194 }
1195 if (encoding != NULL)
1196 *encoding = imm_encoding->encoding;
1197 DEBUG_TRACE ("exit with TRUE");
1198 return TRUE;
1199 }
1200
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int ret = 0;
  int pos;

  /* Walk the eight bytes from least to most significant; each byte must
     be either all-ones (contributing a 1 bit) or all-zeros.  */
  for (pos = 0; pos < 8; pos++)
    {
      uint32_t byte = (uint32_t) (imm >> (8 * pos)) & 0xff;

      switch (byte)
	{
	case 0xff:
	  ret |= 1 << pos;
	  break;
	case 0x00:
	  break;
	default:
	  /* A mixed byte means IMM is not in the expected format.  */
	  return -1;
	}
    }
  return ret;
}
1222
1223 /* Utility inline functions for operand_general_constraint_met_p. */
1224
1225 static inline void
1226 set_error (aarch64_operand_error *mismatch_detail,
1227 enum aarch64_operand_error_kind kind, int idx,
1228 const char* error)
1229 {
1230 if (mismatch_detail == NULL)
1231 return;
1232 mismatch_detail->kind = kind;
1233 mismatch_detail->index = idx;
1234 mismatch_detail->error = error;
1235 }
1236
1237 static inline void
1238 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1239 const char* error)
1240 {
1241 if (mismatch_detail == NULL)
1242 return;
1243 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1244 }
1245
1246 static inline void
1247 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1248 int idx, int lower_bound, int upper_bound,
1249 const char* error)
1250 {
1251 if (mismatch_detail == NULL)
1252 return;
1253 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1254 mismatch_detail->data[0] = lower_bound;
1255 mismatch_detail->data[1] = upper_bound;
1256 }
1257
1258 static inline void
1259 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1260 int idx, int lower_bound, int upper_bound)
1261 {
1262 if (mismatch_detail == NULL)
1263 return;
1264 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1265 _("immediate value"));
1266 }
1267
1268 static inline void
1269 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1270 int idx, int lower_bound, int upper_bound)
1271 {
1272 if (mismatch_detail == NULL)
1273 return;
1274 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1275 _("immediate offset"));
1276 }
1277
1278 static inline void
1279 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1280 int idx, int lower_bound, int upper_bound)
1281 {
1282 if (mismatch_detail == NULL)
1283 return;
1284 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1285 _("register number"));
1286 }
1287
1288 static inline void
1289 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1290 int idx, int lower_bound, int upper_bound)
1291 {
1292 if (mismatch_detail == NULL)
1293 return;
1294 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1295 _("register element index"));
1296 }
1297
1298 static inline void
1299 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1300 int idx, int lower_bound, int upper_bound)
1301 {
1302 if (mismatch_detail == NULL)
1303 return;
1304 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1305 _("shift amount"));
1306 }
1307
1308 /* Report that the MUL modifier in operand IDX should be in the range
1309 [LOWER_BOUND, UPPER_BOUND]. */
1310 static inline void
1311 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1312 int idx, int lower_bound, int upper_bound)
1313 {
1314 if (mismatch_detail == NULL)
1315 return;
1316 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1317 _("multiplier"));
1318 }
1319
1320 static inline void
1321 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1322 int alignment)
1323 {
1324 if (mismatch_detail == NULL)
1325 return;
1326 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1327 mismatch_detail->data[0] = alignment;
1328 }
1329
1330 static inline void
1331 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1332 int expected_num)
1333 {
1334 if (mismatch_detail == NULL)
1335 return;
1336 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1337 mismatch_detail->data[0] = expected_num;
1338 }
1339
1340 static inline void
1341 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1342 const char* error)
1343 {
1344 if (mismatch_detail == NULL)
1345 return;
1346 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1347 }
1348
/* General constraint checking based on operand code.

   Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
   as the IDXth operand of opcode OPCODE.  Otherwise return 0.

   This function has to be called after the qualifiers for all operands
   have been resolved.

   Mismatching error message is returned in *MISMATCH_DETAIL upon request,
   i.e. when MISMATCH_DETAIL is non-NULL.  This avoids the generation
   of error message during the disassembling where error message is not
   wanted.  We avoid the dynamic construction of strings of error messages
   here (i.e. in libopcodes), as it is costly and complicated; instead, we
   use a combination of error code, static string and some integer data to
   represent an error.  */

static int
operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
				  enum aarch64_opnd type,
				  const aarch64_opcode *opcode,
				  aarch64_operand_error *mismatch_detail)
{
  unsigned num;
  unsigned char size;
  int64_t imm;
  /* The operand being checked and its already-resolved qualifier.  */
  const aarch64_opnd_info *opnd = opnds + idx;
  aarch64_opnd_qualifier_t qualifier = opnd->qualifier;

  assert (opcode->operands[idx] == opnd->type && opnd->type == type);

  /* First dispatch on the operand's class, then on TYPE within it.  */
  switch (aarch64_operands[type].op_class)
    {
    case AARCH64_OPND_CLASS_INT_REG:
      /* Check pair reg constraints for cas* instructions.  */
      if (type == AARCH64_OPND_PAIRREG)
	{
	  assert (idx == 1 || idx == 3);
	  if (opnds[idx - 1].reg.regno % 2 != 0)
	    {
	      set_syntax_error (mismatch_detail, idx - 1,
				_("reg pair must start from even reg"));
	      return 0;
	    }
	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("reg pair must be contiguous"));
	      return 0;
	    }
	  break;
	}

      /* <Xt> may be optional in some IC and TLBI instructions.  */
      if (type == AARCH64_OPND_Rt_SYS)
	{
	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
			       == AARCH64_OPND_CLASS_SYSTEM));
	  if (opnds[1].present
	      && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
	    {
	      set_other_error (mismatch_detail, idx, _("extraneous register"));
	      return 0;
	    }
	  if (!opnds[1].present
	      && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
	    {
	      set_other_error (mismatch_detail, idx, _("missing register"));
	      return 0;
	    }
	}
      switch (qualifier)
	{
	case AARCH64_OPND_QLF_WSP:
	case AARCH64_OPND_QLF_SP:
	  if (!aarch64_stack_pointer_p (opnd))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("stack pointer register expected"));
	      return 0;
	    }
	  break;
	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_SVE_REG:
      switch (type)
	{
	case AARCH64_OPND_SVE_Zn_INDEX:
	  /* Element index must fit the 64-bit vector for this esize.  */
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
	    {
	      set_elem_idx_out_of_range_error (mismatch_detail, idx,
					       0, 64 / size - 1);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_SVE_ZnxN:
	case AARCH64_OPND_SVE_ZtxN:
	  /* The opcode dependent value gives the required list length.  */
	  if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid register list"));
	      return 0;
	    }
	  break;

	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_PRED_REG:
      /* A 3-bit predicate field can only encode p0-p7.  */
      if (opnd->reg.regno >= 8
	  && get_operand_fields_width (get_operand_from_code (type)) == 3)
	{
	  set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
	  return 0;
	}
      break;

    case AARCH64_OPND_CLASS_COND:
      if (type == AARCH64_OPND_COND1
	  && (opnds[idx].cond->value & 0xe) == 0xe)
	{
	  /* Not allow AL or NV.  */
	  /* NOTE(review): the syntax error is recorded but there is no
	     "return 0" here, so the function still reports the constraint
	     as met — confirm this is intentional.  */
	  set_syntax_error (mismatch_detail, idx, NULL);
	}
      break;

    case AARCH64_OPND_CLASS_ADDRESS:
      /* Check writeback.  */
      switch (opcode->iclass)
	{
	case ldst_pos:
	case ldst_unscaled:
	case ldstnapair_offs:
	case ldstpair_off:
	case ldst_unpriv:
	  if (opnd->addr.writeback == 1)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("unexpected address writeback"));
	      return 0;
	    }
	  break;
	case ldst_imm9:
	case ldstpair_indexed:
	case asisdlsep:
	case asisdlsop:
	  if (opnd->addr.writeback == 0)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("address writeback expected"));
	      return 0;
	    }
	  break;
	default:
	  assert (opnd->addr.writeback == 0);
	  break;
	}
      switch (type)
	{
	case AARCH64_OPND_ADDR_SIMM7:
	  /* Scaled signed 7 bits immediate offset.  */
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     -64 * size, 63 * size);
	      return 0;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, size))
	    {
	      set_unaligned_error (mismatch_detail, idx, size);
	      return 0;
	    }
	  break;
	case AARCH64_OPND_ADDR_SIMM9:
	  /* Unscaled signed 9 bits immediate offset.  */
	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_ADDR_SIMM9_2:
	  /* Unscaled signed 9 bits immediate offset, which has to be negative
	     or unaligned.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
	       && !value_aligned_p (opnd->addr.offset.imm, size))
	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
	    return 1;
	  set_other_error (mismatch_detail, idx,
			   _("negative or unaligned offset expected"));
	  return 0;

	case AARCH64_OPND_SIMD_ADDR_POST:
	  /* AdvSIMD load/store multiple structures, post-index.  */
	  assert (idx == 1);
	  if (opnd->addr.offset.is_reg)
	    {
	      /* Register offset: any GPR x0-x30 is acceptable.  */
	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
		return 1;
	      else
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid register offset"));
		  return 0;
		}
	    }
	  else
	    {
	      /* Immediate offset: must equal the total transfer size.  */
	      const aarch64_opnd_info *prev = &opnds[idx-1];
	      unsigned num_bytes; /* total number of bytes transferred.  */
	      /* The opcode dependent area stores the number of elements in
		 each structure to be loaded/stored.  */
	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
		/* Special handling of loading single structure to all lane.  */
		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
		  * aarch64_get_qualifier_esize (prev->qualifier);
	      else
		num_bytes = prev->reglist.num_regs
		  * aarch64_get_qualifier_esize (prev->qualifier)
		  * aarch64_get_qualifier_nelem (prev->qualifier);
	      if ((int) num_bytes != opnd->addr.offset.imm)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid post-increment amount"));
		  return 0;
		}
	    }
	  break;

	case AARCH64_OPND_ADDR_REGOFF:
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
	  if (opnd->shifter.amount != 0
	      && opnd->shifter.amount != (int)get_logsz (size))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift amount"));
	      return 0;
	    }
	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
	     operators.  */
	  switch (opnd->shifter.kind)
	    {
	    case AARCH64_MOD_UXTW:
	    case AARCH64_MOD_LSL:
	    case AARCH64_MOD_SXTW:
	    case AARCH64_MOD_SXTX: break;
	    default:
	      set_other_error (mismatch_detail, idx,
			       _("invalid extend/shift operator"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_ADDR_UIMM12:
	  imm = opnd->addr.offset.imm;
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     0, 4095 * size);
	      return 0;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, size))
	    {
	      set_unaligned_error (mismatch_detail, idx, size);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_ADDR_PCREL14:
	case AARCH64_OPND_ADDR_PCREL19:
	case AARCH64_OPND_ADDR_PCREL21:
	case AARCH64_OPND_ADDR_PCREL26:
	  imm = opnd->imm.value;
	  if (operand_need_shift_by_two (get_operand_from_code (type)))
	    {
	      /* The offset value in a PC-relative branch instruction is alway
		 4-byte aligned and is encoded without the lowest 2 bits.  */
	      if (!value_aligned_p (imm, 4))
		{
		  set_unaligned_error (mismatch_detail, idx, 4);
		  return 0;
		}
	      /* Right shift by 2 so that we can carry out the following check
		 canonically.  */
	      imm >>= 2;
	    }
	  size = get_operand_fields_width (get_operand_from_code (type));
	  if (!value_fit_signed_field_p (imm, size))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  break;

	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_SIMD_REGLIST:
      if (type == AARCH64_OPND_LEt)
	{
	  /* Get the upper bound for the element index.  */
	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
	  if (!value_in_range_p (opnd->reglist.index, 0, num))
	    {
	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
	      return 0;
	    }
	}
      /* The opcode dependent area stores the number of elements in
	 each structure to be loaded/stored.  */
      num = get_opcode_dependent_value (opcode);
      switch (type)
	{
	case AARCH64_OPND_LVt:
	  assert (num >= 1 && num <= 4);
	  /* Unless LD1/ST1, the number of registers should be equal to that
	     of the structure elements.  */
	  if (num != 1 && opnd->reglist.num_regs != num)
	    {
	      set_reg_list_error (mismatch_detail, idx, num);
	      return 0;
	    }
	  break;
	case AARCH64_OPND_LVt_AL:
	case AARCH64_OPND_LEt:
	  assert (num >= 1 && num <= 4);
	  /* The number of registers should be equal to that of the structure
	     elements.  */
	  if (opnd->reglist.num_regs != num)
	    {
	      set_reg_list_error (mismatch_detail, idx, num);
	      return 0;
	    }
	  break;
	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_IMMEDIATE:
      /* Constraint check on immediate operand.  */
      imm = opnd->imm.value;
      /* E.g. imm_0_31 constrains value to be 0..31.  */
      if (qualifier_value_in_range_constraint_p (qualifier)
	  && !value_in_range_p (imm, get_lower_bound (qualifier),
				get_upper_bound (qualifier)))
	{
	  set_imm_out_of_range_error (mismatch_detail, idx,
				      get_lower_bound (qualifier),
				      get_upper_bound (qualifier));
	  return 0;
	}

      switch (type)
	{
	case AARCH64_OPND_AIMM:
	  /* Arithmetic 12-bit immediate, optionally LSL #12.  */
	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return 0;
	    }
	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift amount expected to be 0 or 12"));
	      return 0;
	    }
	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_HALF:
	  /* MOVZ/MOVN/MOVK 16-bit chunk with halfword-aligned LSL.  */
	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return 0;
	    }
	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
	  if (!value_aligned_p (opnd->shifter.amount, 16))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift amount should be a multiple of 16"));
	      return 0;
	    }
	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
	    {
	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
						 0, size * 8 - 16);
	      return 0;
	    }
	  if (opnd->imm.value < 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("negative immediate value not allowed"));
	      return 0;
	    }
	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_IMM_MOV:
	    {
	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
	      imm = opnd->imm.value;
	      assert (idx == 1);
	      switch (opcode->op)
		{
		case OP_MOV_IMM_WIDEN:
		  imm = ~imm;
		  /* Fall through...  */
		case OP_MOV_IMM_WIDE:
		  if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
		    {
		      set_other_error (mismatch_detail, idx,
				       _("immediate out of range"));
		      return 0;
		    }
		  break;
		case OP_MOV_IMM_LOG:
		  if (!aarch64_logical_immediate_p (imm, esize, NULL))
		    {
		      set_other_error (mismatch_detail, idx,
				       _("immediate out of range"));
		      return 0;
		    }
		  break;
		default:
		  assert (0);
		  return 0;
		}
	    }
	  break;

	case AARCH64_OPND_NZCV:
	case AARCH64_OPND_CCMP_IMM:
	case AARCH64_OPND_EXCEPTION:
	case AARCH64_OPND_UIMM4:
	case AARCH64_OPND_UIMM7:
	case AARCH64_OPND_UIMM3_OP1:
	case AARCH64_OPND_UIMM3_OP2:
	  /* Plain unsigned immediates: bound by the field width.  */
	  size = get_operand_fields_width (get_operand_from_code (type));
	  assert (size < 32);
	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
					  (1 << size) - 1);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_WIDTH:
	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
		  && opnds[0].type == AARCH64_OPND_Rd);
	  size = get_upper_bound (qualifier);
	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
	    /* lsb+width <= reg.size  */
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
					  size - opnds[idx-1].imm.value);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_LIMM:
	    {
	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
	      uint64_t uimm = opnd->imm.value;
	      /* BIC encodes the inverted immediate.  */
	      if (opcode->op == OP_BIC)
		uimm = ~uimm;
	      if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
		{
		  set_other_error (mismatch_detail, idx,
				   _("immediate out of range"));
		  return 0;
		}
	    }
	  break;

	case AARCH64_OPND_IMM0:
	case AARCH64_OPND_FPIMM0:
	  if (opnd->imm.value != 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate zero expected"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_SHLL_IMM:
	  /* SHLL shift must equal the source element size in bits.  */
	  assert (idx == 2);
	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
	  if (opnd->imm.value != size)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift amount"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_IMM_VLSL:
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
					  size * 8 - 1);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_IMM_VLSR:
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_SIMD_IMM:
	case AARCH64_OPND_SIMD_IMM_SFT:
	  /* Qualifier check.  */
	  switch (qualifier)
	    {
	    case AARCH64_OPND_QLF_LSL:
	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return 0;
		}
	      break;
	    case AARCH64_OPND_QLF_MSL:
	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return 0;
		}
	      break;
	    case AARCH64_OPND_QLF_NIL:
	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
		{
		  set_other_error (mismatch_detail, idx,
				   _("shift is not permitted"));
		  return 0;
		}
	      break;
	    default:
	      assert (0);
	      return 0;
	    }
	  /* Is the immediate valid?  */
	  assert (idx == 1);
	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
	    {
	      /* uimm8 or simm8  */
	      if (!value_in_range_p (opnd->imm.value, -128, 255))
		{
		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
		  return 0;
		}
	    }
	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
	    {
	      /* uimm64 is not
		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
		 ffffffffgggggggghhhhhhhh'.  */
	      set_other_error (mismatch_detail, idx,
			       _("invalid value for immediate"));
	      return 0;
	    }
	  /* Is the shift amount valid?  */
	  switch (opnd->shifter.kind)
	    {
	    case AARCH64_MOD_LSL:
	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
		{
		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
						     (size - 1) * 8);
		  return 0;
		}
	      if (!value_aligned_p (opnd->shifter.amount, 8))
		{
		  set_unaligned_error (mismatch_detail, idx, 8);
		  return 0;
		}
	      break;
	    case AARCH64_MOD_MSL:
	      /* Only 8 and 16 are valid shift amount.  */
	      /* NOTE(review): the message below says "0 or 16" while the
		 test accepts 8 or 16 — confirm which is intended.  */
	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
		{
		  set_other_error (mismatch_detail, idx,
				   _("shift amount expected to be 0 or 16"));
		  return 0;
		}
	      break;
	    default:
	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return 0;
		}
	      break;
	    }
	  break;

	case AARCH64_OPND_FPIMM:
	case AARCH64_OPND_SIMD_FPIMM:
	  if (opnd->imm.is_fp == 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("floating-point immediate expected"));
	      return 0;
	    }
	  /* The value is expected to be an 8-bit floating-point constant with
	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
	     instruction).  */
	  if (!value_in_range_p (opnd->imm.value, 0, 255))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_SVE_PATTERN_SCALED:
	  assert (opnd->shifter.kind == AARCH64_MOD_MUL);
	  if (!value_in_range_p (opnd->shifter.amount, 1, 16))
	    {
	      set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
	      return 0;
	    }
	  break;

	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_CP_REG:
      /* Cn or Cm: 4-bit opcode field named for historical reasons.
	 valid range: C0 - C15.  */
      if (opnd->reg.regno > 15)
	{
	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
	  return 0;
	}
      break;

    case AARCH64_OPND_CLASS_SYSTEM:
      switch (type)
	{
	case AARCH64_OPND_PSTATEFIELD:
	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
	  /* MSR UAO, #uimm4
	     MSR PAN, #uimm4
	     The immediate must be #0 or #1.  */
	  if ((opnd->pstatefield == 0x03	/* UAO.  */
	       || opnd->pstatefield == 0x04)	/* PAN.  */
	      && opnds[1].imm.value > 1)
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
	      return 0;
	    }
	  /* MSR SPSel, #uimm4
	     Uses uimm4 as a control value to select the stack pointer: if
	     bit 0 is set it selects the current exception level's stack
	     pointer, if bit 0 is clear it selects shared EL0 stack pointer.
	     Bits 1 to 3 of uimm4 are reserved and should be zero.  */
	  if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
	      return 0;
	    }
	  break;
	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_SIMD_ELEMENT:
      /* Get the upper bound for the element index.  */
      num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
      /* Index out-of-range.  */
      if (!value_in_range_p (opnd->reglane.index, 0, num))
	{
	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
	  return 0;
	}
      /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
	 number is encoded in "size:M:Rm":
	 size	<Vm>
	 00		RESERVED
	 01		0:Rm
	 10		M:Rm
	 11		RESERVED  */
      if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
	{
	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
	  return 0;
	}
      break;

    case AARCH64_OPND_CLASS_MODIFIED_REG:
      assert (idx == 1 || idx == 2);
      switch (type)
	{
	case AARCH64_OPND_Rm_EXT:
	  if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
	      && opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("extend operator expected"));
	      return 0;
	    }
	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
	     case.  */
	  if (!aarch64_stack_pointer_p (opnds + 0)
	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
	    {
	      if (!opnd->shifter.operator_present)
		{
		  set_other_error (mismatch_detail, idx,
				   _("missing extend operator"));
		  return 0;
		}
	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
		{
		  set_other_error (mismatch_detail, idx,
				   _("'LSL' operator not allowed"));
		  return 0;
		}
	    }
	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
		  || opnd->shifter.kind == AARCH64_MOD_LSL);
	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
	    {
	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
	      return 0;
	    }
	  /* In the 64-bit form, the final register operand is written as Wm
	     for all but the (possibly omitted) UXTX/LSL and SXTX
	     operators.
	     N.B. GAS allows X register to be used with any operator as a
	     programming convenience.  */
	  if (qualifier == AARCH64_OPND_QLF_X
	      && opnd->shifter.kind != AARCH64_MOD_LSL
	      && opnd->shifter.kind != AARCH64_MOD_UXTX
	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
	    {
	      set_other_error (mismatch_detail, idx, _("W register expected"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_Rm_SFT:
	  /* ROR is not available to the shifted register operand in
	     arithmetic instructions.  */
	  if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift operator expected"));
	      return 0;
	    }
	  if (opnd->shifter.kind == AARCH64_MOD_ROR
	      && opcode->iclass != log_shift)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("'ROR' operator not allowed"));
	      return 0;
	    }
	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
	    {
	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
	      return 0;
	    }
	  break;

	default:
	  break;
	}
      break;

    default:
      break;
    }

  return 1;
}
2187
2188 /* Main entrypoint for the operand constraint checking.
2189
2190 Return 1 if operands of *INST meet the constraint applied by the operand
2191 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2192 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2193 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2194 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2195 error kind when it is notified that an instruction does not pass the check).
2196
2197 Un-determined operand qualifiers may get established during the process. */
2198
2199 int
2200 aarch64_match_operands_constraint (aarch64_inst *inst,
2201 aarch64_operand_error *mismatch_detail)
2202 {
2203 int i;
2204
2205 DEBUG_TRACE ("enter");
2206
2207 /* Check for cases where a source register needs to be the same as the
2208 destination register. Do this before matching qualifiers since if
2209 an instruction has both invalid tying and invalid qualifiers,
2210 the error about qualifiers would suggest several alternative
2211 instructions that also have invalid tying. */
2212 i = inst->opcode->tied_operand;
2213 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2214 {
2215 if (mismatch_detail)
2216 {
2217 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2218 mismatch_detail->index = i;
2219 mismatch_detail->error = NULL;
2220 }
2221 return 0;
2222 }
2223
2224 /* Match operands' qualifier.
2225 *INST has already had qualifier establish for some, if not all, of
2226 its operands; we need to find out whether these established
2227 qualifiers match one of the qualifier sequence in
2228 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2229 with the corresponding qualifier in such a sequence.
2230 Only basic operand constraint checking is done here; the more thorough
2231 constraint checking will carried out by operand_general_constraint_met_p,
2232 which has be to called after this in order to get all of the operands'
2233 qualifiers established. */
2234 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2235 {
2236 DEBUG_TRACE ("FAIL on operand qualifier matching");
2237 if (mismatch_detail)
2238 {
2239 /* Return an error type to indicate that it is the qualifier
2240 matching failure; we don't care about which operand as there
2241 are enough information in the opcode table to reproduce it. */
2242 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2243 mismatch_detail->index = -1;
2244 mismatch_detail->error = NULL;
2245 }
2246 return 0;
2247 }
2248
2249 /* Match operands' constraint. */
2250 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2251 {
2252 enum aarch64_opnd type = inst->opcode->operands[i];
2253 if (type == AARCH64_OPND_NIL)
2254 break;
2255 if (inst->operands[i].skip)
2256 {
2257 DEBUG_TRACE ("skip the incomplete operand %d", i);
2258 continue;
2259 }
2260 if (operand_general_constraint_met_p (inst->operands, i, type,
2261 inst->opcode, mismatch_detail) == 0)
2262 {
2263 DEBUG_TRACE ("FAIL on operand %d", i);
2264 return 0;
2265 }
2266 }
2267
2268 DEBUG_TRACE ("PASS");
2269
2270 return 1;
2271 }
2272
2273 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2274 Also updates the TYPE of each INST->OPERANDS with the corresponding
2275 value of OPCODE->OPERANDS.
2276
2277 Note that some operand qualifiers may need to be manually cleared by
2278 the caller before it further calls the aarch64_opcode_encode; by
2279 doing this, it helps the qualifier matching facilities work
2280 properly. */
2281
2282 const aarch64_opcode*
2283 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2284 {
2285 int i;
2286 const aarch64_opcode *old = inst->opcode;
2287
2288 inst->opcode = opcode;
2289
2290 /* Update the operand types. */
2291 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2292 {
2293 inst->operands[i].type = opcode->operands[i];
2294 if (opcode->operands[i] == AARCH64_OPND_NIL)
2295 break;
2296 }
2297
2298 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2299
2300 return old;
2301 }
2302
2303 int
2304 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2305 {
2306 int i;
2307 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2308 if (operands[i] == operand)
2309 return i;
2310 else if (operands[i] == AARCH64_OPND_NIL)
2311 break;
2312 return -1;
2313 }
2314 \f
/* R0...R30, followed by FOR31.  Expands to a 32-entry initializer where
   R is applied to each register number and FOR31 supplies the special
   name for register 31 (SP or the zero register).  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),  \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Integer register-name table, indexed as [has_zr][is_64][regno]:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R64
#undef R32
};
#undef BANK
2334
2335 /* Return the integer register name.
2336 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
2337
2338 static inline const char *
2339 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2340 {
2341 const int has_zr = sp_reg_p ? 0 : 1;
2342 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2343 return int_reg[has_zr][is_64][regno];
2344 }
2345
2346 /* Like get_int_reg_name, but IS_64 is always 1. */
2347
2348 static inline const char *
2349 get_64bit_int_reg_name (int regno, int sp_reg_p)
2350 {
2351 const int has_zr = sp_reg_p ? 0 : 1;
2352 return int_reg[has_zr][1][regno];
2353 }
2354
2355 /* Get the name of the integer offset register in OPND, using the shift type
2356 to decide whether it's a word or doubleword. */
2357
2358 static inline const char *
2359 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2360 {
2361 switch (opnd->shifter.kind)
2362 {
2363 case AARCH64_MOD_UXTW:
2364 case AARCH64_MOD_SXTW:
2365 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2366
2367 case AARCH64_MOD_LSL:
2368 case AARCH64_MOD_SXTX:
2369 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2370
2371 default:
2372 abort ();
2373 }
2374 }
2375
/* Types for expanding an encoded 8-bit value to a floating-point value.
   Each union aliases the raw bit pattern with the IEEE value so the
   printing code can format the expansion numerically.  */

typedef union
{
  uint64_t i;
  double d;
} double_conv_t;

typedef union
{
  uint32_t i;
  float f;
} single_conv_t;

/* NOTE: half-precision immediates are expanded to single precision by
   expand_fp_imm, hence the 32-bit layout here as well.  */
typedef union
{
  uint32_t i;
  float f;
} half_conv_t;
2395
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned; 0 is returned for an unsupported SIZE (which
   also trips the assert in a debug build).  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  /* Initialize so that an unsupported SIZE cannot return an
     indeterminate value when asserts are compiled out (NDEBUG).  */
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
  if (size == 8)
    {
      /* Build the high 32 bits, then shift into place: the double
	 expansion is sign : NOT(b) : Replicate(b,7) : cdefgh : Zeros(48).  */
      imm = (imm8_7 << (63-32))		/* imm8<7>  */
	  | ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
	  | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	  | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	  | (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      /* Single-precision expansion (also used for half precision):
	 sign : NOT(b) : Replicate(b,4) : cdefgh : Zeros(19).  */
      imm = (imm8_7 << 31)	/* imm8<7> */
	  | ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>) */
	  | (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	  | (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
2439
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".
   Lists of more than two consecutive registers are printed in the
   hyphenated "{v0.4s-v3.4s}" form, otherwise each register is listed
   individually; an element index, if present, is appended after the
   closing brace.  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  /* Register numbers wrap modulo 32.  */
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];	/* Temporary buffer.  */

  /* AARCH64_OPND_LEt always carries an element index.  */
  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one (i.e. no
     wrap-around past register 31).  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
  else
    {
      /* Enumerated form; compute each register number modulo 32.  */
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		    prefix, reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
	  break;
	}
    }
}
2497
2498 /* Print the register+immediate address in OPND to BUF, which has SIZE
2499 characters. BASE is the name of the base register. */
2500
2501 static void
2502 print_immediate_offset_address (char *buf, size_t size,
2503 const aarch64_opnd_info *opnd,
2504 const char *base)
2505 {
2506 if (opnd->addr.writeback)
2507 {
2508 if (opnd->addr.preind)
2509 snprintf (buf, size, "[%s,#%d]!", base, opnd->addr.offset.imm);
2510 else
2511 snprintf (buf, size, "[%s],#%d", base, opnd->addr.offset.imm);
2512 }
2513 else
2514 {
2515 if (opnd->addr.offset.imm)
2516 snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
2517 else
2518 snprintf (buf, size, "[%s]", base);
2519 }
2520 }
2521
2522 /* Produce the string representation of the register offset address operand
2523 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
2524 the names of the base and offset registers. */
2525 static void
2526 print_register_offset_address (char *buf, size_t size,
2527 const aarch64_opnd_info *opnd,
2528 const char *base, const char *offset)
2529 {
2530 char tb[16]; /* Temporary buffer. */
2531 bfd_boolean print_extend_p = TRUE;
2532 bfd_boolean print_amount_p = TRUE;
2533 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2534
2535 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2536 || !opnd->shifter.amount_present))
2537 {
2538 /* Not print the shift/extend amount when the amount is zero and
2539 when it is not the special case of 8-bit load/store instruction. */
2540 print_amount_p = FALSE;
2541 /* Likewise, no need to print the shift operator LSL in such a
2542 situation. */
2543 if (opnd->shifter.kind == AARCH64_MOD_LSL)
2544 print_extend_p = FALSE;
2545 }
2546
2547 /* Prepare for the extend/shift. */
2548 if (print_extend_p)
2549 {
2550 if (print_amount_p)
2551 snprintf (tb, sizeof (tb), ",%s #%" PRIi64, shift_name,
2552 opnd->shifter.amount);
2553 else
2554 snprintf (tb, sizeof (tb), ",%s", shift_name);
2555 }
2556 else
2557 tb[0] = '\0';
2558
2559 snprintf (buf, size, "[%s,%s%s]", base, offset, tb);
2560 }
2561
/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   PC, PCREL_P and ADDRESS are used to pass in and return information about
   the PC-relative address calculation, where the PC value is passed in
   PC.  If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
   will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
   calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.

   The function serves both the disassembler and the assembler diagnostics
   issuer, which is the reason why it lives in this file.  */

void
aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
		       const aarch64_opcode *opcode,
		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
		       bfd_vma *address)
{
  int i;
  const char *name = NULL;
  const aarch64_opnd_info *opnd = opnds + idx;
  enum aarch64_modifier_kind kind;
  uint64_t addr, enum_value;

  buf[0] = '\0';
  if (pcrel_p)
    *pcrel_p = 0;

  switch (opnd->type)
    {
    /* General-purpose registers (W/X form).  */
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_PAIRREG:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information.  */
      if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
	break;
      /* Omit the operand, e.g. RET.  */
      if (optional_operand_p (opcode, idx)
	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
	break;
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      break;

    /* General-purpose registers where R31 names SP.  */
    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
	      || opnd->qualifier == AARCH64_OPND_QLF_X
	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
      break;

    case AARCH64_OPND_Rm_EXT:
      kind = opnd->shifter.kind;
      assert (idx == 1 || idx == 2);
      if ((aarch64_stack_pointer_p (opnds)
	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
	       && kind == AARCH64_MOD_UXTW)
	      || (opnd->qualifier == AARCH64_OPND_QLF_X
		  && kind == AARCH64_MOD_UXTX)))
	{
	  /* 'LSL' is the preferred form in this case.  */
	  kind = AARCH64_MOD_LSL;
	  if (opnd->shifter.amount == 0)
	    {
	      /* Shifter omitted.  */
	      snprintf (buf, size, "%s",
			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
	      break;
	    }
	}
      if (opnd->shifter.amount)
	snprintf (buf, size, "%s, %s #%" PRIi64,
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "%s, %s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name);
      break;

    case AARCH64_OPND_Rm_SFT:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      /* Omit the default "LSL #0" shifter.  */
      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
	snprintf (buf, size, "%s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      else
	snprintf (buf, size, "%s, %s #%" PRIi64,
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    /* Scalar FP/SIMD registers: qualifier name supplies the b/h/s/d/q
       prefix.  */
    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reg.regno);
      break;

    /* Vector registers with an arrangement specifier, e.g. v0.4s.  */
    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
		aarch64_get_qualifier_name (opnd->qualifier));
      break;

    /* Vector element, e.g. v0.s[1].  */
    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
      snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
      break;

    /* Vector register lists, e.g. {v0.4s, v1.4s}.  */
    case AARCH64_OPND_LVn:
    case AARCH64_OPND_LVt:
    case AARCH64_OPND_LVt_AL:
    case AARCH64_OPND_LEt:
      print_register_list (buf, size, opnd, "v");
      break;

    /* SVE predicate registers, optionally with a /z or /m predication
       suffix or an element-size suffix.  */
    case AARCH64_OPND_SVE_Pd:
    case AARCH64_OPND_SVE_Pg3:
    case AARCH64_OPND_SVE_Pg4_5:
    case AARCH64_OPND_SVE_Pg4_10:
    case AARCH64_OPND_SVE_Pg4_16:
    case AARCH64_OPND_SVE_Pm:
    case AARCH64_OPND_SVE_Pn:
    case AARCH64_OPND_SVE_Pt:
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "p%d", opnd->reg.regno);
      else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
	snprintf (buf, size, "p%d/%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      else
	snprintf (buf, size, "p%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      break;

    /* SVE vector registers, optionally with an element-size suffix.  */
    case AARCH64_OPND_SVE_Za_5:
    case AARCH64_OPND_SVE_Za_16:
    case AARCH64_OPND_SVE_Zd:
    case AARCH64_OPND_SVE_Zm_5:
    case AARCH64_OPND_SVE_Zm_16:
    case AARCH64_OPND_SVE_Zn:
    case AARCH64_OPND_SVE_Zt:
      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
	snprintf (buf, size, "z%d", opnd->reg.regno);
      else
	snprintf (buf, size, "z%d.%s", opnd->reg.regno,
		  aarch64_get_qualifier_name (opnd->qualifier));
      break;

    case AARCH64_OPND_SVE_ZnxN:
    case AARCH64_OPND_SVE_ZtxN:
      print_register_list (buf, size, opnd, "z");
      break;

    case AARCH64_OPND_SVE_Zn_INDEX:
      snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    /* System coprocessor-style register names Cn/Cm.  */
    case AARCH64_OPND_Cn:
    case AARCH64_OPND_Cm:
      snprintf (buf, size, "C%d", opnd->reg.regno);
      break;

    /* Plain decimal immediates.  */
    case AARCH64_OPND_IDX:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM0:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_FBITS:
      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_SVE_PATTERN:
      if (optional_operand_p (opcode, idx)
	  && opnd->imm.value == get_optional_operand_default_value (opcode))
	break;
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
      /* Reserved pattern values (null table entries) print numerically.  */
      if (aarch64_sve_pattern_array[enum_value])
	snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_SVE_PATTERN_SCALED:
      if (optional_operand_p (opcode, idx)
	  && !opnd->shifter.operator_present
	  && opnd->imm.value == get_optional_operand_default_value (opcode))
	break;
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
      if (aarch64_sve_pattern_array[opnd->imm.value])
	snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      /* Append the optional ", mul #n" multiplier.  */
      if (opnd->shifter.operator_present)
	{
	  size_t len = strlen (buf);
	  snprintf (buf + len, size - len, ", %s #%" PRIi64,
		    aarch64_operand_modifiers[opnd->shifter.kind].name,
		    opnd->shifter.amount);
	}
      break;

    case AARCH64_OPND_SVE_PRFOP:
      enum_value = opnd->imm.value;
      assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
      /* Reserved prefetch values (null table entries) print numerically.  */
      if (aarch64_sve_prfop_array[enum_value])
	snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
      else
	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_IMM_MOV:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 4:	/* e.g. MOV Wd, #<imm32>.  */
	  {
	    int imm32 = opnd->imm.value;
	    snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
	  }
	  break;
	case 8:	/* e.g. MOV Xd, #<imm64>.  */
	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
		    opnd->imm.value, opnd->imm.value);
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_FPIMM0:
      snprintf (buf, size, "#0.0");
      break;

    case AARCH64_OPND_LIMM:
    case AARCH64_OPND_AIMM:
    case AARCH64_OPND_HALF:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
	  || opnd->shifter.kind == AARCH64_MOD_NONE)
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      else
	snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    case AARCH64_OPND_FPIMM:
    case AARCH64_OPND_SIMD_FPIMM:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
	  {
	    half_conv_t c;
	    c.i = expand_fp_imm (2, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	  }
	  break;
	case 4:	/* e.g. FMOV <Sd>, #<imm>.  */
	  {
	    single_conv_t c;
	    c.i = expand_fp_imm (4, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	  }
	  break;
	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
	  {
	    double_conv_t c;
	    c.i = expand_fp_imm (8, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.d);
	  }
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM7:
      if (optional_operand_p (opcode, idx) == TRUE
	  && (opnd->imm.value ==
	      (int64_t) get_optional_operand_default_value (opcode)))
	/* Omit the operand, e.g. DCPS1.  */
	break;
      snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
      break;

    case AARCH64_OPND_COND:
    case AARCH64_OPND_COND1:
      snprintf (buf, size, "%s", opnd->cond->names[0]);
      break;

    case AARCH64_OPND_ADDR_ADRP:
      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
	+ opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *BUF,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_PCREL14:
    case AARCH64_OPND_ADDR_PCREL19:
    case AARCH64_OPND_ADDR_PCREL21:
    case AARCH64_OPND_ADDR_PCREL26:
      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *BUF,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_POST:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
	{
	  if (opnd->addr.offset.is_reg)
	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
	  else
	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
	}
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_ADDR_REGOFF:
      print_register_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
	 get_offset_int_reg_name (opnd));
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
      print_immediate_offset_address
	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
      break;

    case AARCH64_OPND_ADDR_UIMM12:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_SYSREG:
      /* Prefer a named, non-deprecated system register; otherwise fall
	 back to the generic s<op0>_<op1>_c<Cn>_c<Cm>_<op2> spelling.  */
      for (i = 0; aarch64_sys_regs[i].name; ++i)
	if (aarch64_sys_regs[i].value == opnd->sysreg
	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
	  break;
      if (aarch64_sys_regs[i].name)
	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
      else
	{
	  /* Implementation defined system register.  */
	  unsigned int value = opnd->sysreg;
	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
		    value & 0x7);
	}
      break;

    case AARCH64_OPND_PSTATEFIELD:
      for (i = 0; aarch64_pstatefields[i].name; ++i)
	if (aarch64_pstatefields[i].value == opnd->pstatefield)
	  break;
      assert (aarch64_pstatefields[i].name);
      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
      break;

    case AARCH64_OPND_SYSREG_AT:
    case AARCH64_OPND_SYSREG_DC:
    case AARCH64_OPND_SYSREG_IC:
    case AARCH64_OPND_SYSREG_TLBI:
      snprintf (buf, size, "%s", opnd->sysins_op->name);
      break;

    case AARCH64_OPND_BARRIER:
      snprintf (buf, size, "%s", opnd->barrier->name);
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in DCPS1.  */
      if (! optional_operand_p (opcode, idx)
	  || (opnd->barrier->value
	      != get_optional_operand_default_value (opcode)))
	snprintf (buf, size, "#0x%x", opnd->barrier->value);
      break;

    case AARCH64_OPND_PRFOP:
      if (opnd->prfop->name != NULL)
	snprintf (buf, size, "%s", opnd->prfop->name);
      else
	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
      break;

    case AARCH64_OPND_BARRIER_PSB:
      snprintf (buf, size, "%s", opnd->hint_option->name);
      break;

    default:
      assert (0);
    }
}
3030 \f
/* Pack op0:op1:CRn:CRm:op2 into the canonical system-register encoding
   used throughout this table (the trailing >> 5 drops the op2 shift-in
   padding, yielding a 14-bit value).  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

/* Shorthand for the CRn/CRm coprocessor register field values.  */
#define C0  0
#define C1  1
#define C2  2
#define C3  3
#define C4  4
#define C5  5
#define C6  6
#define C7  7
#define C8  8
#define C9  9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15

#ifdef F_DEPRECATED
#undef F_DEPRECATED
#endif
#define F_DEPRECATED	0x1	/* Deprecated system register.  */

#ifdef F_ARCHEXT
#undef F_ARCHEXT
#endif
#define F_ARCHEXT	0x2	/* Architecture dependent system register.  */

#ifdef F_HASXT
#undef F_HASXT
#endif
#define F_HASXT		0x4	/* System instruction register <Xt>
				   operand.  */
3070
3071
3072 /* TODO there are two more issues need to be resolved
3073 1. handle read-only and write-only system registers
3074 2. handle cpu-implementation-defined system registers. */
3075 const aarch64_sys_reg aarch64_sys_regs [] =
3076 {
3077 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3078 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3079 { "elr_el1", CPEN_(0,C0,1), 0 },
3080 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3081 { "sp_el0", CPEN_(0,C1,0), 0 },
3082 { "spsel", CPEN_(0,C2,0), 0 },
3083 { "daif", CPEN_(3,C2,1), 0 },
3084 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
3085 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3086 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3087 { "nzcv", CPEN_(3,C2,0), 0 },
3088 { "fpcr", CPEN_(3,C4,0), 0 },
3089 { "fpsr", CPEN_(3,C4,1), 0 },
3090 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3091 { "dlr_el0", CPEN_(3,C5,1), 0 },
3092 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3093 { "elr_el2", CPEN_(4,C0,1), 0 },
3094 { "sp_el1", CPEN_(4,C1,0), 0 },
3095 { "spsr_irq", CPEN_(4,C3,0), 0 },
3096 { "spsr_abt", CPEN_(4,C3,1), 0 },
3097 { "spsr_und", CPEN_(4,C3,2), 0 },
3098 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3099 { "spsr_el3", CPEN_(6,C0,0), 0 },
3100 { "elr_el3", CPEN_(6,C0,1), 0 },
3101 { "sp_el2", CPEN_(6,C1,0), 0 },
3102 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3103 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3104 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
3105 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
3106 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
3107 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
3108 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
3109 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
3110 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
3111 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
3112 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
3113 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
3114 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
3115 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
3116 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
3117 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
3118 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
3119 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
3120 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
3121 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
3122 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
3123 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
3124 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
3125 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
3126 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
3127 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
3128 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
3129 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
3130 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
3131 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
3132 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
3133 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
3134 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
3135 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
3136 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
3137 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3138 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
3139 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
3140 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
3141 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
3142 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3143 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3144 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3145 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3146 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3147 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3148 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3149 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3150 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3151 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3152 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3153 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3154 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3155 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3156 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3157 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3158 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3159 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3160 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3161 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3162 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3163 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3164 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3165 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3166 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3167 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3168 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3169 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3170 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3171 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3172 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3173 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3174 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3175 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3176 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3177 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3178 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3179 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3180 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3181 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3182 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3183 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3184 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3185 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3186 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3187 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3188 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3189 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3190 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3191 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3192 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3193 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3194 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3195 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3196 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3197 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3198 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3199 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3200 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3201 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3202 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3203 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3204 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3205 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3206 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3207 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3208 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3209 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3210 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3211 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3212 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3213 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3214 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
3215 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
3216 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
3217 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3218 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3219 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3220 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
3221 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3222 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3223 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3224 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3225 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3226 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3227 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
3228 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3229 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3230 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3231 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3232 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
3233 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
3234 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
3235 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3236 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3237 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3238 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3239 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3240 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3241 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3242 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3243 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3244 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3245 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3246 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3247 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3248 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3249 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3250 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3251 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3252 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3253 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3254 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3255 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
3256 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
3257 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3258 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3259 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3260 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
3261 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3262 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3263 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3264 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3265 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3266 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3267 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3268 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3269 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3270 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3271 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3272 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3273 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3274 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3275 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3276 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3277 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3278 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3279 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3280 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3281 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3282 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3283 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3284 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3285 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3286 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3287 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3288 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3289 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3290 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3291 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3292 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3293 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3294 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3295 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3296 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3297 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3298 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3299 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3300 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3301 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3302 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3303 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3304 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3305 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3306 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3307 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3308 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3309 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3310 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3311 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3312 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3313 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3314 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3315 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3316 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3317 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3318 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3319 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3320 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3321 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3322 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3323 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3324 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3325 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3326 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3327 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3328 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3329 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3330 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3331 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3332 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3333 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3334 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3335 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3336 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3337 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3338 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3339 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3340 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3341 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3342 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3343 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3344 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3345 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3346 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3347 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3348 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3349 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3350 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3351 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3352 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3353 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3354 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3355 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3356 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3357 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3358 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3359 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3360 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3361 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3362 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3363 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3364 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3365 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3366 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3367 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3368 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3369 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3370 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3371 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3372 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3373 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3374 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3375 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3376 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3377 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3378 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3379 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3380 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3381 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3382 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3383 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3384 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3385 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3386 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3387 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3388 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3389 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3390 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3391 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3392 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3393 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3394 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3395 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3396 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3397 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3398 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3399 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3400 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3401 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3402 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3403 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3404 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3405 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3406 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3407 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3408 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3409 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3410 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3411 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3412 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3413 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3414 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3415 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3416 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3417 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3418 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3419 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3420 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3421 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3422 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3423 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3424 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3425 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3426 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3427 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3428 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3429 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3430 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3431 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3432 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3433 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3434 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3435 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3436 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3437 { 0, CPENC(0,0,0,0,0), 0 },
3438 };
3439
3440 bfd_boolean
3441 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3442 {
3443 return (reg->flags & F_DEPRECATED) != 0;
3444 }
3445
3446 bfd_boolean
3447 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3448 const aarch64_sys_reg *reg)
3449 {
3450 if (!(reg->flags & F_ARCHEXT))
3451 return TRUE;
3452
3453 /* PAN. Values are from aarch64_sys_regs. */
3454 if (reg->value == CPEN_(0,C2,3)
3455 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3456 return FALSE;
3457
3458 /* Virtualization host extensions: system registers. */
3459 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3460 || reg->value == CPENC (3, 4, C13, C0, 1)
3461 || reg->value == CPENC (3, 4, C14, C3, 0)
3462 || reg->value == CPENC (3, 4, C14, C3, 1)
3463 || reg->value == CPENC (3, 4, C14, C3, 2))
3464 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3465 return FALSE;
3466
3467 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3468 if ((reg->value == CPEN_ (5, C0, 0)
3469 || reg->value == CPEN_ (5, C0, 1)
3470 || reg->value == CPENC (3, 5, C1, C0, 0)
3471 || reg->value == CPENC (3, 5, C1, C0, 2)
3472 || reg->value == CPENC (3, 5, C2, C0, 0)
3473 || reg->value == CPENC (3, 5, C2, C0, 1)
3474 || reg->value == CPENC (3, 5, C2, C0, 2)
3475 || reg->value == CPENC (3, 5, C5, C1, 0)
3476 || reg->value == CPENC (3, 5, C5, C1, 1)
3477 || reg->value == CPENC (3, 5, C5, C2, 0)
3478 || reg->value == CPENC (3, 5, C6, C0, 0)
3479 || reg->value == CPENC (3, 5, C10, C2, 0)
3480 || reg->value == CPENC (3, 5, C10, C3, 0)
3481 || reg->value == CPENC (3, 5, C12, C0, 0)
3482 || reg->value == CPENC (3, 5, C13, C0, 1)
3483 || reg->value == CPENC (3, 5, C14, C1, 0))
3484 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3485 return FALSE;
3486
3487 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3488 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3489 || reg->value == CPENC (3, 5, C14, C2, 1)
3490 || reg->value == CPENC (3, 5, C14, C2, 2)
3491 || reg->value == CPENC (3, 5, C14, C3, 0)
3492 || reg->value == CPENC (3, 5, C14, C3, 1)
3493 || reg->value == CPENC (3, 5, C14, C3, 2))
3494 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3495 return FALSE;
3496
3497 /* ARMv8.2 features. */
3498
3499 /* ID_AA64MMFR2_EL1. */
3500 if (reg->value == CPENC (3, 0, C0, C7, 2)
3501 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3502 return FALSE;
3503
3504 /* PSTATE.UAO. */
3505 if (reg->value == CPEN_ (0, C2, 4)
3506 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3507 return FALSE;
3508
3509 /* RAS extension. */
3510
3511 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL, ERXADDR_EL1,
3512 ERXMISC0_EL1 AND ERXMISC1_EL1. */
3513 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3514 || reg->value == CPENC (3, 0, C5, C3, 1)
3515 || reg->value == CPENC (3, 0, C5, C3, 2)
3516 || reg->value == CPENC (3, 0, C5, C3, 3)
3517 || reg->value == CPENC (3, 0, C5, C4, 0)
3518 || reg->value == CPENC (3, 0, C5, C4, 1)
3519 || reg->value == CPENC (3, 0, C5, C4, 2)
3520 || reg->value == CPENC (3, 0, C5, C4, 3)
3521 || reg->value == CPENC (3, 0, C5, C5, 0)
3522 || reg->value == CPENC (3, 0, C5, C5, 1))
3523 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3524 return FALSE;
3525
3526 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3527 if ((reg->value == CPENC (3, 4, C5, C2, 3)
3528 || reg->value == CPENC (3, 0, C12, C1, 1)
3529 || reg->value == CPENC (3, 4, C12, C1, 1))
3530 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3531 return FALSE;
3532
3533 /* Statistical Profiling extension. */
3534 if ((reg->value == CPENC (3, 0, C9, C10, 0)
3535 || reg->value == CPENC (3, 0, C9, C10, 1)
3536 || reg->value == CPENC (3, 0, C9, C10, 3)
3537 || reg->value == CPENC (3, 0, C9, C10, 7)
3538 || reg->value == CPENC (3, 0, C9, C9, 0)
3539 || reg->value == CPENC (3, 0, C9, C9, 2)
3540 || reg->value == CPENC (3, 0, C9, C9, 3)
3541 || reg->value == CPENC (3, 0, C9, C9, 4)
3542 || reg->value == CPENC (3, 0, C9, C9, 5)
3543 || reg->value == CPENC (3, 0, C9, C9, 6)
3544 || reg->value == CPENC (3, 0, C9, C9, 7)
3545 || reg->value == CPENC (3, 4, C9, C9, 0)
3546 || reg->value == CPENC (3, 5, C9, C9, 0))
3547 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
3548 return FALSE;
3549
3550 return TRUE;
3551 }
3552
3553 const aarch64_sys_reg aarch64_pstatefields [] =
3554 {
3555 { "spsel", 0x05, 0 },
3556 { "daifset", 0x1e, 0 },
3557 { "daifclr", 0x1f, 0 },
3558 { "pan", 0x04, F_ARCHEXT },
3559 { "uao", 0x03, F_ARCHEXT },
3560 { 0, CPENC(0,0,0,0,0), 0 },
3561 };
3562
3563 bfd_boolean
3564 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3565 const aarch64_sys_reg *reg)
3566 {
3567 if (!(reg->flags & F_ARCHEXT))
3568 return TRUE;
3569
3570 /* PAN. Values are from aarch64_pstatefields. */
3571 if (reg->value == 0x04
3572 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3573 return FALSE;
3574
3575 /* UAO. Values are from aarch64_pstatefields. */
3576 if (reg->value == 0x03
3577 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3578 return FALSE;
3579
3580 return TRUE;
3581 }
3582
3583 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3584 {
3585 { "ialluis", CPENS(0,C7,C1,0), 0 },
3586 { "iallu", CPENS(0,C7,C5,0), 0 },
3587 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
3588 { 0, CPENS(0,0,0,0), 0 }
3589 };
3590
3591 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3592 {
3593 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
3594 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
3595 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
3596 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
3597 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
3598 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
3599 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
3600 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
3601 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
3602 { 0, CPENS(0,0,0,0), 0 }
3603 };
3604
3605 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3606 {
3607 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
3608 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
3609 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
3610 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
3611 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
3612 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
3613 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
3614 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
3615 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
3616 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
3617 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
3618 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
3619 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
3620 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
3621 { 0, CPENS(0,0,0,0), 0 }
3622 };
3623
3624 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3625 {
3626 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3627 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
3628 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
3629 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
3630 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3631 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
3632 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
3633 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
3634 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
3635 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
3636 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
3637 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
3638 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
3639 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
3640 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3641 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3642 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
3643 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
3644 { "alle2", CPENS(4,C8,C7,0), 0 },
3645 { "alle2is", CPENS(4,C8,C3,0), 0 },
3646 { "alle1", CPENS(4,C8,C7,4), 0 },
3647 { "alle1is", CPENS(4,C8,C3,4), 0 },
3648 { "alle3", CPENS(6,C8,C7,0), 0 },
3649 { "alle3is", CPENS(6,C8,C3,0), 0 },
3650 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
3651 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
3652 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
3653 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
3654 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
3655 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
3656 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
3657 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
3658 { 0, CPENS(0,0,0,0), 0 }
3659 };
3660
3661 bfd_boolean
3662 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
3663 {
3664 return (sys_ins_reg->flags & F_HASXT) != 0;
3665 }
3666
3667 extern bfd_boolean
3668 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
3669 const aarch64_sys_ins_reg *reg)
3670 {
3671 if (!(reg->flags & F_ARCHEXT))
3672 return TRUE;
3673
3674 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3675 if (reg->value == CPENS (3, C7, C12, 1)
3676 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3677 return FALSE;
3678
3679 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3680 if ((reg->value == CPENS (0, C7, C9, 0)
3681 || reg->value == CPENS (0, C7, C9, 1))
3682 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3683 return FALSE;
3684
3685 return TRUE;
3686 }
3687
3688 #undef C0
3689 #undef C1
3690 #undef C2
3691 #undef C3
3692 #undef C4
3693 #undef C5
3694 #undef C6
3695 #undef C7
3696 #undef C8
3697 #undef C9
3698 #undef C10
3699 #undef C11
3700 #undef C12
3701 #undef C13
3702 #undef C14
3703 #undef C15
3704
/* Extract bit number BT of the instruction word INSN.  */
3705 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field INSN[HI:LO], right-justified.  */
3706 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
3707
3708 static bfd_boolean
3709 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
3710 const aarch64_insn insn)
3711 {
3712 int t = BITS (insn, 4, 0);
3713 int n = BITS (insn, 9, 5);
3714 int t2 = BITS (insn, 14, 10);
3715
3716 if (BIT (insn, 23))
3717 {
3718 /* Write back enabled. */
3719 if ((t == n || t2 == n) && n != 31)
3720 return FALSE;
3721 }
3722
3723 if (BIT (insn, 22))
3724 {
3725 /* Load */
3726 if (t == t2)
3727 return FALSE;
3728 }
3729
3730 return TRUE;
3731 }
3732
3733 /* Include the opcode description table as well as the operand description
3734 table. */
3735 #define VERIFIER(x) verify_##x
3736 #include "aarch64-tbl.h"
This page took 0.143725 seconds and 4 git commands to generate.