1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include <stdint.h>
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30
31 #include "aarch64-opc.h"
32
33 #ifdef DEBUG_AARCH64
34 int debug_dump = FALSE;
35 #endif /* DEBUG_AARCH64 */
36
37 /* Helper functions to determine which operand to use to encode/decode
38 the size:Q fields for AdvSIMD instructions. */
39
40 static inline bfd_boolean
41 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
42 {
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
45 : FALSE);
46 }
47
48 static inline bfd_boolean
49 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
50 {
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
53 : FALSE);
54 }
55
56 enum data_pattern
57 {
58 DP_UNKNOWN,
59 DP_VECTOR_3SAME,
60 DP_VECTOR_LONG,
61 DP_VECTOR_WIDE,
62 DP_VECTOR_ACROSS_LANES,
63 };
64
65 static const char significant_operand_index [] =
66 {
67 0, /* DP_UNKNOWN, by default using operand 0. */
68 0, /* DP_VECTOR_3SAME */
69 1, /* DP_VECTOR_LONG */
70 2, /* DP_VECTOR_WIDE */
71 1, /* DP_VECTOR_ACROSS_LANES */
72 };
73
74 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
75 the data pattern.
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
78
79 static enum data_pattern
80 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
81 {
82 if (vector_qualifier_p (qualifiers[0]) == TRUE)
83 {
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers[0] == qualifiers[1]
87 && vector_qualifier_p (qualifiers[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers[0])
89 == aarch64_get_qualifier_esize (qualifiers[1]))
90 && (aarch64_get_qualifier_esize (qualifiers[0])
91 == aarch64_get_qualifier_esize (qualifiers[2])))
92 return DP_VECTOR_3SAME;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
95 or v.8h, v.16b. */
96 if (vector_qualifier_p (qualifiers[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers[0])
99 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
100 return DP_VECTOR_LONG;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers[0] == qualifiers[1]
103 && vector_qualifier_p (qualifiers[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers[0])
106 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers[0])
108 == aarch64_get_qualifier_esize (qualifiers[1])))
109 return DP_VECTOR_WIDE;
110 }
111 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
112 {
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers[1]) == TRUE
115 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
116 return DP_VECTOR_ACROSS_LANES;
117 }
118
119 return DP_UNKNOWN;
120 }
121
122 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
124 /* N.B. it is possible to do some optimization that doesn't call
125 get_data_pattern each time when we need to select an operand. We can
126 either buffer the calculated result or statically generate the data,
127 however, it is not obvious that the optimization will bring significant
128 benefit. */
129
130 int
131 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
132 {
133 return
134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
135 }
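/* Worked example (illustrative, not part of the upstream sources): for the
   long-form SADDL V0.8H, V1.8B, V2.8B the qualifier sequence is
   (V_8H, V_8B, V_8B), which get_data_pattern classifies as DP_VECTOR_LONG,
   so the function above returns 1 and the size:Q fields are encoded/decoded
   from operand 1 (the narrower source).  An across-lanes form such as
   SADDLV H0, V1.8B, i.e. (S_H, V_8B, NIL), likewise selects operand 1.  */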
136 \f
137 const aarch64_field fields[] =
138 {
139 { 0, 0 }, /* NIL. */
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
196 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
197 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
198 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
199 { 31, 1 }, /* b5: in the test bit and branch instructions. */
200 { 19, 5 }, /* b40: in the test bit and branch instructions. */
201 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
202 };
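/* Reading the table above (field layout only; the extraction helpers live
   elsewhere): FLD_Rd is { 0, 5 }, i.e. the destination register number
   occupies bits [4:0] of the instruction word, and FLD_imm19 is { 5, 19 },
   i.e. bits [23:5] as used by CBZ; a field value is recovered roughly as
   (code >> lsb) & ((1u << width) - 1).  */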
203
204 enum aarch64_operand_class
205 aarch64_get_operand_class (enum aarch64_opnd type)
206 {
207 return aarch64_operands[type].op_class;
208 }
209
210 const char *
211 aarch64_get_operand_name (enum aarch64_opnd type)
212 {
213 return aarch64_operands[type].name;
214 }
215
216 /* Get operand description string.
217 This is usually used for diagnostic purposes. */
218 const char *
219 aarch64_get_operand_desc (enum aarch64_opnd type)
220 {
221 return aarch64_operands[type].desc;
222 }
223
224 /* Table of all conditional affixes. */
225 const aarch64_cond aarch64_conds[16] =
226 {
227 {{"eq"}, 0x0},
228 {{"ne"}, 0x1},
229 {{"cs", "hs"}, 0x2},
230 {{"cc", "lo", "ul"}, 0x3},
231 {{"mi"}, 0x4},
232 {{"pl"}, 0x5},
233 {{"vs"}, 0x6},
234 {{"vc"}, 0x7},
235 {{"hi"}, 0x8},
236 {{"ls"}, 0x9},
237 {{"ge"}, 0xa},
238 {{"lt"}, 0xb},
239 {{"gt"}, 0xc},
240 {{"le"}, 0xd},
241 {{"al"}, 0xe},
242 {{"nv"}, 0xf},
243 };
244
245 const aarch64_cond *
246 get_cond_from_value (aarch64_insn value)
247 {
248 assert (value < 16);
249 return &aarch64_conds[(unsigned int) value];
250 }
251
252 const aarch64_cond *
253 get_inverted_cond (const aarch64_cond *cond)
254 {
255 return &aarch64_conds[cond->value ^ 0x1];
256 }
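/* For example, inverting a condition simply flips bit 0 of its encoding:
   passing EQ (0x0) to get_inverted_cond above yields NE (0x1), and GE (0xa)
   yields LT (0xb).  */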
257
258 /* Table describing the operand extension/shifting operators; indexed by
259 enum aarch64_modifier_kind.
260
261 The value column provides the most common values for encoding modifiers,
262 which enables table-driven encoding/decoding for the modifiers. */
263 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
264 {
265 {"none", 0x0},
266 {"msl", 0x0},
267 {"ror", 0x3},
268 {"asr", 0x2},
269 {"lsr", 0x1},
270 {"lsl", 0x0},
271 {"uxtb", 0x0},
272 {"uxth", 0x1},
273 {"uxtw", 0x2},
274 {"uxtx", 0x3},
275 {"sxtb", 0x4},
276 {"sxth", 0x5},
277 {"sxtw", 0x6},
278 {"sxtx", 0x7},
279 {NULL, 0},
280 };
281
282 enum aarch64_modifier_kind
283 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
284 {
285 return desc - aarch64_operand_modifiers;
286 }
287
288 aarch64_insn
289 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
290 {
291 return aarch64_operand_modifiers[kind].value;
292 }
293
294 enum aarch64_modifier_kind
295 aarch64_get_operand_modifier_from_value (aarch64_insn value,
296 bfd_boolean extend_p)
297 {
298 if (extend_p == TRUE)
299 return AARCH64_MOD_UXTB + value;
300 else
301 return AARCH64_MOD_LSL - value;
302 }
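/* Illustration, relying on the enum order mirroring the table above:
   aarch64_get_operand_modifier_from_value (2, TRUE) gives AARCH64_MOD_UXTW
   (encoding 0x2), while aarch64_get_operand_modifier_from_value (2, FALSE)
   gives AARCH64_MOD_ASR, whose shift encoding is also 0x2.  */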
303
304 bfd_boolean
305 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
306 {
307 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
308 ? TRUE : FALSE;
309 }
310
311 static inline bfd_boolean
312 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
313 {
314 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
315 ? TRUE : FALSE;
316 }
317
318 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
319 {
320 { "#0x00", 0x0 },
321 { "oshld", 0x1 },
322 { "oshst", 0x2 },
323 { "osh", 0x3 },
324 { "#0x04", 0x4 },
325 { "nshld", 0x5 },
326 { "nshst", 0x6 },
327 { "nsh", 0x7 },
328 { "#0x08", 0x8 },
329 { "ishld", 0x9 },
330 { "ishst", 0xa },
331 { "ish", 0xb },
332 { "#0x0c", 0xc },
333 { "ld", 0xd },
334 { "st", 0xe },
335 { "sy", 0xf },
336 };
337
338 /* Table describing the operands supported by the aliases of the HINT
339 instruction.
340
341 The name column is the operand that is accepted for the alias. The value
342 column is the hint number of the alias. The list of operands is terminated
343 by NULL in the name column. */
344
345 const struct aarch64_name_value_pair aarch64_hint_options[] =
346 {
347 { "csync", 0x11 }, /* PSB CSYNC. */
348 { NULL, 0x0 },
349 };
350
351 /* op -> op: load = 0 instruction = 1 store = 2
352 l -> level: 1-3
353 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
354 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
355 const struct aarch64_name_value_pair aarch64_prfops[32] =
356 {
357 { "pldl1keep", B(0, 1, 0) },
358 { "pldl1strm", B(0, 1, 1) },
359 { "pldl2keep", B(0, 2, 0) },
360 { "pldl2strm", B(0, 2, 1) },
361 { "pldl3keep", B(0, 3, 0) },
362 { "pldl3strm", B(0, 3, 1) },
363 { NULL, 0x06 },
364 { NULL, 0x07 },
365 { "plil1keep", B(1, 1, 0) },
366 { "plil1strm", B(1, 1, 1) },
367 { "plil2keep", B(1, 2, 0) },
368 { "plil2strm", B(1, 2, 1) },
369 { "plil3keep", B(1, 3, 0) },
370 { "plil3strm", B(1, 3, 1) },
371 { NULL, 0x0e },
372 { NULL, 0x0f },
373 { "pstl1keep", B(2, 1, 0) },
374 { "pstl1strm", B(2, 1, 1) },
375 { "pstl2keep", B(2, 2, 0) },
376 { "pstl2strm", B(2, 2, 1) },
377 { "pstl3keep", B(2, 3, 0) },
378 { "pstl3strm", B(2, 3, 1) },
379 { NULL, 0x16 },
380 { NULL, 0x17 },
381 { NULL, 0x18 },
382 { NULL, 0x19 },
383 { NULL, 0x1a },
384 { NULL, 0x1b },
385 { NULL, 0x1c },
386 { NULL, 0x1d },
387 { NULL, 0x1e },
388 { NULL, 0x1f },
389 };
390 #undef B
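/* For example, with the encoding above "pldl1keep" is B (0, 1, 0) = 0x00 and
   "pstl3strm" is B (2, 3, 1) = 0x15, matching their positions in the
   32-entry table.  */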
391 \f
392 /* Utilities on value constraint. */
393
394 static inline int
395 value_in_range_p (int64_t value, int low, int high)
396 {
397 return (value >= low && value <= high) ? 1 : 0;
398 }
399
400 static inline int
401 value_aligned_p (int64_t value, int align)
402 {
403 return ((value & (align - 1)) == 0) ? 1 : 0;
404 }
405
406 /* A signed value fits in a field. */
407 static inline int
408 value_fit_signed_field_p (int64_t value, unsigned width)
409 {
410 assert (width < 32);
411 if (width < sizeof (value) * 8)
412 {
413 int64_t lim = (int64_t)1 << (width - 1);
414 if (value >= -lim && value < lim)
415 return 1;
416 }
417 return 0;
418 }
419
420 /* An unsigned value fits in a field. */
421 static inline int
422 value_fit_unsigned_field_p (int64_t value, unsigned width)
423 {
424 assert (width < 32);
425 if (width < sizeof (value) * 8)
426 {
427 int64_t lim = (int64_t)1 << width;
428 if (value >= 0 && value < lim)
429 return 1;
430 }
431 return 0;
432 }
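/* For instance, a 6-bit signed field holds -32..31, so
   value_fit_signed_field_p (31, 6) is 1 and value_fit_signed_field_p (32, 6)
   is 0; the unsigned variant accepts 0..63 for the same width.  */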
433
434 /* Return 1 if OPERAND is SP or WSP. */
435 int
436 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
437 {
438 return ((aarch64_get_operand_class (operand->type)
439 == AARCH64_OPND_CLASS_INT_REG)
440 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
441 && operand->reg.regno == 31);
442 }
443
444 /* Return 1 if OPERAND is XZR or WZR. */
445 int
446 aarch64_zero_register_p (const aarch64_opnd_info *operand)
447 {
448 return ((aarch64_get_operand_class (operand->type)
449 == AARCH64_OPND_CLASS_INT_REG)
450 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
451 && operand->reg.regno == 31);
452 }
453
454 /* Return true if the operand *OPERAND that has the operand code
455 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
456 qualified by the qualifier TARGET. */
457
458 static inline int
459 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
460 aarch64_opnd_qualifier_t target)
461 {
462 switch (operand->qualifier)
463 {
464 case AARCH64_OPND_QLF_W:
465 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
466 return 1;
467 break;
468 case AARCH64_OPND_QLF_X:
469 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
470 return 1;
471 break;
472 case AARCH64_OPND_QLF_WSP:
473 if (target == AARCH64_OPND_QLF_W
474 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
475 return 1;
476 break;
477 case AARCH64_OPND_QLF_SP:
478 if (target == AARCH64_OPND_QLF_X
479 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
480 return 1;
481 break;
482 default:
483 break;
484 }
485
486 return 0;
487 }
488
489 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
490 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
491
492 Return NIL if more than one expected qualifier is found. */
493
494 aarch64_opnd_qualifier_t
495 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
496 int idx,
497 const aarch64_opnd_qualifier_t known_qlf,
498 int known_idx)
499 {
500 int i, saved_i;
501
502 /* Special case.
503
504 When the known qualifier is NIL, we have to assume that there is only
505 one qualifier sequence in the *QSEQ_LIST and return the corresponding
506 qualifier directly. One scenario is that for instruction
507 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
508 which has only one possible valid qualifier sequence
509 NIL, S_D
510 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
511 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
512
513 Because the qualifier NIL has dual roles in the qualifier sequence:
514 it can mean no qualifier for the operand, or the qualifier sequence is
515 not in use (when all qualifiers in the sequence are NILs), we have to
516 handle this special case here. */
517 if (known_qlf == AARCH64_OPND_NIL)
518 {
519 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
520 return qseq_list[0][idx];
521 }
522
523 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
524 {
525 if (qseq_list[i][known_idx] == known_qlf)
526 {
527 if (saved_i != -1)
528 /* More than one sequence is found to have KNOWN_QLF at
529 KNOWN_IDX. */
530 return AARCH64_OPND_NIL;
531 saved_i = i;
532 }
533 }
534
535 return qseq_list[saved_i][idx];
536 }
537
538 enum operand_qualifier_kind
539 {
540 OQK_NIL,
541 OQK_OPD_VARIANT,
542 OQK_VALUE_IN_RANGE,
543 OQK_MISC,
544 };
545
546 /* Operand qualifier description. */
547 struct operand_qualifier_data
548 {
549 /* The usage of the three data fields depends on the qualifier kind. */
550 int data0;
551 int data1;
552 int data2;
553 /* Description. */
554 const char *desc;
555 /* Kind. */
556 enum operand_qualifier_kind kind;
557 };
558
559 /* Indexed by the operand qualifier enumerators. */
560 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
561 {
562 {0, 0, 0, "NIL", OQK_NIL},
563
564 /* Operand variant qualifiers.
565 First 3 fields:
566 element size, number of elements and common value for encoding. */
567
568 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
569 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
570 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
571 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
572
573 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
574 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
575 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
576 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
577 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
578
579 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
580 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
581 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
582 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
583 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
584 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
585 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
586 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
587 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
588 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
589
590 /* Qualifiers constraining the value range.
591 First 3 fields:
592 Lower bound, higher bound, unused. */
593
594 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
595 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
596 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
597 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
598 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
599 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
600
601 /* Qualifiers for miscellaneous purpose.
602 First 3 fields:
603 unused, unused and unused. */
604
605 {0, 0, 0, "lsl", 0},
606 {0, 0, 0, "msl", 0},
607
608 {0, 0, 0, "retrieving", 0},
609 };
610
611 static inline bfd_boolean
612 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
613 {
614 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
615 ? TRUE : FALSE;
616 }
617
618 static inline bfd_boolean
619 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
620 {
621 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
622 ? TRUE : FALSE;
623 }
624
625 const char*
626 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
627 {
628 return aarch64_opnd_qualifiers[qualifier].desc;
629 }
630
631 /* Given an operand qualifier, return the expected data element size
632 of a qualified operand. */
633 unsigned char
634 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
635 {
636 assert (operand_variant_qualifier_p (qualifier) == TRUE);
637 return aarch64_opnd_qualifiers[qualifier].data0;
638 }
639
640 unsigned char
641 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
642 {
643 assert (operand_variant_qualifier_p (qualifier) == TRUE);
644 return aarch64_opnd_qualifiers[qualifier].data1;
645 }
646
647 aarch64_insn
648 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
649 {
650 assert (operand_variant_qualifier_p (qualifier) == TRUE);
651 return aarch64_opnd_qualifiers[qualifier].data2;
652 }
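/* Reading the table above, AARCH64_OPND_QLF_V_4S is {4, 4, 0x5, "4s"}: the
   accessors return an element size of 4 bytes, 4 elements and a standard
   encoding value of 0x5 respectively.  */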
653
654 static int
655 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
656 {
657 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
658 return aarch64_opnd_qualifiers[qualifier].data0;
659 }
660
661 static int
662 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
663 {
664 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
665 return aarch64_opnd_qualifiers[qualifier].data1;
666 }
667
668 #ifdef DEBUG_AARCH64
669 void
670 aarch64_verbose (const char *str, ...)
671 {
672 va_list ap;
673 va_start (ap, str);
674 printf ("#### ");
675 vprintf (str, ap);
676 printf ("\n");
677 va_end (ap);
678 }
679
680 static inline void
681 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
682 {
683 int i;
684 printf ("#### \t");
685 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
686 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
687 printf ("\n");
688 }
689
690 static void
691 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
692 const aarch64_opnd_qualifier_t *qualifier)
693 {
694 int i;
695 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
696
697 aarch64_verbose ("dump_match_qualifiers:");
698 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
699 curr[i] = opnd[i].qualifier;
700 dump_qualifier_sequence (curr);
701 aarch64_verbose ("against");
702 dump_qualifier_sequence (qualifier);
703 }
704 #endif /* DEBUG_AARCH64 */
705
706 /* TODO improve this, we can have an extra field at the runtime to
707 store the number of operands rather than calculating it every time. */
708
709 int
710 aarch64_num_of_operands (const aarch64_opcode *opcode)
711 {
712 int i = 0;
713 const enum aarch64_opnd *opnds = opcode->operands;
714 while (opnds[i++] != AARCH64_OPND_NIL)
715 ;
716 --i;
717 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
718 return i;
719 }
720
721 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
722 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
723
724 N.B. on entry, it is very likely that only some operands in *INST
725 have had their qualifiers established.
726
727 If STOP_AT is not -1, the function will only try to match
728 the qualifier sequence for operands before and including the operand
729 of index STOP_AT; and on success *RET will only be filled with the first
730 (STOP_AT+1) qualifiers.
731
732 A couple of examples of the matching algorithm:
733
734 X,W,NIL should match
735 X,W,NIL
736
737 NIL,NIL should match
738 X ,NIL
739
740 Apart from serving the main encoding routine, this can also be called
741 during or after the operand decoding. */
742
743 int
744 aarch64_find_best_match (const aarch64_inst *inst,
745 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
746 int stop_at, aarch64_opnd_qualifier_t *ret)
747 {
748 int found = 0;
749 int i, num_opnds;
750 const aarch64_opnd_qualifier_t *qualifiers;
751
752 num_opnds = aarch64_num_of_operands (inst->opcode);
753 if (num_opnds == 0)
754 {
755 DEBUG_TRACE ("SUCCEED: no operand");
756 return 1;
757 }
758
759 if (stop_at < 0 || stop_at >= num_opnds)
760 stop_at = num_opnds - 1;
761
762 /* For each pattern. */
763 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
764 {
765 int j;
766 qualifiers = *qualifiers_list;
767
768 /* Start as positive. */
769 found = 1;
770
771 DEBUG_TRACE ("%d", i);
772 #ifdef DEBUG_AARCH64
773 if (debug_dump)
774 dump_match_qualifiers (inst->operands, qualifiers);
775 #endif
776
777 /* Most opcodes have far fewer patterns in the list.
778 An all-NIL qualifier sequence indicates the end of the list. */
779 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
780 {
781 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
782 if (i)
783 found = 0;
784 break;
785 }
786
787 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
788 {
789 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
790 {
791 /* Either the operand does not have qualifier, or the qualifier
792 for the operand needs to be deduced from the qualifier
793 sequence.
794 In the latter case, any constraint checking related with
795 the obtained qualifier should be done later in
796 operand_general_constraint_met_p. */
797 continue;
798 }
799 else if (*qualifiers != inst->operands[j].qualifier)
800 {
801 /* Unless the target qualifier can also qualify the operand
802 (which has already had a non-nil qualifier), non-equal
803 qualifiers are generally un-matched. */
804 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
805 continue;
806 else
807 {
808 found = 0;
809 break;
810 }
811 }
812 else
813 continue; /* Equal qualifiers are certainly matched. */
814 }
815
816 /* Qualifiers established. */
817 if (found == 1)
818 break;
819 }
820
821 if (found == 1)
822 {
823 /* Fill the result in *RET. */
824 int j;
825 qualifiers = *qualifiers_list;
826
827 DEBUG_TRACE ("complete qualifiers using list %d", i);
828 #ifdef DEBUG_AARCH64
829 if (debug_dump)
830 dump_qualifier_sequence (qualifiers);
831 #endif
832
833 for (j = 0; j <= stop_at; ++j, ++qualifiers)
834 ret[j] = *qualifiers;
835 for (; j < AARCH64_MAX_OPND_NUM; ++j)
836 ret[j] = AARCH64_OPND_QLF_NIL;
837
838 DEBUG_TRACE ("SUCCESS");
839 return 1;
840 }
841
842 DEBUG_TRACE ("FAIL");
843 return 0;
844 }
845
846 /* Operand qualifier matching and resolving.
847
848 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
849 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
850
851 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
852 succeeds. */
853
854 static int
855 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
856 {
857 int i, nops;
858 aarch64_opnd_qualifier_seq_t qualifiers;
859
860 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
861 qualifiers))
862 {
863 DEBUG_TRACE ("matching FAIL");
864 return 0;
865 }
866
867 if (inst->opcode->flags & F_STRICT)
868 {
869 /* Require an exact qualifier match, even for NIL qualifiers. */
870 nops = aarch64_num_of_operands (inst->opcode);
871 for (i = 0; i < nops; ++i)
872 if (inst->operands[i].qualifier != qualifiers[i])
873 return FALSE;
874 }
875
876 /* Update the qualifiers. */
877 if (update_p == TRUE)
878 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
879 {
880 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
881 break;
882 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
883 "update %s with %s for operand %d",
884 aarch64_get_qualifier_name (inst->operands[i].qualifier),
885 aarch64_get_qualifier_name (qualifiers[i]), i);
886 inst->operands[i].qualifier = qualifiers[i];
887 }
888
889 DEBUG_TRACE ("matching SUCCESS");
890 return 1;
891 }
892
893 /* Return TRUE if VALUE is a wide constant that can be moved into a general
894 register by MOVZ.
895
896 IS32 indicates whether value is a 32-bit immediate or not.
897 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
898 amount will be returned in *SHIFT_AMOUNT. */
899
900 bfd_boolean
901 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
902 {
903 int amount;
904
905 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
906
907 if (is32)
908 {
909 /* Allow all zeros or all ones in top 32-bits, so that
910 32-bit constant expressions like ~0x80000000 are
911 permitted. */
912 uint64_t ext = value;
913 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
914 /* Immediate out of range. */
915 return FALSE;
916 value &= (int64_t) 0xffffffff;
917 }
918
919 /* first, try movz then movn */
920 amount = -1;
921 if ((value & ((int64_t) 0xffff << 0)) == value)
922 amount = 0;
923 else if ((value & ((int64_t) 0xffff << 16)) == value)
924 amount = 16;
925 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
926 amount = 32;
927 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
928 amount = 48;
929
930 if (amount == -1)
931 {
932 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
933 return FALSE;
934 }
935
936 if (shift_amount != NULL)
937 *shift_amount = amount;
938
939 DEBUG_TRACE ("exit TRUE with amount %d", amount);
940
941 return TRUE;
942 }
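/* For example, aarch64_wide_constant_p (0xffff0000, 1, &amount) returns TRUE
   with the shift amount set to 16 (a MOVZ candidate), whereas 0x0001ffff
   spans two 16-bit chunks and is rejected.  */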
943
944 /* Build the accepted values for immediate logical SIMD instructions.
945
946 The standard encodings of the immediate value are:
947 N imms immr SIMD size R S
948 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
949 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
950 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
951 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
952 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
953 0 11110s 00000r 2 UInt(r) UInt(s)
954 where all-ones value of S is reserved.
955
956 Let's call E the SIMD size.
957
958 The immediate value is: S+1 bits '1' rotated to the right by R.
959
960 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
961 (remember S != E - 1). */
962
963 #define TOTAL_IMM_NB 5334
964
965 typedef struct
966 {
967 uint64_t imm;
968 aarch64_insn encoding;
969 } simd_imm_encoding;
970
971 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
972
973 static int
974 simd_imm_encoding_cmp(const void *i1, const void *i2)
975 {
976 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
977 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
978
979 if (imm1->imm < imm2->imm)
980 return -1;
981 if (imm1->imm > imm2->imm)
982 return +1;
983 return 0;
984 }
985
986 /* immediate bitfield standard encoding
987 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
988 1 ssssss rrrrrr 64 rrrrrr ssssss
989 0 0sssss 0rrrrr 32 rrrrr sssss
990 0 10ssss 00rrrr 16 rrrr ssss
991 0 110sss 000rrr 8 rrr sss
992 0 1110ss 0000rr 4 rr ss
993 0 11110s 00000r 2 r s */
994 static inline int
995 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
996 {
997 return (is64 << 12) | (r << 6) | s;
998 }
999
1000 static void
1001 build_immediate_table (void)
1002 {
1003 uint32_t log_e, e, s, r, s_mask;
1004 uint64_t mask, imm;
1005 int nb_imms;
1006 int is64;
1007
1008 nb_imms = 0;
1009 for (log_e = 1; log_e <= 6; log_e++)
1010 {
1011 /* Get element size. */
1012 e = 1u << log_e;
1013 if (log_e == 6)
1014 {
1015 is64 = 1;
1016 mask = 0xffffffffffffffffull;
1017 s_mask = 0;
1018 }
1019 else
1020 {
1021 is64 = 0;
1022 mask = (1ull << e) - 1;
1023 /* log_e s_mask
1024 1 ((1 << 4) - 1) << 2 = 111100
1025 2 ((1 << 3) - 1) << 3 = 111000
1026 3 ((1 << 2) - 1) << 4 = 110000
1027 4 ((1 << 1) - 1) << 5 = 100000
1028 5 ((1 << 0) - 1) << 6 = 000000 */
1029 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1030 }
1031 for (s = 0; s < e - 1; s++)
1032 for (r = 0; r < e; r++)
1033 {
1034 /* s+1 consecutive bits to 1 (s < 63) */
1035 imm = (1ull << (s + 1)) - 1;
1036 /* rotate right by r */
1037 if (r != 0)
1038 imm = (imm >> r) | ((imm << (e - r)) & mask);
1039 /* replicate the constant depending on SIMD size */
1040 switch (log_e)
1041 {
1042 case 1: imm = (imm << 2) | imm; /* Fall through. */
1043 case 2: imm = (imm << 4) | imm; /* Fall through. */
1044 case 3: imm = (imm << 8) | imm; /* Fall through. */
1045 case 4: imm = (imm << 16) | imm; /* Fall through. */
1046 case 5: imm = (imm << 32) | imm; /* Fall through. */
1047 case 6: break;
1048 default: abort ();
1049 }
1050 simd_immediates[nb_imms].imm = imm;
1051 simd_immediates[nb_imms].encoding =
1052 encode_immediate_bitfield(is64, s | s_mask, r);
1053 nb_imms++;
1054 }
1055 }
1056 assert (nb_imms == TOTAL_IMM_NB);
1057 qsort(simd_immediates, nb_imms,
1058 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1059 }
1060
1061 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1062 be accepted by logical (immediate) instructions
1063 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1064
1065 ESIZE is the number of bytes in the decoded immediate value.
1066 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1067 VALUE will be returned in *ENCODING. */
1068
1069 bfd_boolean
1070 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1071 {
1072 simd_imm_encoding imm_enc;
1073 const simd_imm_encoding *imm_encoding;
1074 static bfd_boolean initialized = FALSE;
1075 uint64_t upper;
1076 int i;
1077
1078 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1079 value, esize);
1080
1081 if (initialized == FALSE)
1082 {
1083 build_immediate_table ();
1084 initialized = TRUE;
1085 }
1086
1087 /* Allow all zeros or all ones in top bits, so that
1088 constant expressions like ~1 are permitted. */
1089 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1090 if ((value & ~upper) != value && (value | upper) != value)
1091 return FALSE;
1092
1093 /* Replicate to a full 64-bit value. */
1094 value &= ~upper;
1095 for (i = esize * 8; i < 64; i *= 2)
1096 value |= (value << i);
1097
1098 imm_enc.imm = value;
1099 imm_encoding = (const simd_imm_encoding *)
1100 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1101 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1102 if (imm_encoding == NULL)
1103 {
1104 DEBUG_TRACE ("exit with FALSE");
1105 return FALSE;
1106 }
1107 if (encoding != NULL)
1108 *encoding = imm_encoding->encoding;
1109 DEBUG_TRACE ("exit with TRUE");
1110 return TRUE;
1111 }
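/* For example, 0x5555555555555555 (alternating bits, element size 2) is a
   valid bitmask immediate, so aarch64_logical_immediate_p (value, 8, NULL)
   returns TRUE for it, while 0 and ~0 can never be encoded and always
   return FALSE.  */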
1112
1113 /* If 64-bit immediate IMM is in the format of
1114 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1115 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1116 of value "abcdefgh". Otherwise return -1. */
1117 int
1118 aarch64_shrink_expanded_imm8 (uint64_t imm)
1119 {
1120 int i, ret;
1121 uint32_t byte;
1122
1123 ret = 0;
1124 for (i = 0; i < 8; i++)
1125 {
1126 byte = (imm >> (8 * i)) & 0xff;
1127 if (byte == 0xff)
1128 ret |= 1 << i;
1129 else if (byte != 0x00)
1130 return -1;
1131 }
1132 return ret;
1133 }
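/* For example, aarch64_shrink_expanded_imm8 (0x00ff00ff00ff00ffULL) returns
   0x55, while any value containing a byte other than 0x00 or 0xff, such as
   0x00ff00ff00ff00feULL, returns -1.  */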
1134
1135 /* Utility inline functions for operand_general_constraint_met_p. */
1136
1137 static inline void
1138 set_error (aarch64_operand_error *mismatch_detail,
1139 enum aarch64_operand_error_kind kind, int idx,
1140 const char* error)
1141 {
1142 if (mismatch_detail == NULL)
1143 return;
1144 mismatch_detail->kind = kind;
1145 mismatch_detail->index = idx;
1146 mismatch_detail->error = error;
1147 }
1148
1149 static inline void
1150 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1151 const char* error)
1152 {
1153 if (mismatch_detail == NULL)
1154 return;
1155 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1156 }
1157
1158 static inline void
1159 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1160 int idx, int lower_bound, int upper_bound,
1161 const char* error)
1162 {
1163 if (mismatch_detail == NULL)
1164 return;
1165 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1166 mismatch_detail->data[0] = lower_bound;
1167 mismatch_detail->data[1] = upper_bound;
1168 }
1169
1170 static inline void
1171 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1172 int idx, int lower_bound, int upper_bound)
1173 {
1174 if (mismatch_detail == NULL)
1175 return;
1176 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1177 _("immediate value"));
1178 }
1179
1180 static inline void
1181 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1182 int idx, int lower_bound, int upper_bound)
1183 {
1184 if (mismatch_detail == NULL)
1185 return;
1186 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1187 _("immediate offset"));
1188 }
1189
1190 static inline void
1191 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1192 int idx, int lower_bound, int upper_bound)
1193 {
1194 if (mismatch_detail == NULL)
1195 return;
1196 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1197 _("register number"));
1198 }
1199
1200 static inline void
1201 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1202 int idx, int lower_bound, int upper_bound)
1203 {
1204 if (mismatch_detail == NULL)
1205 return;
1206 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1207 _("register element index"));
1208 }
1209
1210 static inline void
1211 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1212 int idx, int lower_bound, int upper_bound)
1213 {
1214 if (mismatch_detail == NULL)
1215 return;
1216 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1217 _("shift amount"));
1218 }
1219
1220 static inline void
1221 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1222 int alignment)
1223 {
1224 if (mismatch_detail == NULL)
1225 return;
1226 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1227 mismatch_detail->data[0] = alignment;
1228 }
1229
1230 static inline void
1231 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1232 int expected_num)
1233 {
1234 if (mismatch_detail == NULL)
1235 return;
1236 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1237 mismatch_detail->data[0] = expected_num;
1238 }
1239
1240 static inline void
1241 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1242 const char* error)
1243 {
1244 if (mismatch_detail == NULL)
1245 return;
1246 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1247 }
1248
1249 /* General constraint checking based on operand code.
1250
1251 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1252 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1253
1254 This function has to be called after the qualifiers for all operands
1255 have been resolved.
1256
1257 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1258 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1259 of error message during the disassembling where error message is not
1260 wanted. We avoid the dynamic construction of strings of error messages
1261 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1262 use a combination of error code, static string and some integer data to
1263 represent an error. */
1264
1265 static int
1266 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1267 enum aarch64_opnd type,
1268 const aarch64_opcode *opcode,
1269 aarch64_operand_error *mismatch_detail)
1270 {
1271 unsigned num;
1272 unsigned char size;
1273 int64_t imm;
1274 const aarch64_opnd_info *opnd = opnds + idx;
1275 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1276
1277 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1278
1279 switch (aarch64_operands[type].op_class)
1280 {
1281 case AARCH64_OPND_CLASS_INT_REG:
1282 /* Check pair reg constraints for cas* instructions. */
1283 if (type == AARCH64_OPND_PAIRREG)
1284 {
1285 assert (idx == 1 || idx == 3);
1286 if (opnds[idx - 1].reg.regno % 2 != 0)
1287 {
1288 set_syntax_error (mismatch_detail, idx - 1,
1289 _("reg pair must start from even reg"));
1290 return 0;
1291 }
1292 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1293 {
1294 set_syntax_error (mismatch_detail, idx,
1295 _("reg pair must be contiguous"));
1296 return 0;
1297 }
1298 break;
1299 }
1300
1301 /* <Xt> may be optional in some IC and TLBI instructions. */
1302 if (type == AARCH64_OPND_Rt_SYS)
1303 {
1304 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1305 == AARCH64_OPND_CLASS_SYSTEM));
1306 if (opnds[1].present
1307 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1308 {
1309 set_other_error (mismatch_detail, idx, _("extraneous register"));
1310 return 0;
1311 }
1312 if (!opnds[1].present
1313 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1314 {
1315 set_other_error (mismatch_detail, idx, _("missing register"));
1316 return 0;
1317 }
1318 }
1319 switch (qualifier)
1320 {
1321 case AARCH64_OPND_QLF_WSP:
1322 case AARCH64_OPND_QLF_SP:
1323 if (!aarch64_stack_pointer_p (opnd))
1324 {
1325 set_other_error (mismatch_detail, idx,
1326 _("stack pointer register expected"));
1327 return 0;
1328 }
1329 break;
1330 default:
1331 break;
1332 }
1333 break;
1334
1335 case AARCH64_OPND_CLASS_COND:
1336 if (type == AARCH64_OPND_COND1
1337 && (opnds[idx].cond->value & 0xe) == 0xe)
1338 {
1339 /* Don't allow AL or NV. */
1340 set_syntax_error (mismatch_detail, idx, NULL);
1341 }
1342 break;
1343
1344 case AARCH64_OPND_CLASS_ADDRESS:
1345 /* Check writeback. */
1346 switch (opcode->iclass)
1347 {
1348 case ldst_pos:
1349 case ldst_unscaled:
1350 case ldstnapair_offs:
1351 case ldstpair_off:
1352 case ldst_unpriv:
1353 if (opnd->addr.writeback == 1)
1354 {
1355 set_syntax_error (mismatch_detail, idx,
1356 _("unexpected address writeback"));
1357 return 0;
1358 }
1359 break;
1360 case ldst_imm9:
1361 case ldstpair_indexed:
1362 case asisdlsep:
1363 case asisdlsop:
1364 if (opnd->addr.writeback == 0)
1365 {
1366 set_syntax_error (mismatch_detail, idx,
1367 _("address writeback expected"));
1368 return 0;
1369 }
1370 break;
1371 default:
1372 assert (opnd->addr.writeback == 0);
1373 break;
1374 }
1375 switch (type)
1376 {
1377 case AARCH64_OPND_ADDR_SIMM7:
1378 /* Scaled signed 7 bits immediate offset. */
1379 /* Get the size of the data element that is accessed, which may be
1380 different from that of the source register size,
1381 e.g. in strb/ldrb. */
1382 size = aarch64_get_qualifier_esize (opnd->qualifier);
1383 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1384 {
1385 set_offset_out_of_range_error (mismatch_detail, idx,
1386 -64 * size, 63 * size);
1387 return 0;
1388 }
1389 if (!value_aligned_p (opnd->addr.offset.imm, size))
1390 {
1391 set_unaligned_error (mismatch_detail, idx, size);
1392 return 0;
1393 }
1394 break;
1395 case AARCH64_OPND_ADDR_SIMM9:
1396 /* Unscaled signed 9 bits immediate offset. */
1397 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1398 {
1399 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1400 return 0;
1401 }
1402 break;
1403
1404 case AARCH64_OPND_ADDR_SIMM9_2:
1405 /* Unscaled signed 9 bits immediate offset, which has to be negative
1406 or unaligned. */
1407 size = aarch64_get_qualifier_esize (qualifier);
1408 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1409 && !value_aligned_p (opnd->addr.offset.imm, size))
1410 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1411 return 1;
1412 set_other_error (mismatch_detail, idx,
1413 _("negative or unaligned offset expected"));
1414 return 0;
1415
1416 case AARCH64_OPND_SIMD_ADDR_POST:
1417 /* AdvSIMD load/store multiple structures, post-index. */
1418 assert (idx == 1);
1419 if (opnd->addr.offset.is_reg)
1420 {
1421 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1422 return 1;
1423 else
1424 {
1425 set_other_error (mismatch_detail, idx,
1426 _("invalid register offset"));
1427 return 0;
1428 }
1429 }
1430 else
1431 {
1432 const aarch64_opnd_info *prev = &opnds[idx-1];
1433 unsigned num_bytes; /* total number of bytes transferred. */
1434 /* The opcode dependent area stores the number of elements in
1435 each structure to be loaded/stored. */
1436 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1437 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1438 /* Special handling of loading single structure to all lane. */
1439 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1440 * aarch64_get_qualifier_esize (prev->qualifier);
1441 else
1442 num_bytes = prev->reglist.num_regs
1443 * aarch64_get_qualifier_esize (prev->qualifier)
1444 * aarch64_get_qualifier_nelem (prev->qualifier);
1445 if ((int) num_bytes != opnd->addr.offset.imm)
1446 {
1447 set_other_error (mismatch_detail, idx,
1448 _("invalid post-increment amount"));
1449 return 0;
1450 }
1451 }
1452 break;
1453
1454 case AARCH64_OPND_ADDR_REGOFF:
1455 /* Get the size of the data element that is accessed, which may be
1456 different from that of the source register size,
1457 e.g. in strb/ldrb. */
1458 size = aarch64_get_qualifier_esize (opnd->qualifier);
1459 /* It is either no shift or shift by the binary logarithm of SIZE. */
1460 if (opnd->shifter.amount != 0
1461 && opnd->shifter.amount != (int)get_logsz (size))
1462 {
1463 set_other_error (mismatch_detail, idx,
1464 _("invalid shift amount"));
1465 return 0;
1466 }
1467 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1468 operators. */
1469 switch (opnd->shifter.kind)
1470 {
1471 case AARCH64_MOD_UXTW:
1472 case AARCH64_MOD_LSL:
1473 case AARCH64_MOD_SXTW:
1474 case AARCH64_MOD_SXTX: break;
1475 default:
1476 set_other_error (mismatch_detail, idx,
1477 _("invalid extend/shift operator"));
1478 return 0;
1479 }
1480 break;
1481
1482 case AARCH64_OPND_ADDR_UIMM12:
1483 imm = opnd->addr.offset.imm;
1484 /* Get the size of the data element that is accessed, which may be
1485 different from that of the source register size,
1486 e.g. in strb/ldrb. */
1487 size = aarch64_get_qualifier_esize (qualifier);
1488 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1489 {
1490 set_offset_out_of_range_error (mismatch_detail, idx,
1491 0, 4095 * size);
1492 return 0;
1493 }
1494 if (!value_aligned_p (opnd->addr.offset.imm, size))
1495 {
1496 set_unaligned_error (mismatch_detail, idx, size);
1497 return 0;
1498 }
1499 break;
1500
1501 case AARCH64_OPND_ADDR_PCREL14:
1502 case AARCH64_OPND_ADDR_PCREL19:
1503 case AARCH64_OPND_ADDR_PCREL21:
1504 case AARCH64_OPND_ADDR_PCREL26:
1505 imm = opnd->imm.value;
1506 if (operand_need_shift_by_two (get_operand_from_code (type)))
1507 {
1508 /* The offset value in a PC-relative branch instruction is always
1509 4-byte aligned and is encoded without the lowest 2 bits. */
1510 if (!value_aligned_p (imm, 4))
1511 {
1512 set_unaligned_error (mismatch_detail, idx, 4);
1513 return 0;
1514 }
1515 /* Right shift by 2 so that we can carry out the following check
1516 canonically. */
1517 imm >>= 2;
1518 }
1519 size = get_operand_fields_width (get_operand_from_code (type));
1520 if (!value_fit_signed_field_p (imm, size))
1521 {
1522 set_other_error (mismatch_detail, idx,
1523 _("immediate out of range"));
1524 return 0;
1525 }
1526 break;
1527
1528 default:
1529 break;
1530 }
1531 break;
1532
1533 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1534 if (type == AARCH64_OPND_LEt)
1535 {
1536 /* Get the upper bound for the element index. */
1537 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1538 if (!value_in_range_p (opnd->reglist.index, 0, num))
1539 {
1540 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1541 return 0;
1542 }
1543 }
1544 /* The opcode dependent area stores the number of elements in
1545 each structure to be loaded/stored. */
1546 num = get_opcode_dependent_value (opcode);
1547 switch (type)
1548 {
1549 case AARCH64_OPND_LVt:
1550 assert (num >= 1 && num <= 4);
1551 /* Unless LD1/ST1, the number of registers should be equal to that
1552 of the structure elements. */
1553 if (num != 1 && opnd->reglist.num_regs != num)
1554 {
1555 set_reg_list_error (mismatch_detail, idx, num);
1556 return 0;
1557 }
1558 break;
1559 case AARCH64_OPND_LVt_AL:
1560 case AARCH64_OPND_LEt:
1561 assert (num >= 1 && num <= 4);
1562 /* The number of registers should be equal to that of the structure
1563 elements. */
1564 if (opnd->reglist.num_regs != num)
1565 {
1566 set_reg_list_error (mismatch_detail, idx, num);
1567 return 0;
1568 }
1569 break;
1570 default:
1571 break;
1572 }
1573 break;
1574
1575 case AARCH64_OPND_CLASS_IMMEDIATE:
1576 /* Constraint check on immediate operand. */
1577 imm = opnd->imm.value;
1578 /* E.g. imm_0_31 constrains value to be 0..31. */
1579 if (qualifier_value_in_range_constraint_p (qualifier)
1580 && !value_in_range_p (imm, get_lower_bound (qualifier),
1581 get_upper_bound (qualifier)))
1582 {
1583 set_imm_out_of_range_error (mismatch_detail, idx,
1584 get_lower_bound (qualifier),
1585 get_upper_bound (qualifier));
1586 return 0;
1587 }
1588
1589 switch (type)
1590 {
1591 case AARCH64_OPND_AIMM:
1592 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1593 {
1594 set_other_error (mismatch_detail, idx,
1595 _("invalid shift operator"));
1596 return 0;
1597 }
1598 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1599 {
1600 set_other_error (mismatch_detail, idx,
1601 _("shift amount expected to be 0 or 12"));
1602 return 0;
1603 }
1604 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1605 {
1606 set_other_error (mismatch_detail, idx,
1607 _("immediate out of range"));
1608 return 0;
1609 }
1610 break;
1611
1612 case AARCH64_OPND_HALF:
1613 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1614 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1615 {
1616 set_other_error (mismatch_detail, idx,
1617 _("invalid shift operator"));
1618 return 0;
1619 }
1620 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1621 if (!value_aligned_p (opnd->shifter.amount, 16))
1622 {
1623 set_other_error (mismatch_detail, idx,
1624 _("shift amount should be a multiple of 16"));
1625 return 0;
1626 }
1627 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1628 {
1629 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1630 0, size * 8 - 16);
1631 return 0;
1632 }
1633 if (opnd->imm.value < 0)
1634 {
1635 set_other_error (mismatch_detail, idx,
1636 _("negative immediate value not allowed"));
1637 return 0;
1638 }
1639 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1640 {
1641 set_other_error (mismatch_detail, idx,
1642 _("immediate out of range"));
1643 return 0;
1644 }
1645 break;
1646
1647 case AARCH64_OPND_IMM_MOV:
1648 {
1649 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1650 imm = opnd->imm.value;
1651 assert (idx == 1);
1652 switch (opcode->op)
1653 {
1654 case OP_MOV_IMM_WIDEN:
1655 imm = ~imm;
1656 /* Fall through... */
1657 case OP_MOV_IMM_WIDE:
1658 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
1659 {
1660 set_other_error (mismatch_detail, idx,
1661 _("immediate out of range"));
1662 return 0;
1663 }
1664 break;
1665 case OP_MOV_IMM_LOG:
1666 if (!aarch64_logical_immediate_p (imm, esize, NULL))
1667 {
1668 set_other_error (mismatch_detail, idx,
1669 _("immediate out of range"));
1670 return 0;
1671 }
1672 break;
1673 default:
1674 assert (0);
1675 return 0;
1676 }
1677 }
1678 break;
1679
1680 case AARCH64_OPND_NZCV:
1681 case AARCH64_OPND_CCMP_IMM:
1682 case AARCH64_OPND_EXCEPTION:
1683 case AARCH64_OPND_UIMM4:
1684 case AARCH64_OPND_UIMM7:
1685 case AARCH64_OPND_UIMM3_OP1:
1686 case AARCH64_OPND_UIMM3_OP2:
1687 size = get_operand_fields_width (get_operand_from_code (type));
1688 assert (size < 32);
1689 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1690 {
1691 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1692 (1 << size) - 1);
1693 return 0;
1694 }
1695 break;
1696
1697 case AARCH64_OPND_WIDTH:
1698 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
1699 && opnds[0].type == AARCH64_OPND_Rd);
1700 size = get_upper_bound (qualifier);
1701 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1702 /* lsb+width <= reg.size */
1703 {
1704 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1705 size - opnds[idx-1].imm.value);
1706 return 0;
1707 }
1708 break;
1709
1710 case AARCH64_OPND_LIMM:
1711 {
1712 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
1713 uint64_t uimm = opnd->imm.value;
1714 if (opcode->op == OP_BIC)
1715 uimm = ~uimm;
1716 if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
1717 {
1718 set_other_error (mismatch_detail, idx,
1719 _("immediate out of range"));
1720 return 0;
1721 }
1722 }
1723 break;
1724
1725 case AARCH64_OPND_IMM0:
1726 case AARCH64_OPND_FPIMM0:
1727 if (opnd->imm.value != 0)
1728 {
1729 set_other_error (mismatch_detail, idx,
1730 _("immediate zero expected"));
1731 return 0;
1732 }
1733 break;
1734
1735 case AARCH64_OPND_SHLL_IMM:
1736 assert (idx == 2);
1737 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1738 if (opnd->imm.value != size)
1739 {
1740 set_other_error (mismatch_detail, idx,
1741 _("invalid shift amount"));
1742 return 0;
1743 }
1744 break;
1745
1746 case AARCH64_OPND_IMM_VLSL:
1747 size = aarch64_get_qualifier_esize (qualifier);
1748 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1749 {
1750 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1751 size * 8 - 1);
1752 return 0;
1753 }
1754 break;
1755
1756 case AARCH64_OPND_IMM_VLSR:
1757 size = aarch64_get_qualifier_esize (qualifier);
1758 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1759 {
1760 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1761 return 0;
1762 }
1763 break;
1764
1765 case AARCH64_OPND_SIMD_IMM:
1766 case AARCH64_OPND_SIMD_IMM_SFT:
1767 /* Qualifier check. */
1768 switch (qualifier)
1769 {
1770 case AARCH64_OPND_QLF_LSL:
1771 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1772 {
1773 set_other_error (mismatch_detail, idx,
1774 _("invalid shift operator"));
1775 return 0;
1776 }
1777 break;
1778 case AARCH64_OPND_QLF_MSL:
1779 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1780 {
1781 set_other_error (mismatch_detail, idx,
1782 _("invalid shift operator"));
1783 return 0;
1784 }
1785 break;
1786 case AARCH64_OPND_QLF_NIL:
1787 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1788 {
1789 set_other_error (mismatch_detail, idx,
1790 _("shift is not permitted"));
1791 return 0;
1792 }
1793 break;
1794 default:
1795 assert (0);
1796 return 0;
1797 }
1798 /* Is the immediate valid? */
1799 assert (idx == 1);
1800 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1801 {
1802 /* uimm8 or simm8 */
1803 if (!value_in_range_p (opnd->imm.value, -128, 255))
1804 {
1805 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1806 return 0;
1807 }
1808 }
1809 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1810 {
1811 /* uimm64 is not
1812 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1813 ffffffffgggggggghhhhhhhh'. */
1814 set_other_error (mismatch_detail, idx,
1815 _("invalid value for immediate"));
1816 return 0;
1817 }
1818 /* Is the shift amount valid? */
1819 switch (opnd->shifter.kind)
1820 {
1821 case AARCH64_MOD_LSL:
1822 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1823 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1824 {
1825 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1826 (size - 1) * 8);
1827 return 0;
1828 }
1829 if (!value_aligned_p (opnd->shifter.amount, 8))
1830 {
1831 set_unaligned_error (mismatch_detail, idx, 8);
1832 return 0;
1833 }
1834 break;
1835 case AARCH64_MOD_MSL:
1836 /* Only 8 and 16 are valid shift amounts. */
1837 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1838 {
1839 set_other_error (mismatch_detail, idx,
1840 _("shift amount expected to be 0 or 16"));
1841 return 0;
1842 }
1843 break;
1844 default:
1845 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1846 {
1847 set_other_error (mismatch_detail, idx,
1848 _("invalid shift operator"));
1849 return 0;
1850 }
1851 break;
1852 }
1853 break;
1854
1855 case AARCH64_OPND_FPIMM:
1856 case AARCH64_OPND_SIMD_FPIMM:
1857 if (opnd->imm.is_fp == 0)
1858 {
1859 set_other_error (mismatch_detail, idx,
1860 _("floating-point immediate expected"));
1861 return 0;
1862 }
1863 /* The value is expected to be an 8-bit floating-point constant with
1864 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1865 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1866 instruction). */
1867 if (!value_in_range_p (opnd->imm.value, 0, 255))
1868 {
1869 set_other_error (mismatch_detail, idx,
1870 _("immediate out of range"));
1871 return 0;
1872 }
1873 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1874 {
1875 set_other_error (mismatch_detail, idx,
1876 _("invalid shift operator"));
1877 return 0;
1878 }
1879 break;
1880
1881 default:
1882 break;
1883 }
1884 break;
1885
1886 case AARCH64_OPND_CLASS_CP_REG:
1887 /* Cn or Cm: 4-bit opcode field named for historical reasons.
1888 Valid range: C0 - C15. */
1889 if (opnd->reg.regno > 15)
1890 {
1891 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1892 return 0;
1893 }
1894 break;
1895
1896 case AARCH64_OPND_CLASS_SYSTEM:
1897 switch (type)
1898 {
1899 case AARCH64_OPND_PSTATEFIELD:
1900 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1901 /* MSR UAO, #uimm4
1902 MSR PAN, #uimm4
1903 The immediate must be #0 or #1. */
1904 if ((opnd->pstatefield == 0x03 /* UAO. */
1905 || opnd->pstatefield == 0x04) /* PAN. */
1906 && opnds[1].imm.value > 1)
1907 {
1908 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1909 return 0;
1910 }
1911 /* MSR SPSel, #uimm4
1912 Uses uimm4 as a control value to select the stack pointer: if
1913 bit 0 is set it selects the current exception level's stack
1914 pointer, if bit 0 is clear it selects the shared EL0 stack pointer.
1915 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1916 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1917 {
1918 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1919 return 0;
1920 }
1921 break;
1922 default:
1923 break;
1924 }
1925 break;
1926
1927 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1928 /* Get the upper bound for the element index. */
1929 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1930 /* Index out-of-range. */
1931 if (!value_in_range_p (opnd->reglane.index, 0, num))
1932 {
1933 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1934 return 0;
1935 }
1936 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1937 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
1938 number is encoded in "size:M:Rm":
1939 size <Vm>
1940 00 RESERVED
1941 01 0:Rm
1942 10 M:Rm
1943 11 RESERVED */
1944 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1945 && !value_in_range_p (opnd->reglane.regno, 0, 15))
1946 {
1947 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1948 return 0;
1949 }
1950 break;
1951
1952 case AARCH64_OPND_CLASS_MODIFIED_REG:
1953 assert (idx == 1 || idx == 2);
1954 switch (type)
1955 {
1956 case AARCH64_OPND_Rm_EXT:
1957 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1958 && opnd->shifter.kind != AARCH64_MOD_LSL)
1959 {
1960 set_other_error (mismatch_detail, idx,
1961 _("extend operator expected"));
1962 return 0;
1963 }
1964 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1965 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1966 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1967 case. */
1968 if (!aarch64_stack_pointer_p (opnds + 0)
1969 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1970 {
1971 if (!opnd->shifter.operator_present)
1972 {
1973 set_other_error (mismatch_detail, idx,
1974 _("missing extend operator"));
1975 return 0;
1976 }
1977 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1978 {
1979 set_other_error (mismatch_detail, idx,
1980 _("'LSL' operator not allowed"));
1981 return 0;
1982 }
1983 }
1984 assert (opnd->shifter.operator_present /* Default to LSL. */
1985 || opnd->shifter.kind == AARCH64_MOD_LSL);
1986 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1987 {
1988 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1989 return 0;
1990 }
1991 /* In the 64-bit form, the final register operand is written as Wm
1992 for all but the (possibly omitted) UXTX/LSL and SXTX
1993 operators.
1994 N.B. GAS allows an X register to be used with any operator as a
1995 programming convenience. */
1996 if (qualifier == AARCH64_OPND_QLF_X
1997 && opnd->shifter.kind != AARCH64_MOD_LSL
1998 && opnd->shifter.kind != AARCH64_MOD_UXTX
1999 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2000 {
2001 set_other_error (mismatch_detail, idx, _("W register expected"));
2002 return 0;
2003 }
2004 break;
2005
2006 case AARCH64_OPND_Rm_SFT:
2007 /* ROR is not available to the shifted register operand in
2008 arithmetic instructions. */
2009 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2010 {
2011 set_other_error (mismatch_detail, idx,
2012 _("shift operator expected"));
2013 return 0;
2014 }
2015 if (opnd->shifter.kind == AARCH64_MOD_ROR
2016 && opcode->iclass != log_shift)
2017 {
2018 set_other_error (mismatch_detail, idx,
2019 _("'ROR' operator not allowed"));
2020 return 0;
2021 }
2022 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2023 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2024 {
2025 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2026 return 0;
2027 }
2028 break;
2029
2030 default:
2031 break;
2032 }
2033 break;
2034
2035 default:
2036 break;
2037 }
2038
2039 return 1;
2040 }
2041
2042 /* Main entrypoint for the operand constraint checking.
2043
2044 Return 1 if operands of *INST meet the constraint applied by the operand
2045 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2046 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2047 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2048 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2049 error kind when it is notified that an instruction does not pass the check).
2050
2051 Un-determined operand qualifiers may get established during the process. */
2052
2053 int
2054 aarch64_match_operands_constraint (aarch64_inst *inst,
2055 aarch64_operand_error *mismatch_detail)
2056 {
2057 int i;
2058
2059 DEBUG_TRACE ("enter");
2060
2061 /* Match operands' qualifiers.
2062 *INST has already had qualifiers established for some, if not all, of
2063 its operands; we need to find out whether these established
2064 qualifiers match one of the qualifier sequences in
2065 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2066 with the corresponding qualifier in such a sequence.
2067 Only basic operand constraint checking is done here; the more thorough
2068 constraint checking will be carried out by operand_general_constraint_met_p,
2069 which has to be called after this in order to get all of the operands'
2070 qualifiers established. */
2071 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2072 {
2073 DEBUG_TRACE ("FAIL on operand qualifier matching");
2074 if (mismatch_detail)
2075 {
2076 /* Return an error type to indicate that it is a qualifier
2077 matching failure; we don't care about which operand as there
2078 is enough information in the opcode table to reproduce it. */
2079 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2080 mismatch_detail->index = -1;
2081 mismatch_detail->error = NULL;
2082 }
2083 return 0;
2084 }
2085
2086 /* Match operands' constraint. */
2087 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2088 {
2089 enum aarch64_opnd type = inst->opcode->operands[i];
2090 if (type == AARCH64_OPND_NIL)
2091 break;
2092 if (inst->operands[i].skip)
2093 {
2094 DEBUG_TRACE ("skip the incomplete operand %d", i);
2095 continue;
2096 }
2097 if (operand_general_constraint_met_p (inst->operands, i, type,
2098 inst->opcode, mismatch_detail) == 0)
2099 {
2100 DEBUG_TRACE ("FAIL on operand %d", i);
2101 return 0;
2102 }
2103 }
2104
2105 DEBUG_TRACE ("PASS");
2106
2107 return 1;
2108 }
2109
2110 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2111 Also updates the TYPE of each INST->OPERANDS with the corresponding
2112 value of OPCODE->OPERANDS.
2113
2114 Note that some operand qualifiers may need to be manually cleared by
2115 the caller before it further calls aarch64_opcode_encode; doing
2116 this helps the qualifier matching facilities work
2117 properly. */
2118
2119 const aarch64_opcode*
2120 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2121 {
2122 int i;
2123 const aarch64_opcode *old = inst->opcode;
2124
2125 inst->opcode = opcode;
2126
2127 /* Update the operand types. */
2128 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2129 {
2130 inst->operands[i].type = opcode->operands[i];
2131 if (opcode->operands[i] == AARCH64_OPND_NIL)
2132 break;
2133 }
2134
2135 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2136
2137 return old;
2138 }
2139
2140 int
2141 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2142 {
2143 int i;
2144 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2145 if (operands[i] == operand)
2146 return i;
2147 else if (operands[i] == AARCH64_OPND_NIL)
2148 break;
2149 return -1;
2150 }
2151 \f
2152 /* [0][0] 32-bit integer regs with sp Wn
2153 [0][1] 64-bit integer regs with sp Xn sf=1
2154 [1][0] 32-bit integer regs with #0 Wn
2155 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2156 static const char *int_reg[2][2][32] = {
2157 #define R32 "w"
2158 #define R64 "x"
2159 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2160 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2161 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2162 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
2163 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2164 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2165 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2166 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
2167 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2168 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2169 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2170 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
2171 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2172 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2173 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2174 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
2175 #undef R64
2176 #undef R32
2177 };
2178
2179 /* Return the integer register name.
2180 If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
2181
2182 static inline const char *
2183 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2184 {
2185 const int has_zr = sp_reg_p ? 0 : 1;
2186 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2187 return int_reg[has_zr][is_64][regno];
2188 }
2189
2190 /* Like get_int_reg_name, but IS_64 is always 1. */
2191
2192 static inline const char *
2193 get_64bit_int_reg_name (int regno, int sp_reg_p)
2194 {
2195 const int has_zr = sp_reg_p ? 0 : 1;
2196 return int_reg[has_zr][1][regno];
2197 }
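/* For example, with the int_reg tables above,
   get_int_reg_name (31, AARCH64_OPND_QLF_W, 1) returns "wsp" while
   get_int_reg_name (31, AARCH64_OPND_QLF_W, 0) returns "wzr"; likewise
   get_64bit_int_reg_name (31, 1) returns "sp" and
   get_64bit_int_reg_name (31, 0) returns "xzr".  */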
2198
2199 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2200
2201 typedef union
2202 {
2203 uint64_t i;
2204 double d;
2205 } double_conv_t;
2206
2207 typedef union
2208 {
2209 uint32_t i;
2210 float f;
2211 } single_conv_t;
2212
2213 typedef union
2214 {
2215 uint32_t i;
2216 float f;
2217 } half_conv_t;
2218
2219 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2220 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2221 (depending on the type of the instruction). IMM8 will be expanded to a
2222 single-precision floating-point value (SIZE == 4) or a double-precision
2223 floating-point value (SIZE == 8). A half-precision floating-point value
2224 (SIZE == 2) is expanded to a single-precision floating-point value. The
2225 expanded value is returned. */
2226
2227 static uint64_t
2228 expand_fp_imm (int size, uint32_t imm8)
2229 {
2230 uint64_t imm;
2231 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2232
2233 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2234 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2235 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2236 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2237 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2238 if (size == 8)
2239 {
2240 imm = (imm8_7 << (63-32)) /* imm8<7> */
2241 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2242 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2243 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2244 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2245 imm <<= 32;
2246 }
2247 else if (size == 4 || size == 2)
2248 {
2249 imm = (imm8_7 << 31) /* imm8<7> */
2250 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2251 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2252 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2253 }
2254 else
2255 {
2256 /* An unsupported size. */
2257 assert (0);
2258 }
2259
2260 return imm;
2261 }
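/* For example, imm8 == 0x70 expands to 0x3f800000 (the single-precision
   value 1.0) when SIZE is 4, and to 0x3ff0000000000000 (the
   double-precision value 1.0) when SIZE is 8.  */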
2262
2263 /* Produce the string representation of the register list operand *OPND
2264 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
2265 the register name that comes before the register number, such as "v". */
2266 static void
2267 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2268 const char *prefix)
2269 {
2270 const int num_regs = opnd->reglist.num_regs;
2271 const int first_reg = opnd->reglist.first_regno;
2272 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2273 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2274 char tb[8]; /* Temporary buffer. */
2275
2276 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2277 assert (num_regs >= 1 && num_regs <= 4);
2278
2279 /* Prepare the index if any. */
2280 if (opnd->reglist.has_index)
2281 snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2282 else
2283 tb[0] = '\0';
2284
2285 /* The hyphenated form is preferred for disassembly if there are
2286 more than two registers in the list, and the register numbers
2287 are monotonically increasing in increments of one. */
2288 if (num_regs > 2 && last_reg > first_reg)
2289 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2290 prefix, last_reg, qlf_name, tb);
2291 else
2292 {
2293 const int reg0 = first_reg;
2294 const int reg1 = (first_reg + 1) & 0x1f;
2295 const int reg2 = (first_reg + 2) & 0x1f;
2296 const int reg3 = (first_reg + 3) & 0x1f;
2297
2298 switch (num_regs)
2299 {
2300 case 1:
2301 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2302 break;
2303 case 2:
2304 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2305 prefix, reg1, qlf_name, tb);
2306 break;
2307 case 3:
2308 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2309 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2310 prefix, reg2, qlf_name, tb);
2311 break;
2312 case 4:
2313 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2314 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2315 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2316 break;
2317 }
2318 }
2319 }
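/* For example, a list of four registers starting at v4 with the 4S
   qualifier is printed in the hyphenated form "{v4.4s-v7.4s}", whereas a
   four-register list starting at v30 wraps around and is printed as
   "{v30.4s, v31.4s, v0.4s, v1.4s}", since the last register number is not
   greater than the first.  */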
2320
2321 /* Produce the string representation of the register offset address operand
2322 *OPND in the buffer pointed by BUF of size SIZE. */
2323 static void
2324 print_register_offset_address (char *buf, size_t size,
2325 const aarch64_opnd_info *opnd)
2326 {
2327 char tb[16]; /* Temporary buffer. */
2328 bfd_boolean lsl_p = FALSE; /* Is LSL shift operator? */
2329 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2330 bfd_boolean print_extend_p = TRUE;
2331 bfd_boolean print_amount_p = TRUE;
2332 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2333
2334 switch (opnd->shifter.kind)
2335 {
2336 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2337 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2338 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2339 case AARCH64_MOD_SXTX: break;
2340 default: assert (0);
2341 }
2342
2343 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2344 || !opnd->shifter.amount_present))
2345 {
2346 /* Don't print the shift/extend amount when the amount is zero and
2347 this is not the special case of an 8-bit load/store instruction. */
2348 print_amount_p = FALSE;
2349 /* Likewise, no need to print the shift operator LSL in such a
2350 situation. */
2351 if (lsl_p)
2352 print_extend_p = FALSE;
2353 }
2354
2355 /* Prepare for the extend/shift. */
2356 if (print_extend_p)
2357 {
2358 if (print_amount_p)
2359 snprintf (tb, sizeof (tb), ",%s #%d", shift_name, opnd->shifter.amount);
2360 else
2361 snprintf (tb, sizeof (tb), ",%s", shift_name);
2362 }
2363 else
2364 tb[0] = '\0';
2365
2366 snprintf (buf, size, "[%s,%s%s]",
2367 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2368 get_int_reg_name (opnd->addr.offset.regno,
2369 wm_p ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X,
2370 0 /* sp_reg_p */),
2371 tb);
2372 }
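/* For example, a UXTW-extended W-register offset with amount 2 is printed
   as "[x1,w2,uxtw #2]", while an LSL offset whose amount is zero (outside
   the 8-bit load/store special case) is printed simply as "[x1,x2]".  */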
2373
2374 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2375 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2376 PC, PCREL_P and ADDRESS are used to pass in and return information about
2377 the PC-relative address calculation, where the PC value is passed in
2378 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
2379 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2380 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2381
2382 The function serves both the disassembler and the assembler diagnostics
2383 issuer, which is the reason why it lives in this file. */
2384
2385 void
2386 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2387 const aarch64_opcode *opcode,
2388 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2389 bfd_vma *address)
2390 {
2391 int i;
2392 const char *name = NULL;
2393 const aarch64_opnd_info *opnd = opnds + idx;
2394 enum aarch64_modifier_kind kind;
2395 uint64_t addr;
2396
2397 buf[0] = '\0';
2398 if (pcrel_p)
2399 *pcrel_p = 0;
2400
2401 switch (opnd->type)
2402 {
2403 case AARCH64_OPND_Rd:
2404 case AARCH64_OPND_Rn:
2405 case AARCH64_OPND_Rm:
2406 case AARCH64_OPND_Rt:
2407 case AARCH64_OPND_Rt2:
2408 case AARCH64_OPND_Rs:
2409 case AARCH64_OPND_Ra:
2410 case AARCH64_OPND_Rt_SYS:
2411 case AARCH64_OPND_PAIRREG:
2412 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2413 the <ic_op>, therefore we use opnd->present to override the
2414 generic optional-ness information. */
2415 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2416 break;
2417 /* Omit the operand, e.g. RET. */
2418 if (optional_operand_p (opcode, idx)
2419 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2420 break;
2421 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2422 || opnd->qualifier == AARCH64_OPND_QLF_X);
2423 snprintf (buf, size, "%s",
2424 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2425 break;
2426
2427 case AARCH64_OPND_Rd_SP:
2428 case AARCH64_OPND_Rn_SP:
2429 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2430 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2431 || opnd->qualifier == AARCH64_OPND_QLF_X
2432 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2433 snprintf (buf, size, "%s",
2434 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2435 break;
2436
2437 case AARCH64_OPND_Rm_EXT:
2438 kind = opnd->shifter.kind;
2439 assert (idx == 1 || idx == 2);
2440 if ((aarch64_stack_pointer_p (opnds)
2441 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2442 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2443 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2444 && kind == AARCH64_MOD_UXTW)
2445 || (opnd->qualifier == AARCH64_OPND_QLF_X
2446 && kind == AARCH64_MOD_UXTX)))
2447 {
2448 /* 'LSL' is the preferred form in this case. */
2449 kind = AARCH64_MOD_LSL;
2450 if (opnd->shifter.amount == 0)
2451 {
2452 /* Shifter omitted. */
2453 snprintf (buf, size, "%s",
2454 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2455 break;
2456 }
2457 }
2458 if (opnd->shifter.amount)
2459 snprintf (buf, size, "%s, %s #%d",
2460 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2461 aarch64_operand_modifiers[kind].name,
2462 opnd->shifter.amount);
2463 else
2464 snprintf (buf, size, "%s, %s",
2465 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2466 aarch64_operand_modifiers[kind].name);
2467 break;
2468
2469 case AARCH64_OPND_Rm_SFT:
2470 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2471 || opnd->qualifier == AARCH64_OPND_QLF_X);
2472 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2473 snprintf (buf, size, "%s",
2474 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2475 else
2476 snprintf (buf, size, "%s, %s #%d",
2477 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2478 aarch64_operand_modifiers[opnd->shifter.kind].name,
2479 opnd->shifter.amount);
2480 break;
2481
2482 case AARCH64_OPND_Fd:
2483 case AARCH64_OPND_Fn:
2484 case AARCH64_OPND_Fm:
2485 case AARCH64_OPND_Fa:
2486 case AARCH64_OPND_Ft:
2487 case AARCH64_OPND_Ft2:
2488 case AARCH64_OPND_Sd:
2489 case AARCH64_OPND_Sn:
2490 case AARCH64_OPND_Sm:
2491 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
2492 opnd->reg.regno);
2493 break;
2494
2495 case AARCH64_OPND_Vd:
2496 case AARCH64_OPND_Vn:
2497 case AARCH64_OPND_Vm:
2498 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2499 aarch64_get_qualifier_name (opnd->qualifier));
2500 break;
2501
2502 case AARCH64_OPND_Ed:
2503 case AARCH64_OPND_En:
2504 case AARCH64_OPND_Em:
2505 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
2506 aarch64_get_qualifier_name (opnd->qualifier),
2507 opnd->reglane.index);
2508 break;
2509
2510 case AARCH64_OPND_VdD1:
2511 case AARCH64_OPND_VnD1:
2512 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
2513 break;
2514
2515 case AARCH64_OPND_LVn:
2516 case AARCH64_OPND_LVt:
2517 case AARCH64_OPND_LVt_AL:
2518 case AARCH64_OPND_LEt:
2519 print_register_list (buf, size, opnd, "v");
2520 break;
2521
2522 case AARCH64_OPND_Cn:
2523 case AARCH64_OPND_Cm:
2524 snprintf (buf, size, "C%d", opnd->reg.regno);
2525 break;
2526
2527 case AARCH64_OPND_IDX:
2528 case AARCH64_OPND_IMM:
2529 case AARCH64_OPND_WIDTH:
2530 case AARCH64_OPND_UIMM3_OP1:
2531 case AARCH64_OPND_UIMM3_OP2:
2532 case AARCH64_OPND_BIT_NUM:
2533 case AARCH64_OPND_IMM_VLSL:
2534 case AARCH64_OPND_IMM_VLSR:
2535 case AARCH64_OPND_SHLL_IMM:
2536 case AARCH64_OPND_IMM0:
2537 case AARCH64_OPND_IMMR:
2538 case AARCH64_OPND_IMMS:
2539 case AARCH64_OPND_FBITS:
2540 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2541 break;
2542
2543 case AARCH64_OPND_IMM_MOV:
2544 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2545 {
2546 case 4: /* e.g. MOV Wd, #<imm32>. */
2547 {
2548 int imm32 = opnd->imm.value;
2549 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
2550 }
2551 break;
2552 case 8: /* e.g. MOV Xd, #<imm64>. */
2553 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2554 opnd->imm.value, opnd->imm.value);
2555 break;
2556 default: assert (0);
2557 }
2558 break;
2559
2560 case AARCH64_OPND_FPIMM0:
2561 snprintf (buf, size, "#0.0");
2562 break;
2563
2564 case AARCH64_OPND_LIMM:
2565 case AARCH64_OPND_AIMM:
2566 case AARCH64_OPND_HALF:
2567 if (opnd->shifter.amount)
2568 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
2569 opnd->shifter.amount);
2570 else
2571 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2572 break;
2573
2574 case AARCH64_OPND_SIMD_IMM:
2575 case AARCH64_OPND_SIMD_IMM_SFT:
2576 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2577 || opnd->shifter.kind == AARCH64_MOD_NONE)
2578 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2579 else
2580 snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
2581 aarch64_operand_modifiers[opnd->shifter.kind].name,
2582 opnd->shifter.amount);
2583 break;
2584
2585 case AARCH64_OPND_FPIMM:
2586 case AARCH64_OPND_SIMD_FPIMM:
2587 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2588 {
2589 case 2: /* e.g. FMOV <Hd>, #<imm>. */
2590 {
2591 half_conv_t c;
2592 c.i = expand_fp_imm (2, opnd->imm.value);
2593 snprintf (buf, size, "#%.18e", c.f);
2594 }
2595 break;
2596 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2597 {
2598 single_conv_t c;
2599 c.i = expand_fp_imm (4, opnd->imm.value);
2600 snprintf (buf, size, "#%.18e", c.f);
2601 }
2602 break;
2603 case 8: /* e.g. FMOV <Dd>, #<imm>. */
2604 {
2605 double_conv_t c;
2606 c.i = expand_fp_imm (8, opnd->imm.value);
2607 snprintf (buf, size, "#%.18e", c.d);
2608 }
2609 break;
2610 default: assert (0);
2611 }
2612 break;
2613
2614 case AARCH64_OPND_CCMP_IMM:
2615 case AARCH64_OPND_NZCV:
2616 case AARCH64_OPND_EXCEPTION:
2617 case AARCH64_OPND_UIMM4:
2618 case AARCH64_OPND_UIMM7:
2619 if (optional_operand_p (opcode, idx) == TRUE
2620 && (opnd->imm.value ==
2621 (int64_t) get_optional_operand_default_value (opcode)))
2622 /* Omit the operand, e.g. DCPS1. */
2623 break;
2624 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
2625 break;
2626
2627 case AARCH64_OPND_COND:
2628 case AARCH64_OPND_COND1:
2629 snprintf (buf, size, "%s", opnd->cond->names[0]);
2630 break;
2631
2632 case AARCH64_OPND_ADDR_ADRP:
2633 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
2634 + opnd->imm.value;
2635 if (pcrel_p)
2636 *pcrel_p = 1;
2637 if (address)
2638 *address = addr;
2639 /* This is not necessary during disassembly, as print_address_func
2640 in the disassemble_info will take care of the printing. But some
2641 other callers may still be interested in getting the string in *BUF,
2642 so here we do snprintf regardless. */
2643 snprintf (buf, size, "#0x%" PRIx64, addr);
2644 break;
2645
2646 case AARCH64_OPND_ADDR_PCREL14:
2647 case AARCH64_OPND_ADDR_PCREL19:
2648 case AARCH64_OPND_ADDR_PCREL21:
2649 case AARCH64_OPND_ADDR_PCREL26:
2650 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
2651 if (pcrel_p)
2652 *pcrel_p = 1;
2653 if (address)
2654 *address = addr;
2655 /* This is not necessary during disassembly, as print_address_func
2656 in the disassemble_info will take care of the printing. But some
2657 other callers may still be interested in getting the string in *BUF,
2658 so here we do snprintf regardless. */
2659 snprintf (buf, size, "#0x%" PRIx64, addr);
2660 break;
2661
2662 case AARCH64_OPND_ADDR_SIMPLE:
2663 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
2664 case AARCH64_OPND_SIMD_ADDR_POST:
2665 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2666 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
2667 {
2668 if (opnd->addr.offset.is_reg)
2669 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
2670 else
2671 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
2672 }
2673 else
2674 snprintf (buf, size, "[%s]", name);
2675 break;
2676
2677 case AARCH64_OPND_ADDR_REGOFF:
2678 print_register_offset_address (buf, size, opnd);
2679 break;
2680
2681 case AARCH64_OPND_ADDR_SIMM7:
2682 case AARCH64_OPND_ADDR_SIMM9:
2683 case AARCH64_OPND_ADDR_SIMM9_2:
2684 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2685 if (opnd->addr.writeback)
2686 {
2687 if (opnd->addr.preind)
2688 snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
2689 else
2690 snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
2691 }
2692 else
2693 {
2694 if (opnd->addr.offset.imm)
2695 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2696 else
2697 snprintf (buf, size, "[%s]", name);
2698 }
2699 break;
2700
2701 case AARCH64_OPND_ADDR_UIMM12:
2702 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2703 if (opnd->addr.offset.imm)
2704 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2705 else
2706 snprintf (buf, size, "[%s]", name);
2707 break;
2708
2709 case AARCH64_OPND_SYSREG:
2710 for (i = 0; aarch64_sys_regs[i].name; ++i)
2711 if (aarch64_sys_regs[i].value == opnd->sysreg
2712 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
2713 break;
2714 if (aarch64_sys_regs[i].name)
2715 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
2716 else
2717 {
2718 /* Implementation defined system register. */
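/* For example, a register encoded with op0=3, op1=0, CRn=11, CRm=0,
   op2=2 (value 0xc582) would be printed as "s3_0_c11_c0_2" by the
   snprintf below.  */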
2719 unsigned int value = opnd->sysreg;
2720 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
2721 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
2722 value & 0x7);
2723 }
2724 break;
2725
2726 case AARCH64_OPND_PSTATEFIELD:
2727 for (i = 0; aarch64_pstatefields[i].name; ++i)
2728 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2729 break;
2730 assert (aarch64_pstatefields[i].name);
2731 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
2732 break;
2733
2734 case AARCH64_OPND_SYSREG_AT:
2735 case AARCH64_OPND_SYSREG_DC:
2736 case AARCH64_OPND_SYSREG_IC:
2737 case AARCH64_OPND_SYSREG_TLBI:
2738 snprintf (buf, size, "%s", opnd->sysins_op->name);
2739 break;
2740
2741 case AARCH64_OPND_BARRIER:
2742 snprintf (buf, size, "%s", opnd->barrier->name);
2743 break;
2744
2745 case AARCH64_OPND_BARRIER_ISB:
2746 /* Operand can be omitted, e.g. in ISB. */
2747 if (! optional_operand_p (opcode, idx)
2748 || (opnd->barrier->value
2749 != get_optional_operand_default_value (opcode)))
2750 snprintf (buf, size, "#0x%x", opnd->barrier->value);
2751 break;
2752
2753 case AARCH64_OPND_PRFOP:
2754 if (opnd->prfop->name != NULL)
2755 snprintf (buf, size, "%s", opnd->prfop->name);
2756 else
2757 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
2758 break;
2759
2760 case AARCH64_OPND_BARRIER_PSB:
2761 snprintf (buf, size, "%s", opnd->hint_option->name);
2762 break;
2763
2764 default:
2765 assert (0);
2766 }
2767 }
2768 \f
2769 #define CPENC(op0,op1,crn,crm,op2) \
2770 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2771 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
2772 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2773 /* for 3.9.10 System Instructions */
2774 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
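/* For example, CPENC (3, 3, C13, C0, 2), the encoding used for tpidr_el0
   below, packs op0=3, op1=3, CRn=13, CRm=0, op2=2 into the 16-bit value
   0xde82; the individual fields can be recovered with the shifts used in
   aarch64_print_operand above.  */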
2775
2776 #define C0 0
2777 #define C1 1
2778 #define C2 2
2779 #define C3 3
2780 #define C4 4
2781 #define C5 5
2782 #define C6 6
2783 #define C7 7
2784 #define C8 8
2785 #define C9 9
2786 #define C10 10
2787 #define C11 11
2788 #define C12 12
2789 #define C13 13
2790 #define C14 14
2791 #define C15 15
2792
2793 #ifdef F_DEPRECATED
2794 #undef F_DEPRECATED
2795 #endif
2796 #define F_DEPRECATED 0x1 /* Deprecated system register. */
2797
2798 #ifdef F_ARCHEXT
2799 #undef F_ARCHEXT
2800 #endif
2801 #define F_ARCHEXT 0x2 /* Architecture dependent system register. */
2802
2803 #ifdef F_HASXT
2804 #undef F_HASXT
2805 #endif
2806 #define F_HASXT 0x4 /* System instruction register <Xt>
2807 operand. */
2808
2809
2810 /* TODO: there are two more issues that need to be resolved:
2811 1. handle read-only and write-only system registers
2812 2. handle cpu-implementation-defined system registers. */
2813 const aarch64_sys_reg aarch64_sys_regs [] =
2814 {
2815 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
2816 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
2817 { "elr_el1", CPEN_(0,C0,1), 0 },
2818 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
2819 { "sp_el0", CPEN_(0,C1,0), 0 },
2820 { "spsel", CPEN_(0,C2,0), 0 },
2821 { "daif", CPEN_(3,C2,1), 0 },
2822 { "currentel", CPEN_(0,C2,2), 0 }, /* RO */
2823 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
2824 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
2825 { "nzcv", CPEN_(3,C2,0), 0 },
2826 { "fpcr", CPEN_(3,C4,0), 0 },
2827 { "fpsr", CPEN_(3,C4,1), 0 },
2828 { "dspsr_el0", CPEN_(3,C5,0), 0 },
2829 { "dlr_el0", CPEN_(3,C5,1), 0 },
2830 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
2831 { "elr_el2", CPEN_(4,C0,1), 0 },
2832 { "sp_el1", CPEN_(4,C1,0), 0 },
2833 { "spsr_irq", CPEN_(4,C3,0), 0 },
2834 { "spsr_abt", CPEN_(4,C3,1), 0 },
2835 { "spsr_und", CPEN_(4,C3,2), 0 },
2836 { "spsr_fiq", CPEN_(4,C3,3), 0 },
2837 { "spsr_el3", CPEN_(6,C0,0), 0 },
2838 { "elr_el3", CPEN_(6,C0,1), 0 },
2839 { "sp_el2", CPEN_(6,C1,0), 0 },
2840 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
2841 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
2842 { "midr_el1", CPENC(3,0,C0,C0,0), 0 }, /* RO */
2843 { "ctr_el0", CPENC(3,3,C0,C0,1), 0 }, /* RO */
2844 { "mpidr_el1", CPENC(3,0,C0,C0,5), 0 }, /* RO */
2845 { "revidr_el1", CPENC(3,0,C0,C0,6), 0 }, /* RO */
2846 { "aidr_el1", CPENC(3,1,C0,C0,7), 0 }, /* RO */
2847 { "dczid_el0", CPENC(3,3,C0,C0,7), 0 }, /* RO */
2848 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), 0 }, /* RO */
2849 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), 0 }, /* RO */
2850 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), 0 }, /* RO */
2851 { "id_afr0_el1", CPENC(3,0,C0,C1,3), 0 }, /* RO */
2852 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), 0 }, /* RO */
2853 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), 0 }, /* RO */
2854 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), 0 }, /* RO */
2855 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), 0 }, /* RO */
2856 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), 0 }, /* RO */
2857 { "id_isar0_el1", CPENC(3,0,C0,C2,0), 0 }, /* RO */
2858 { "id_isar1_el1", CPENC(3,0,C0,C2,1), 0 }, /* RO */
2859 { "id_isar2_el1", CPENC(3,0,C0,C2,2), 0 }, /* RO */
2860 { "id_isar3_el1", CPENC(3,0,C0,C2,3), 0 }, /* RO */
2861 { "id_isar4_el1", CPENC(3,0,C0,C2,4), 0 }, /* RO */
2862 { "id_isar5_el1", CPENC(3,0,C0,C2,5), 0 }, /* RO */
2863 { "mvfr0_el1", CPENC(3,0,C0,C3,0), 0 }, /* RO */
2864 { "mvfr1_el1", CPENC(3,0,C0,C3,1), 0 }, /* RO */
2865 { "mvfr2_el1", CPENC(3,0,C0,C3,2), 0 }, /* RO */
2866 { "ccsidr_el1", CPENC(3,1,C0,C0,0), 0 }, /* RO */
2867 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), 0 }, /* RO */
2868 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), 0 }, /* RO */
2869 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), 0 }, /* RO */
2870 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), 0 }, /* RO */
2871 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), 0 }, /* RO */
2872 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), 0 }, /* RO */
2873 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), 0 }, /* RO */
2874 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), 0 }, /* RO */
2875 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
2876 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), 0 }, /* RO */
2877 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), 0 }, /* RO */
2878 { "clidr_el1", CPENC(3,1,C0,C0,1), 0 }, /* RO */
2879 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 }, /* RO */
2880 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
2881 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
2882 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
2883 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
2884 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
2885 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
2886 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
2887 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
2888 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
2889 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
2890 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
2891 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
2892 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
2893 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
2894 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
2895 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
2896 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
2897 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
2898 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
2899 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
2900 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
2901 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
2902 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
2903 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
2904 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
2905 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
2906 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
2907 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
2908 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
2909 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
2910 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
2911 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
2912 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
2913 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
2914 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
2915 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
2916 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
2917 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
2918 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
2919 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
2920 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
2921 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
2922 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
2923 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
2924 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
2925 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
2926 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
2927 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
2928 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
2929 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
2930 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
2931 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
2932 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
2933 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
2934 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
2935 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
2936 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
2937 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
2938 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
2939 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
2940 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
2941 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
2942 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
2943 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
2944 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
2945 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
2946 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
2947 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
2948 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
2949 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
2950 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
2951 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
2952 { "rvbar_el1", CPENC(3,0,C12,C0,1), 0 }, /* RO */
2953 { "rvbar_el2", CPENC(3,4,C12,C0,1), 0 }, /* RO */
2954 { "rvbar_el3", CPENC(3,6,C12,C0,1), 0 }, /* RO */
2955 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
2956 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
2957 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
2958 { "isr_el1", CPENC(3,0,C12,C1,0), 0 }, /* RO */
2959 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
2960 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
2961 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
2962 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
2963 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
2964 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
2965 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RO */
2966 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
2967 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
2968 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
2969 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
2970 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RO */
2971 { "cntpct_el0", CPENC(3,3,C14,C0,1), 0 }, /* RO */
2972 { "cntvct_el0", CPENC(3,3,C14,C0,2), 0 }, /* RO */
2973 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
2974 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
2975 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
2976 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
2977 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
2978 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
2979 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
2980 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
2981 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
2982 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
2983 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
2984 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
2985 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
2986 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
2987 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
2988 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
2989 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
2990 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
2991 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
2992 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
2993 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
2994 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
2995 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
2996 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
2997 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
2998 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
2999 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
3000 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
3001 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
3002 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
3003 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), 0 }, /* r */
3004 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
3005 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
3006 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* r */
3007 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), 0 }, /* w */
3008 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 }, /* r */
3009 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 }, /* w */
3010 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
3011 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
3012 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
3013 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
3014 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
3015 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
3016 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
3017 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
3018 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
3019 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
3020 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
3021 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
3022 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
3023 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
3024 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
3025 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
3026 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
3027 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
3028 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
3029 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
3030 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
3031 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
3032 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
3033 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
3034 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
3035 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
3036 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
3037 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
3038 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
3039 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
3040 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
3041 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
3042 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
3043 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
3044 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
3045 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
3046 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
3047 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
3048 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
3049 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
3050 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
3051 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
3052 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
3053 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
3054 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
3055 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
3056 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
3057 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
3058 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
3059 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
3060 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
3061 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
3062 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
3063 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
3064 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
3065 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
3066 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
3067 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
3068 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
3069 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
3070 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
3071 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
3072 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
3073 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
3074 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
3075 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
3076 { "mdrar_el1", CPENC(2,0,C1, C0, 0), 0 }, /* r */
3077 { "oslar_el1", CPENC(2,0,C1, C0, 4), 0 }, /* w */
3078 { "oslsr_el1", CPENC(2,0,C1, C1, 4), 0 }, /* r */
3079 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
3080 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
3081 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
3082 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
3083 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), 0 }, /* r */
3084 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
3085 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
3086 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
3087 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT }, /* ro */
3088 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
3089 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
3090 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
3091 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
3092 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
3093 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
3094 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* ro */
3095 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
3096 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
3097 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
3098 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
3099 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
3100 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
3101 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), 0 }, /* w */
3102 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
3103 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), 0 }, /* r */
3104 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), 0 }, /* r */
3105 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
3106 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
3107 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
3108 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
3109 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
3110 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
3111 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
3112 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
3113 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
3114 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
3115 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
3116 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
3117 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
3118 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
3119 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
3120 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
3121 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
3122 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
3123 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
3124 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
3125 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
3126 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
3127 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
3128 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
3129 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
3130 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
3131 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
3132 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
3133 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
3134 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
3135 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
3136 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
3137 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
3138 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
3139 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
3140 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
3141 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
3142 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
3143 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
3144 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
3145 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
3146 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
3147 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
3148 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
3149 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
3150 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
3151 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
3152 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
3153 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
3154 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
3155 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
3156 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
3157 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
3158 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
3159 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
3160 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
3161 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
3162 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
3163 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
3164 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
3165 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
3166 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
3167 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
3168 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
3169 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
3170 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
3171 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
3172 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
3173 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
3174 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
3175 { 0, CPENC(0,0,0,0,0), 0 },
3176 };
3177
3178 bfd_boolean
3179 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3180 {
3181 return (reg->flags & F_DEPRECATED) != 0;
3182 }
3183
3184 bfd_boolean
3185 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
3186 const aarch64_sys_reg *reg)
3187 {
3188 if (!(reg->flags & F_ARCHEXT))
3189 return TRUE;
3190
3191 /* PAN. Values are from aarch64_sys_regs. */
3192 if (reg->value == CPEN_(0,C2,3)
3193 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3194 return FALSE;
3195
3196 /* Virtualization host extensions: system registers. */
3197 if ((reg->value == CPENC (3, 4, C2, C0, 1)
3198 || reg->value == CPENC (3, 4, C13, C0, 1)
3199 || reg->value == CPENC (3, 4, C14, C3, 0)
3200 || reg->value == CPENC (3, 4, C14, C3, 1)
3201 || reg->value == CPENC (3, 4, C14, C3, 2))
3202 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3203 return FALSE;
3204
3205 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
3206 if ((reg->value == CPEN_ (5, C0, 0)
3207 || reg->value == CPEN_ (5, C0, 1)
3208 || reg->value == CPENC (3, 5, C1, C0, 0)
3209 || reg->value == CPENC (3, 5, C1, C0, 2)
3210 || reg->value == CPENC (3, 5, C2, C0, 0)
3211 || reg->value == CPENC (3, 5, C2, C0, 1)
3212 || reg->value == CPENC (3, 5, C2, C0, 2)
3213 || reg->value == CPENC (3, 5, C5, C1, 0)
3214 || reg->value == CPENC (3, 5, C5, C1, 1)
3215 || reg->value == CPENC (3, 5, C5, C2, 0)
3216 || reg->value == CPENC (3, 5, C6, C0, 0)
3217 || reg->value == CPENC (3, 5, C10, C2, 0)
3218 || reg->value == CPENC (3, 5, C10, C3, 0)
3219 || reg->value == CPENC (3, 5, C12, C0, 0)
3220 || reg->value == CPENC (3, 5, C13, C0, 1)
3221 || reg->value == CPENC (3, 5, C14, C1, 0))
3222 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3223 return FALSE;
3224
3225 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
3226 if ((reg->value == CPENC (3, 5, C14, C2, 0)
3227 || reg->value == CPENC (3, 5, C14, C2, 1)
3228 || reg->value == CPENC (3, 5, C14, C2, 2)
3229 || reg->value == CPENC (3, 5, C14, C3, 0)
3230 || reg->value == CPENC (3, 5, C14, C3, 1)
3231 || reg->value == CPENC (3, 5, C14, C3, 2))
3232 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
3233 return FALSE;
3234
3235 /* ARMv8.2 features. */
3236
3237 /* ID_AA64MMFR2_EL1. */
3238 if (reg->value == CPENC (3, 0, C0, C7, 2)
3239 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3240 return FALSE;
3241
3242 /* PSTATE.UAO. */
3243 if (reg->value == CPEN_ (0, C2, 4)
3244 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3245 return FALSE;
3246
3247 /* RAS extension. */
3248
3249 /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1, ERXADDR_EL1,
3250 ERXMISC0_EL1 and ERXMISC1_EL1. */
3251 if ((reg->value == CPENC (3, 0, C5, C3, 0)
3252 || reg->value == CPENC (3, 0, C5, C3, 1)
3253 || reg->value == CPENC (3, 0, C5, C3, 2)
3254 || reg->value == CPENC (3, 0, C5, C3, 3)
3255 || reg->value == CPENC (3, 0, C5, C4, 0)
3256 || reg->value == CPENC (3, 0, C5, C4, 1)
3257 || reg->value == CPENC (3, 0, C5, C4, 2)
3258 || reg->value == CPENC (3, 0, C5, C4, 3)
3259 || reg->value == CPENC (3, 0, C5, C5, 0)
3260 || reg->value == CPENC (3, 0, C5, C5, 1))
3261 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3262 return FALSE;
3263
3264 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
3265 if ((reg->value == CPENC (3, 4, C5, C2, 3)
3266 || reg->value == CPENC (3, 0, C12, C1, 1)
3267 || reg->value == CPENC (3, 4, C12, C1, 1))
3268 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
3269 return FALSE;
3270
3271 /* Statistical Profiling extension. */
3272 if ((reg->value == CPENC (3, 0, C9, C10, 0)
3273 || reg->value == CPENC (3, 0, C9, C10, 1)
3274 || reg->value == CPENC (3, 0, C9, C10, 3)
3275 || reg->value == CPENC (3, 0, C9, C10, 7)
3276 || reg->value == CPENC (3, 0, C9, C9, 0)
3277 || reg->value == CPENC (3, 0, C9, C9, 2)
3278 || reg->value == CPENC (3, 0, C9, C9, 3)
3279 || reg->value == CPENC (3, 0, C9, C9, 4)
3280 || reg->value == CPENC (3, 0, C9, C9, 5)
3281 || reg->value == CPENC (3, 0, C9, C9, 6)
3282 || reg->value == CPENC (3, 0, C9, C9, 7)
3283 || reg->value == CPENC (3, 4, C9, C9, 0)
3284 || reg->value == CPENC (3, 5, C9, C9, 0))
3285 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
3286 return FALSE;
3287
3288 return TRUE;
3289 }
3290
3291 const aarch64_sys_reg aarch64_pstatefields [] =
3292 {
3293 { "spsel", 0x05, 0 },
3294 { "daifset", 0x1e, 0 },
3295 { "daifclr", 0x1f, 0 },
3296 { "pan", 0x04, F_ARCHEXT },
3297 { "uao", 0x03, F_ARCHEXT },
3298 { 0, CPENC(0,0,0,0,0), 0 },
3299 };
3300
3301 bfd_boolean
3302 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
3303 const aarch64_sys_reg *reg)
3304 {
3305 if (!(reg->flags & F_ARCHEXT))
3306 return TRUE;
3307
3308 /* PAN. Values are from aarch64_pstatefields. */
3309 if (reg->value == 0x04
3310 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
3311 return FALSE;
3312
3313 /* UAO. Values are from aarch64_pstatefields. */
3314 if (reg->value == 0x03
3315 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3316 return FALSE;
3317
3318 return TRUE;
3319 }
3320
3321 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3322 {
3323 { "ialluis", CPENS(0,C7,C1,0), 0 },
3324 { "iallu", CPENS(0,C7,C5,0), 0 },
3325 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
3326 { 0, CPENS(0,0,0,0), 0 }
3327 };
3328
3329 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3330 {
3331 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
3332 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
3333 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
3334 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
3335 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
3336 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
3337 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
3338 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
3339 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
3340 { 0, CPENS(0,0,0,0), 0 }
3341 };
3342
3343 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3344 {
3345 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
3346 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
3347 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
3348 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
3349 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
3350 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
3351 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
3352 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
3353 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
3354 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
3355 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
3356 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
3357 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
3358 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
3359 { 0, CPENS(0,0,0,0), 0 }
3360 };
3361
3362 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3363 {
3364 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3365 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
3366 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
3367 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
3368 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3369 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
3370 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
3371 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
3372 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
3373 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
3374 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
3375 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
3376 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
3377 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
3378 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3379 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3380 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
3381 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
3382 { "alle2", CPENS(4,C8,C7,0), 0 },
3383 { "alle2is", CPENS(4,C8,C3,0), 0 },
3384 { "alle1", CPENS(4,C8,C7,4), 0 },
3385 { "alle1is", CPENS(4,C8,C3,4), 0 },
3386 { "alle3", CPENS(6,C8,C7,0), 0 },
3387 { "alle3is", CPENS(6,C8,C3,0), 0 },
3388 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
3389 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
3390 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
3391 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
3392 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
3393 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
3394 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
3395 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
3396 { 0, CPENS(0,0,0,0), 0 }
3397 };
3398
3399 bfd_boolean
3400 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
3401 {
3402 return (sys_ins_reg->flags & F_HASXT) != 0;
3403 }
3404
3405 extern bfd_boolean
3406 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
3407 const aarch64_sys_ins_reg *reg)
3408 {
3409 if (!(reg->flags & F_ARCHEXT))
3410 return TRUE;
3411
3412 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
3413 if (reg->value == CPENS (3, C7, C12, 1)
3414 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3415 return FALSE;
3416
3417 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
3418 if ((reg->value == CPENS (0, C7, C9, 0)
3419 || reg->value == CPENS (0, C7, C9, 1))
3420 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
3421 return FALSE;
3422
3423 return TRUE;
3424 }
3425
3426 #undef C0
3427 #undef C1
3428 #undef C2
3429 #undef C3
3430 #undef C4
3431 #undef C5
3432 #undef C6
3433 #undef C7
3434 #undef C8
3435 #undef C9
3436 #undef C10
3437 #undef C11
3438 #undef C12
3439 #undef C13
3440 #undef C14
3441 #undef C15
3442
3443 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
3444 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
3445
3446 static bfd_boolean
3447 verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
3448 const aarch64_insn insn)
3449 {
3450 int t = BITS (insn, 4, 0);
3451 int n = BITS (insn, 9, 5);
3452 int t2 = BITS (insn, 14, 10);
3453
3454 if (BIT (insn, 23))
3455 {
3456 /* Write back enabled. */
3457 if ((t == n || t2 == n) && n != 31)
3458 return FALSE;
3459 }
3460
3461 if (BIT (insn, 22))
3462 {
3463 /* Load */
3464 if (t == t2)
3465 return FALSE;
3466 }
3467
3468 return TRUE;
3469 }
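/* For example, the writeback form "ldpsw x0, x1, [x0], #8" (Rt equals the
   base register Rn) and the load "ldpsw x2, x2, [x3]" (Rt equals Rt2)
   would both fail the checks above.  */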
3470
3471 /* Include the opcode description table as well as the operand description
3472 table. */
3473 #define VERIFIER(x) verify_##x
3474 #include "aarch64-tbl.h"