 1/* aarch64-opc.c -- AArch64 opcode support.
 2 Copyright 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21#include "sysdep.h"
22#include <assert.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <stdint.h>
26#include <stdarg.h>
27#include <inttypes.h>
28
29#include "opintl.h"
30
31#include "aarch64-opc.h"
32
33#ifdef DEBUG_AARCH64
34int debug_dump = FALSE;
35#endif /* DEBUG_AARCH64 */
36
 37/* Helper functions to determine which operand is used to encode/decode
 38 the size:Q fields for AdvSIMD instructions. */
39
40static inline bfd_boolean
41vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
42{
43 return ((qualifier >= AARCH64_OPND_QLF_V_8B
44 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
45 : FALSE);
46}
47
48static inline bfd_boolean
49fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
50{
51 return ((qualifier >= AARCH64_OPND_QLF_S_B
52 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
53 : FALSE);
54}
55
56enum data_pattern
57{
58 DP_UNKNOWN,
59 DP_VECTOR_3SAME,
60 DP_VECTOR_LONG,
61 DP_VECTOR_WIDE,
62 DP_VECTOR_ACROSS_LANES,
63};
64
65static const char significant_operand_index [] =
66{
67 0, /* DP_UNKNOWN, by default using operand 0. */
68 0, /* DP_VECTOR_3SAME */
69 1, /* DP_VECTOR_LONG */
70 2, /* DP_VECTOR_WIDE */
71 1, /* DP_VECTOR_ACROSS_LANES */
72};
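/* For example, in a widening (long) operation such as "v.8h, v.8b, v.8b",
   the size:Q fields are encoded/decoded from operand 1, hence the index 1
   recorded for DP_VECTOR_LONG above.  */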
73
74/* Given a sequence of qualifiers in QUALIFIERS, determine and return
75 the data pattern.
76 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77 corresponds to one of a sequence of operands. */
78
79static enum data_pattern
80get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
81{
82 if (vector_qualifier_p (qualifiers[0]) == TRUE)
83 {
84 /* e.g. v.4s, v.4s, v.4s
85 or v.4h, v.4h, v.h[3]. */
86 if (qualifiers[0] == qualifiers[1]
87 && vector_qualifier_p (qualifiers[2]) == TRUE
88 && (aarch64_get_qualifier_esize (qualifiers[0])
89 == aarch64_get_qualifier_esize (qualifiers[1]))
90 && (aarch64_get_qualifier_esize (qualifiers[0])
91 == aarch64_get_qualifier_esize (qualifiers[2])))
92 return DP_VECTOR_3SAME;
93 /* e.g. v.8h, v.8b, v.8b.
94 or v.4s, v.4h, v.h[2].
95 or v.8h, v.16b. */
96 if (vector_qualifier_p (qualifiers[1]) == TRUE
97 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
98 && (aarch64_get_qualifier_esize (qualifiers[0])
99 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
100 return DP_VECTOR_LONG;
101 /* e.g. v.8h, v.8h, v.8b. */
102 if (qualifiers[0] == qualifiers[1]
103 && vector_qualifier_p (qualifiers[2]) == TRUE
104 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
105 && (aarch64_get_qualifier_esize (qualifiers[0])
106 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
107 && (aarch64_get_qualifier_esize (qualifiers[0])
108 == aarch64_get_qualifier_esize (qualifiers[1])))
109 return DP_VECTOR_WIDE;
110 }
111 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
112 {
113 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
114 if (vector_qualifier_p (qualifiers[1]) == TRUE
115 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
116 return DP_VECTOR_ACROSS_LANES;
117 }
118
119 return DP_UNKNOWN;
120}
121
122/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123 the AdvSIMD instructions. */
 124/* N.B. it is possible to do some optimization that doesn't call
 125 get_data_pattern each time an operand needs to be selected. We could
 126 either cache the calculated result or statically generate the data;
 127 however, it is not obvious that the optimization would bring significant
 128 benefit. */
129
130int
131aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
132{
133 return
134 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
135}
136\f
137const aarch64_field fields[] =
138{
139 { 0, 0 }, /* NIL. */
140 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
141 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
142 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
143 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
144 { 5, 19 }, /* imm19: e.g. in CBZ. */
145 { 5, 19 }, /* immhi: e.g. in ADRP. */
146 { 29, 2 }, /* immlo: e.g. in ADRP. */
147 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
148 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
149 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
150 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
151 { 0, 5 }, /* Rt: in load/store instructions. */
152 { 0, 5 }, /* Rd: in many integer instructions. */
153 { 5, 5 }, /* Rn: in many integer instructions. */
154 { 10, 5 }, /* Rt2: in load/store pair instructions. */
155 { 10, 5 }, /* Ra: in fp instructions. */
156 { 5, 3 }, /* op2: in the system instructions. */
157 { 8, 4 }, /* CRm: in the system instructions. */
158 { 12, 4 }, /* CRn: in the system instructions. */
159 { 16, 3 }, /* op1: in the system instructions. */
160 { 19, 2 }, /* op0: in the system instructions. */
161 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
162 { 12, 4 }, /* cond: condition flags as a source operand. */
163 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
164 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
165 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
166 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
167 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
168 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
169 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
170 { 12, 1 }, /* S: in load/store reg offset instructions. */
171 { 21, 2 }, /* hw: in move wide constant instructions. */
172 { 22, 2 }, /* opc: in load/store reg offset instructions. */
173 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
174 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
175 { 22, 2 }, /* type: floating point type field in fp data inst. */
176 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
177 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
178 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
179 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
180 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
181 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
182 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
183 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
184 { 5, 14 }, /* imm14: in test bit and branch instructions. */
185 { 5, 16 }, /* imm16: in exception instructions. */
186 { 0, 26 }, /* imm26: in unconditional branch instructions. */
187 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
188 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
189 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
190 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
191 { 22, 1 }, /* N: in logical (immediate) instructions. */
192 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
193 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
194 { 31, 1 }, /* sf: in integer data processing instructions. */
195 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
196 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
197 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
198 { 31, 1 }, /* b5: in the test bit and branch instructions. */
199 { 19, 5 }, /* b40: in the test bit and branch instructions. */
200 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
201};
202
203enum aarch64_operand_class
204aarch64_get_operand_class (enum aarch64_opnd type)
205{
206 return aarch64_operands[type].op_class;
207}
208
209const char *
210aarch64_get_operand_name (enum aarch64_opnd type)
211{
212 return aarch64_operands[type].name;
213}
214
215/* Get operand description string.
 216 This is usually used for diagnostic purposes. */
217const char *
218aarch64_get_operand_desc (enum aarch64_opnd type)
219{
220 return aarch64_operands[type].desc;
221}
222
223/* Table of all conditional affixes. */
224const aarch64_cond aarch64_conds[16] =
225{
226 {{"eq"}, 0x0},
227 {{"ne"}, 0x1},
228 {{"cs", "hs"}, 0x2},
229 {{"cc", "lo", "ul"}, 0x3},
230 {{"mi"}, 0x4},
231 {{"pl"}, 0x5},
232 {{"vs"}, 0x6},
233 {{"vc"}, 0x7},
234 {{"hi"}, 0x8},
235 {{"ls"}, 0x9},
236 {{"ge"}, 0xa},
237 {{"lt"}, 0xb},
238 {{"gt"}, 0xc},
239 {{"le"}, 0xd},
240 {{"al"}, 0xe},
241 {{"nv"}, 0xf},
242};
243
244const aarch64_cond *
245get_cond_from_value (aarch64_insn value)
246{
247 assert (value < 16);
248 return &aarch64_conds[(unsigned int) value];
249}
250
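/* Return the condition opposite to *COND.  The table above pairs each
   condition with its inverse at the value differing only in bit 0, e.g.
   EQ (0x0) <-> NE (0x1), GE (0xa) <-> LT (0xb).  */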
251const aarch64_cond *
252get_inverted_cond (const aarch64_cond *cond)
253{
254 return &aarch64_conds[cond->value ^ 0x1];
255}
256
257/* Table describing the operand extension/shifting operators; indexed by
258 enum aarch64_modifier_kind.
259
260 The value column provides the most common values for encoding modifiers,
261 which enables table-driven encoding/decoding for the modifiers. */
262const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
263{
264 {"none", 0x0},
265 {"msl", 0x0},
266 {"ror", 0x3},
267 {"asr", 0x2},
268 {"lsr", 0x1},
269 {"lsl", 0x0},
270 {"uxtb", 0x0},
271 {"uxth", 0x1},
272 {"uxtw", 0x2},
273 {"uxtx", 0x3},
274 {"sxtb", 0x4},
275 {"sxth", 0x5},
276 {"sxtw", 0x6},
277 {"sxtx", 0x7},
278 {NULL, 0},
279};
280
281enum aarch64_modifier_kind
282aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
283{
284 return desc - aarch64_operand_modifiers;
285}
286
287aarch64_insn
288aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
289{
290 return aarch64_operand_modifiers[kind].value;
291}
292
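/* Map an encoded modifier VALUE back to its enumerator.  This relies on the
   enumerators following the same order as the value column in the table
   above (an assumption implied by that table), e.g. a shift value of 1
   yields AARCH64_MOD_LSR and an extend value of 2 yields AARCH64_MOD_UXTW.  */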
293enum aarch64_modifier_kind
294aarch64_get_operand_modifier_from_value (aarch64_insn value,
295 bfd_boolean extend_p)
296{
297 if (extend_p == TRUE)
298 return AARCH64_MOD_UXTB + value;
299 else
300 return AARCH64_MOD_LSL - value;
301}
302
303bfd_boolean
304aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
305{
306 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
307 ? TRUE : FALSE;
308}
309
310static inline bfd_boolean
311aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
312{
313 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
314 ? TRUE : FALSE;
315}
316
317const struct aarch64_name_value_pair aarch64_barrier_options[16] =
318{
319 { "#0x00", 0x0 },
320 { "oshld", 0x1 },
321 { "oshst", 0x2 },
322 { "osh", 0x3 },
323 { "#0x04", 0x4 },
324 { "nshld", 0x5 },
325 { "nshst", 0x6 },
326 { "nsh", 0x7 },
327 { "#0x08", 0x8 },
328 { "ishld", 0x9 },
329 { "ishst", 0xa },
330 { "ish", 0xb },
331 { "#0x0c", 0xc },
332 { "ld", 0xd },
333 { "st", 0xe },
334 { "sy", 0xf },
335};
336
 337/* op -> op: load = 0 instruction = 1 store = 2
338 l -> level: 1-3
339 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
 340#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
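/* For illustration: B(2, 1, 0) = (2 << 3) | ((1 - 1) << 1) | 0 = 0x10,
   which is the encoding used for "pstl1keep" in the table below.  */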
341const struct aarch64_name_value_pair aarch64_prfops[32] =
342{
343 { "pldl1keep", B(0, 1, 0) },
344 { "pldl1strm", B(0, 1, 1) },
345 { "pldl2keep", B(0, 2, 0) },
346 { "pldl2strm", B(0, 2, 1) },
347 { "pldl3keep", B(0, 3, 0) },
348 { "pldl3strm", B(0, 3, 1) },
349 { "#0x06", 0x06 },
350 { "#0x07", 0x07 },
351 { "plil1keep", B(1, 1, 0) },
352 { "plil1strm", B(1, 1, 1) },
353 { "plil2keep", B(1, 2, 0) },
354 { "plil2strm", B(1, 2, 1) },
355 { "plil3keep", B(1, 3, 0) },
356 { "plil3strm", B(1, 3, 1) },
357 { "#0x0e", 0x0e },
358 { "#0x0f", 0x0f },
359 { "pstl1keep", B(2, 1, 0) },
360 { "pstl1strm", B(2, 1, 1) },
361 { "pstl2keep", B(2, 2, 0) },
362 { "pstl2strm", B(2, 2, 1) },
363 { "pstl3keep", B(2, 3, 0) },
364 { "pstl3strm", B(2, 3, 1) },
365 { "#0x16", 0x16 },
366 { "#0x17", 0x17 },
367 { "#0x18", 0x18 },
368 { "#0x19", 0x19 },
369 { "#0x1a", 0x1a },
370 { "#0x1b", 0x1b },
371 { "#0x1c", 0x1c },
372 { "#0x1d", 0x1d },
373 { "#0x1e", 0x1e },
374 { "#0x1f", 0x1f },
375};
376#undef B
377\f
378/* Utilities on value constraint. */
379
380static inline int
381value_in_range_p (int64_t value, int low, int high)
382{
383 return (value >= low && value <= high) ? 1 : 0;
384}
385
386static inline int
387value_aligned_p (int64_t value, int align)
388{
389 return ((value & (align - 1)) == 0) ? 1 : 0;
390}
391
 392/* Return non-zero if VALUE fits in a signed field of WIDTH bits. */
393static inline int
394value_fit_signed_field_p (int64_t value, unsigned width)
395{
396 assert (width < 32);
397 if (width < sizeof (value) * 8)
398 {
399 int64_t lim = (int64_t)1 << (width - 1);
400 if (value >= -lim && value < lim)
401 return 1;
402 }
403 return 0;
404}
405
 406/* Return non-zero if VALUE fits in an unsigned field of WIDTH bits. */
407static inline int
408value_fit_unsigned_field_p (int64_t value, unsigned width)
409{
410 assert (width < 32);
411 if (width < sizeof (value) * 8)
412 {
413 int64_t lim = (int64_t)1 << width;
414 if (value >= 0 && value < lim)
415 return 1;
416 }
417 return 0;
418}
419
420/* Return 1 if OPERAND is SP or WSP. */
421int
422aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
423{
424 return ((aarch64_get_operand_class (operand->type)
425 == AARCH64_OPND_CLASS_INT_REG)
426 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
427 && operand->reg.regno == 31);
428}
429
 430/* Return 1 if OPERAND is XZR or WZR. */
431int
432aarch64_zero_register_p (const aarch64_opnd_info *operand)
433{
434 return ((aarch64_get_operand_class (operand->type)
435 == AARCH64_OPND_CLASS_INT_REG)
436 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
437 && operand->reg.regno == 31);
438}
439
 440/* Return true if the operand *OPERAND, which has the operand code
 441 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also
 442 be qualified by the qualifier TARGET. */
443
444static inline int
445operand_also_qualified_p (const struct aarch64_opnd_info *operand,
446 aarch64_opnd_qualifier_t target)
447{
448 switch (operand->qualifier)
449 {
450 case AARCH64_OPND_QLF_W:
451 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
452 return 1;
453 break;
454 case AARCH64_OPND_QLF_X:
455 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
456 return 1;
457 break;
458 case AARCH64_OPND_QLF_WSP:
459 if (target == AARCH64_OPND_QLF_W
460 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
461 return 1;
462 break;
463 case AARCH64_OPND_QLF_SP:
464 if (target == AARCH64_OPND_QLF_X
465 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
466 return 1;
467 break;
468 default:
469 break;
470 }
471
472 return 0;
473}
474
475/* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
476 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
477
 478 Return NIL if more than one expected qualifier is found. */
479
480aarch64_opnd_qualifier_t
481aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
482 int idx,
483 const aarch64_opnd_qualifier_t known_qlf,
484 int known_idx)
485{
486 int i, saved_i;
487
488 /* Special case.
489
490 When the known qualifier is NIL, we have to assume that there is only
491 one qualifier sequence in the *QSEQ_LIST and return the corresponding
492 qualifier directly. One scenario is that for instruction
493 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
494 which has only one possible valid qualifier sequence
495 NIL, S_D
496 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
497 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
498
499 Because the qualifier NIL has dual roles in the qualifier sequence:
 500 it can mean no qualifier for the operand, or the qualifier sequence is
501 not in use (when all qualifiers in the sequence are NILs), we have to
502 handle this special case here. */
503 if (known_qlf == AARCH64_OPND_NIL)
504 {
505 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
506 return qseq_list[0][idx];
507 }
508
509 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
510 {
511 if (qseq_list[i][known_idx] == known_qlf)
512 {
513 if (saved_i != -1)
 514 /* More than one sequence is found to have KNOWN_QLF at
515 KNOWN_IDX. */
516 return AARCH64_OPND_NIL;
517 saved_i = i;
518 }
519 }
520
521 return qseq_list[saved_i][idx];
522}
523
524enum operand_qualifier_kind
525{
526 OQK_NIL,
527 OQK_OPD_VARIANT,
528 OQK_VALUE_IN_RANGE,
529 OQK_MISC,
530};
531
532/* Operand qualifier description. */
533struct operand_qualifier_data
534{
535 /* The usage of the three data fields depends on the qualifier kind. */
536 int data0;
537 int data1;
538 int data2;
539 /* Description. */
540 const char *desc;
541 /* Kind. */
542 enum operand_qualifier_kind kind;
543};
544
545/* Indexed by the operand qualifier enumerators. */
546struct operand_qualifier_data aarch64_opnd_qualifiers[] =
547{
548 {0, 0, 0, "NIL", OQK_NIL},
549
550 /* Operand variant qualifiers.
551 First 3 fields:
552 element size, number of elements and common value for encoding. */
553
554 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
555 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
556 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
557 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
558
559 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
560 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
561 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
562 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
563 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
564
565 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
566 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
567 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
568 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
569 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
570 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
571 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
572 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
573 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
574
575 /* Qualifiers constraining the value range.
576 First 3 fields:
577 Lower bound, higher bound, unused. */
578
579 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
580 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
581 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
582 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
583 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
584 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
585
586 /* Qualifiers for miscellaneous purpose.
587 First 3 fields:
588 unused, unused and unused. */
589
590 {0, 0, 0, "lsl", 0},
591 {0, 0, 0, "msl", 0},
592
593 {0, 0, 0, "retrieving", 0},
594};
595
596static inline bfd_boolean
597operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
598{
599 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
600 ? TRUE : FALSE;
601}
602
603static inline bfd_boolean
604qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
605{
606 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
607 ? TRUE : FALSE;
608}
609
610const char*
611aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
612{
613 return aarch64_opnd_qualifiers[qualifier].desc;
614}
615
616/* Given an operand qualifier, return the expected data element size
617 of a qualified operand. */
618unsigned char
619aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
620{
621 assert (operand_variant_qualifier_p (qualifier) == TRUE);
622 return aarch64_opnd_qualifiers[qualifier].data0;
623}
624
625unsigned char
626aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
627{
628 assert (operand_variant_qualifier_p (qualifier) == TRUE);
629 return aarch64_opnd_qualifiers[qualifier].data1;
630}
631
632aarch64_insn
633aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
634{
635 assert (operand_variant_qualifier_p (qualifier) == TRUE);
636 return aarch64_opnd_qualifiers[qualifier].data2;
637}
638
639static int
640get_lower_bound (aarch64_opnd_qualifier_t qualifier)
641{
642 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
643 return aarch64_opnd_qualifiers[qualifier].data0;
644}
645
646static int
647get_upper_bound (aarch64_opnd_qualifier_t qualifier)
648{
649 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
650 return aarch64_opnd_qualifiers[qualifier].data1;
651}
652
653#ifdef DEBUG_AARCH64
654void
655aarch64_verbose (const char *str, ...)
656{
657 va_list ap;
658 va_start (ap, str);
659 printf ("#### ");
660 vprintf (str, ap);
661 printf ("\n");
662 va_end (ap);
663}
664
665static inline void
666dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
667{
668 int i;
669 printf ("#### \t");
670 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
671 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
672 printf ("\n");
673}
674
675static void
676dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
677 const aarch64_opnd_qualifier_t *qualifier)
678{
679 int i;
680 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
681
682 aarch64_verbose ("dump_match_qualifiers:");
683 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
684 curr[i] = opnd[i].qualifier;
685 dump_qualifier_sequence (curr);
686 aarch64_verbose ("against");
687 dump_qualifier_sequence (qualifier);
688}
689#endif /* DEBUG_AARCH64 */
690
 691/* TODO: improve this; we could have an extra field to store the number of
 692 operands rather than calculating it every time. */
693
694int
695aarch64_num_of_operands (const aarch64_opcode *opcode)
696{
697 int i = 0;
698 const enum aarch64_opnd *opnds = opcode->operands;
699 while (opnds[i++] != AARCH64_OPND_NIL)
700 ;
701 --i;
702 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
703 return i;
704}
705
 706/* Find the best-matched qualifier sequence in *QUALIFIERS_LIST for INST.
 707 If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
 708
 709 N.B. on entry, it is very likely that only some operands in *INST
 710 have had their qualifiers established.
711
712 If STOP_AT is not -1, the function will only try to match
713 the qualifier sequence for operands before and including the operand
714 of index STOP_AT; and on success *RET will only be filled with the first
715 (STOP_AT+1) qualifiers.
716
 717 A couple of examples of the matching algorithm:
718
719 X,W,NIL should match
720 X,W,NIL
721
722 NIL,NIL should match
723 X ,NIL
724
725 Apart from serving the main encoding routine, this can also be called
726 during or after the operand decoding. */
727
728int
729aarch64_find_best_match (const aarch64_inst *inst,
730 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
731 int stop_at, aarch64_opnd_qualifier_t *ret)
732{
733 int found = 0;
734 int i, num_opnds;
735 const aarch64_opnd_qualifier_t *qualifiers;
736
737 num_opnds = aarch64_num_of_operands (inst->opcode);
738 if (num_opnds == 0)
739 {
740 DEBUG_TRACE ("SUCCEED: no operand");
741 return 1;
742 }
743
744 if (stop_at < 0 || stop_at >= num_opnds)
745 stop_at = num_opnds - 1;
746
747 /* For each pattern. */
748 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
749 {
750 int j;
751 qualifiers = *qualifiers_list;
752
753 /* Start as positive. */
754 found = 1;
755
756 DEBUG_TRACE ("%d", i);
757#ifdef DEBUG_AARCH64
758 if (debug_dump)
759 dump_match_qualifiers (inst->operands, qualifiers);
760#endif
761
 762 /* Most opcodes have far fewer patterns in the list than the maximum;
 763 the first all-NIL qualifier sequence indicates the end of the list. */
764 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
765 {
766 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
767 if (i)
768 found = 0;
769 break;
770 }
771
772 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
773 {
774 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
775 {
 776 /* Either the operand does not have a qualifier, or the qualifier
 777 for the operand needs to be deduced from the qualifier
 778 sequence.
 779 In the latter case, any constraint checking related to
 780 the obtained qualifier should be done later in
 781 operand_general_constraint_met_p. */
782 continue;
783 }
784 else if (*qualifiers != inst->operands[j].qualifier)
785 {
 786 /* Unless the target qualifier can also qualify the operand
 787 (which already has a non-nil qualifier), unequal
 788 qualifiers generally do not match. */
789 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
790 continue;
791 else
792 {
793 found = 0;
794 break;
795 }
796 }
797 else
798 continue; /* Equal qualifiers are certainly matched. */
799 }
800
801 /* Qualifiers established. */
802 if (found == 1)
803 break;
804 }
805
806 if (found == 1)
807 {
808 /* Fill the result in *RET. */
809 int j;
810 qualifiers = *qualifiers_list;
811
812 DEBUG_TRACE ("complete qualifiers using list %d", i);
813#ifdef DEBUG_AARCH64
814 if (debug_dump)
815 dump_qualifier_sequence (qualifiers);
816#endif
817
818 for (j = 0; j <= stop_at; ++j, ++qualifiers)
819 ret[j] = *qualifiers;
820 for (; j < AARCH64_MAX_OPND_NUM; ++j)
821 ret[j] = AARCH64_OPND_QLF_NIL;
822
823 DEBUG_TRACE ("SUCCESS");
824 return 1;
825 }
826
827 DEBUG_TRACE ("FAIL");
828 return 0;
829}
830
831/* Operand qualifier matching and resolving.
832
833 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
834 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
835
 836 If UPDATE_P is TRUE, update the qualifier(s) in *INST after the matching
837 succeeds. */
838
839static int
840match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
841{
842 int i;
843 aarch64_opnd_qualifier_seq_t qualifiers;
844
845 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
846 qualifiers))
847 {
848 DEBUG_TRACE ("matching FAIL");
849 return 0;
850 }
851
852 /* Update the qualifiers. */
853 if (update_p == TRUE)
854 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
855 {
856 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
857 break;
858 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
859 "update %s with %s for operand %d",
860 aarch64_get_qualifier_name (inst->operands[i].qualifier),
861 aarch64_get_qualifier_name (qualifiers[i]), i);
862 inst->operands[i].qualifier = qualifiers[i];
863 }
864
865 DEBUG_TRACE ("matching SUCCESS");
866 return 1;
867}
868
869/* Return TRUE if VALUE is a wide constant that can be moved into a general
870 register by MOVZ.
871
872 IS32 indicates whether value is a 32-bit immediate or not.
873 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
874 amount will be returned in *SHIFT_AMOUNT. */
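/* Illustrative example: 0x12340000 has only its second 16-bit chunk
   non-zero, so this returns TRUE with *SHIFT_AMOUNT set to 16, whereas
   0x12340001 has two non-zero 16-bit chunks and is rejected.  */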
875
876bfd_boolean
877aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
878{
879 int amount;
880
881 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
882
883 if (is32)
884 {
885 /* Allow all zeros or all ones in top 32-bits, so that
886 32-bit constant expressions like ~0x80000000 are
887 permitted. */
888 uint64_t ext = value;
889 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
890 /* Immediate out of range. */
891 return FALSE;
892 value &= (int64_t) 0xffffffff;
893 }
894
895 /* first, try movz then movn */
896 amount = -1;
897 if ((value & ((int64_t) 0xffff << 0)) == value)
898 amount = 0;
899 else if ((value & ((int64_t) 0xffff << 16)) == value)
900 amount = 16;
901 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
902 amount = 32;
903 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
904 amount = 48;
905
906 if (amount == -1)
907 {
908 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
909 return FALSE;
910 }
911
912 if (shift_amount != NULL)
913 *shift_amount = amount;
914
915 DEBUG_TRACE ("exit TRUE with amount %d", amount);
916
917 return TRUE;
918}
919
920/* Build the accepted values for immediate logical SIMD instructions.
921
922 The standard encodings of the immediate value are:
923 N imms immr SIMD size R S
924 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
925 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
926 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
927 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
928 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
929 0 11110s 00000r 2 UInt(r) UInt(s)
930 where all-ones value of S is reserved.
931
932 Let's call E the SIMD size.
933
934 The immediate value is: S+1 bits '1' rotated to the right by R.
935
936 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
937 (remember S != E - 1). */
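/* Worked example (illustrative): with E = 8, S = 2 and R = 1, the element is
   three 1-bits (0b00000111) rotated right by one, giving 0b10000011 (0x83);
   replicated across the register this yields the immediate
   0x8383838383838383.  */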
938
939#define TOTAL_IMM_NB 5334
940
941typedef struct
942{
943 uint64_t imm;
944 aarch64_insn encoding;
945} simd_imm_encoding;
946
947static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
948
949static int
950simd_imm_encoding_cmp(const void *i1, const void *i2)
951{
952 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
953 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
954
955 if (imm1->imm < imm2->imm)
956 return -1;
957 if (imm1->imm > imm2->imm)
958 return +1;
959 return 0;
960}
961
962/* immediate bitfield standard encoding
963 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
964 1 ssssss rrrrrr 64 rrrrrr ssssss
965 0 0sssss 0rrrrr 32 rrrrr sssss
966 0 10ssss 00rrrr 16 rrrr ssss
967 0 110sss 000rrr 8 rrr sss
968 0 1110ss 0000rr 4 rr ss
969 0 11110s 00000r 2 r s */
970static inline int
971encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
972{
973 return (is64 << 12) | (r << 6) | s;
974}
975
976static void
977build_immediate_table (void)
978{
979 uint32_t log_e, e, s, r, s_mask;
980 uint64_t mask, imm;
981 int nb_imms;
982 int is64;
983
984 nb_imms = 0;
985 for (log_e = 1; log_e <= 6; log_e++)
986 {
987 /* Get element size. */
988 e = 1u << log_e;
989 if (log_e == 6)
990 {
991 is64 = 1;
992 mask = 0xffffffffffffffffull;
993 s_mask = 0;
994 }
995 else
996 {
997 is64 = 0;
998 mask = (1ull << e) - 1;
999 /* log_e s_mask
1000 1 ((1 << 4) - 1) << 2 = 111100
1001 2 ((1 << 3) - 1) << 3 = 111000
1002 3 ((1 << 2) - 1) << 4 = 110000
1003 4 ((1 << 1) - 1) << 5 = 100000
1004 5 ((1 << 0) - 1) << 6 = 000000 */
1005 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1006 }
1007 for (s = 0; s < e - 1; s++)
1008 for (r = 0; r < e; r++)
1009 {
1010 /* s+1 consecutive bits to 1 (s < 63) */
1011 imm = (1ull << (s + 1)) - 1;
1012 /* rotate right by r */
1013 if (r != 0)
1014 imm = (imm >> r) | ((imm << (e - r)) & mask);
1015 /* replicate the constant depending on SIMD size */
1016 switch (log_e)
1017 {
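 /* Each case below deliberately falls through to the next, doubling the
 element until the full 64-bit immediate is built.  */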
1018 case 1: imm = (imm << 2) | imm;
1019 case 2: imm = (imm << 4) | imm;
1020 case 3: imm = (imm << 8) | imm;
1021 case 4: imm = (imm << 16) | imm;
1022 case 5: imm = (imm << 32) | imm;
1023 case 6: break;
1024 default: abort ();
1025 }
1026 simd_immediates[nb_imms].imm = imm;
1027 simd_immediates[nb_imms].encoding =
1028 encode_immediate_bitfield(is64, s | s_mask, r);
1029 nb_imms++;
1030 }
1031 }
1032 assert (nb_imms == TOTAL_IMM_NB);
1033 qsort(simd_immediates, nb_imms,
1034 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1035}
1036
1037/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1038 be accepted by logical (immediate) instructions
1039 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1040
1041 IS32 indicates whether or not VALUE is a 32-bit immediate.
1042 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1043 VALUE will be returned in *ENCODING. */
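/* E.g. (illustrative) #0x5555555555555555 is a valid bitmask immediate
   (element size 2, a single set bit, no rotation), whereas
   0x5555555555555554 is not and is rejected here.  */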
1044
1045bfd_boolean
1046aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
1047{
1048 simd_imm_encoding imm_enc;
1049 const simd_imm_encoding *imm_encoding;
1050 static bfd_boolean initialized = FALSE;
1051
1052 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1053 value, is32);
1054
1055 if (initialized == FALSE)
1056 {
1057 build_immediate_table ();
1058 initialized = TRUE;
1059 }
1060
1061 if (is32)
1062 {
1063 /* Allow all zeros or all ones in top 32-bits, so that
1064 constant expressions like ~1 are permitted. */
1065 if (value >> 32 != 0 && value >> 32 != 0xffffffff)
 1066 return FALSE;
1067 /* Replicate the 32 lower bits to the 32 upper bits. */
1068 value &= 0xffffffff;
1069 value |= value << 32;
1070 }
1071
1072 imm_enc.imm = value;
1073 imm_encoding = (const simd_imm_encoding *)
1074 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1075 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1076 if (imm_encoding == NULL)
1077 {
1078 DEBUG_TRACE ("exit with FALSE");
1079 return FALSE;
1080 }
1081 if (encoding != NULL)
1082 *encoding = imm_encoding->encoding;
1083 DEBUG_TRACE ("exit with TRUE");
1084 return TRUE;
1085}
1086
1087/* If 64-bit immediate IMM is in the format of
1088 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1089 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1090 of value "abcdefgh". Otherwise return -1. */
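/* For instance (illustrative), 0xff00ff0000ff00ff yields 0xa5 (binary
   10100101), while 0xff00ff0000ff00fe yields -1 because its lowest byte is
   neither 0x00 nor 0xff.  */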
1091int
1092aarch64_shrink_expanded_imm8 (uint64_t imm)
1093{
1094 int i, ret;
1095 uint32_t byte;
1096
1097 ret = 0;
1098 for (i = 0; i < 8; i++)
1099 {
1100 byte = (imm >> (8 * i)) & 0xff;
1101 if (byte == 0xff)
1102 ret |= 1 << i;
1103 else if (byte != 0x00)
1104 return -1;
1105 }
1106 return ret;
1107}
1108
1109/* Utility inline functions for operand_general_constraint_met_p. */
1110
1111static inline void
1112set_error (aarch64_operand_error *mismatch_detail,
1113 enum aarch64_operand_error_kind kind, int idx,
1114 const char* error)
1115{
1116 if (mismatch_detail == NULL)
1117 return;
1118 mismatch_detail->kind = kind;
1119 mismatch_detail->index = idx;
1120 mismatch_detail->error = error;
1121}
1122
1123static inline void
1124set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1125 int idx, int lower_bound, int upper_bound,
1126 const char* error)
1127{
1128 if (mismatch_detail == NULL)
1129 return;
1130 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1131 mismatch_detail->data[0] = lower_bound;
1132 mismatch_detail->data[1] = upper_bound;
1133}
1134
1135static inline void
1136set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1137 int idx, int lower_bound, int upper_bound)
1138{
1139 if (mismatch_detail == NULL)
1140 return;
1141 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1142 _("immediate value"));
1143}
1144
1145static inline void
1146set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1147 int idx, int lower_bound, int upper_bound)
1148{
1149 if (mismatch_detail == NULL)
1150 return;
1151 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1152 _("immediate offset"));
1153}
1154
1155static inline void
1156set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1157 int idx, int lower_bound, int upper_bound)
1158{
1159 if (mismatch_detail == NULL)
1160 return;
1161 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1162 _("register number"));
1163}
1164
1165static inline void
1166set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1167 int idx, int lower_bound, int upper_bound)
1168{
1169 if (mismatch_detail == NULL)
1170 return;
1171 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1172 _("register element index"));
1173}
1174
1175static inline void
1176set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1177 int idx, int lower_bound, int upper_bound)
1178{
1179 if (mismatch_detail == NULL)
1180 return;
1181 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1182 _("shift amount"));
1183}
1184
1185static inline void
1186set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1187 int alignment)
1188{
1189 if (mismatch_detail == NULL)
1190 return;
1191 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1192 mismatch_detail->data[0] = alignment;
1193}
1194
1195static inline void
1196set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1197 int expected_num)
1198{
1199 if (mismatch_detail == NULL)
1200 return;
1201 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1202 mismatch_detail->data[0] = expected_num;
1203}
1204
1205static inline void
1206set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1207 const char* error)
1208{
1209 if (mismatch_detail == NULL)
1210 return;
1211 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1212}
1213
1214/* General constraint checking based on operand code.
1215
1216 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1217 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1218
1219 This function has to be called after the qualifiers for all operands
1220 have been resolved.
1221
1222 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1223 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1224 of error message during the disassembling where error message is not
1225 wanted. We avoid the dynamic construction of strings of error messages
1226 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1227 use a combination of error code, static string and some integer data to
1228 represent an error. */
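/* For instance, an out-of-range immediate is reported as
   AARCH64_OPDE_OUT_OF_RANGE with the permitted bounds recorded in
   mismatch_detail->data[0] and data[1] by set_imm_out_of_range_error above.  */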
1229
1230static int
1231operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1232 enum aarch64_opnd type,
1233 const aarch64_opcode *opcode,
1234 aarch64_operand_error *mismatch_detail)
1235{
1236 unsigned num;
1237 unsigned char size;
1238 int64_t imm;
1239 const aarch64_opnd_info *opnd = opnds + idx;
1240 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1241
1242 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1243
1244 switch (aarch64_operands[type].op_class)
1245 {
1246 case AARCH64_OPND_CLASS_INT_REG:
1247 /* <Xt> may be optional in some IC and TLBI instructions. */
1248 if (type == AARCH64_OPND_Rt_SYS)
1249 {
1250 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1251 == AARCH64_OPND_CLASS_SYSTEM));
1252 if (opnds[1].present && !opnds[0].sysins_op->has_xt)
1253 {
1254 set_other_error (mismatch_detail, idx, _("extraneous register"));
1255 return 0;
1256 }
1257 if (!opnds[1].present && opnds[0].sysins_op->has_xt)
1258 {
1259 set_other_error (mismatch_detail, idx, _("missing register"));
1260 return 0;
1261 }
1262 }
1263 switch (qualifier)
1264 {
1265 case AARCH64_OPND_QLF_WSP:
1266 case AARCH64_OPND_QLF_SP:
1267 if (!aarch64_stack_pointer_p (opnd))
1268 {
1269 set_other_error (mismatch_detail, idx,
1270 _("stack pointer register expected"));
1271 return 0;
1272 }
1273 break;
1274 default:
1275 break;
1276 }
1277 break;
1278
1279 case AARCH64_OPND_CLASS_ADDRESS:
1280 /* Check writeback. */
1281 switch (opcode->iclass)
1282 {
1283 case ldst_pos:
1284 case ldst_unscaled:
1285 case ldstnapair_offs:
1286 case ldstpair_off:
1287 case ldst_unpriv:
1288 if (opnd->addr.writeback == 1)
1289 {
1290 set_other_error (mismatch_detail, idx,
1291 _("unexpected address writeback"));
1292 return 0;
1293 }
1294 break;
1295 case ldst_imm9:
1296 case ldstpair_indexed:
1297 case asisdlsep:
1298 case asisdlsop:
1299 if (opnd->addr.writeback == 0)
1300 {
1301 set_other_error (mismatch_detail, idx,
1302 _("address writeback expected"));
1303 return 0;
1304 }
1305 break;
1306 default:
1307 assert (opnd->addr.writeback == 0);
1308 break;
1309 }
1310 switch (type)
1311 {
1312 case AARCH64_OPND_ADDR_SIMM7:
 1313 /* Scaled signed 7-bit immediate offset. */
1314 /* Get the size of the data element that is accessed, which may be
1315 different from that of the source register size,
1316 e.g. in strb/ldrb. */
1317 size = aarch64_get_qualifier_esize (opnd->qualifier);
1318 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1319 {
1320 set_offset_out_of_range_error (mismatch_detail, idx,
1321 -64 * size, 63 * size);
1322 return 0;
1323 }
1324 if (!value_aligned_p (opnd->addr.offset.imm, size))
1325 {
1326 set_unaligned_error (mismatch_detail, idx, size);
1327 return 0;
1328 }
1329 break;
1330 case AARCH64_OPND_ADDR_SIMM9:
 1331 /* Unscaled signed 9-bit immediate offset. */
1332 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1333 {
1334 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1335 return 0;
1336 }
1337 break;
1338
1339 case AARCH64_OPND_ADDR_SIMM9_2:
 1340 /* Unscaled signed 9-bit immediate offset, which has to be negative
1341 or unaligned. */
1342 size = aarch64_get_qualifier_esize (qualifier);
1343 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1344 && !value_aligned_p (opnd->addr.offset.imm, size))
1345 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1346 return 1;
1347 set_other_error (mismatch_detail, idx,
1348 _("negative or unaligned offset expected"));
1349 return 0;
1350
1351 case AARCH64_OPND_SIMD_ADDR_POST:
1352 /* AdvSIMD load/store multiple structures, post-index. */
1353 assert (idx == 1);
1354 if (opnd->addr.offset.is_reg)
1355 {
1356 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1357 return 1;
1358 else
1359 {
1360 set_other_error (mismatch_detail, idx,
1361 _("invalid register offset"));
1362 return 0;
1363 }
1364 }
1365 else
1366 {
1367 const aarch64_opnd_info *prev = &opnds[idx-1];
1368 unsigned num_bytes; /* total number of bytes transferred. */
1369 /* The opcode dependent area stores the number of elements in
1370 each structure to be loaded/stored. */
1371 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1372 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
 1373 /* Special handling of loading a single structure to all lanes. */
1374 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1375 * aarch64_get_qualifier_esize (prev->qualifier);
1376 else
1377 num_bytes = prev->reglist.num_regs
1378 * aarch64_get_qualifier_esize (prev->qualifier)
1379 * aarch64_get_qualifier_nelem (prev->qualifier);
1380 if ((int) num_bytes != opnd->addr.offset.imm)
1381 {
1382 set_other_error (mismatch_detail, idx,
1383 _("invalid post-increment amount"));
1384 return 0;
1385 }
1386 }
1387 break;
1388
1389 case AARCH64_OPND_ADDR_REGOFF:
1390 /* Get the size of the data element that is accessed, which may be
1391 different from that of the source register size,
1392 e.g. in strb/ldrb. */
1393 size = aarch64_get_qualifier_esize (opnd->qualifier);
1394 /* It is either no shift or shift by the binary logarithm of SIZE. */
1395 if (opnd->shifter.amount != 0
1396 && opnd->shifter.amount != (int)get_logsz (size))
1397 {
1398 set_other_error (mismatch_detail, idx,
1399 _("invalid shift amount"));
1400 return 0;
1401 }
1402 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1403 operators. */
1404 switch (opnd->shifter.kind)
1405 {
1406 case AARCH64_MOD_UXTW:
1407 case AARCH64_MOD_LSL:
1408 case AARCH64_MOD_SXTW:
1409 case AARCH64_MOD_SXTX: break;
1410 default:
1411 set_other_error (mismatch_detail, idx,
1412 _("invalid extend/shift operator"));
1413 return 0;
1414 }
1415 break;
1416
1417 case AARCH64_OPND_ADDR_UIMM12:
1418 imm = opnd->addr.offset.imm;
1419 /* Get the size of the data element that is accessed, which may be
1420 different from that of the source register size,
1421 e.g. in strb/ldrb. */
1422 size = aarch64_get_qualifier_esize (qualifier);
1423 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1424 {
1425 set_offset_out_of_range_error (mismatch_detail, idx,
1426 0, 4095 * size);
1427 return 0;
1428 }
 1429 if (!value_aligned_p (opnd->addr.offset.imm, size))
1430 {
1431 set_unaligned_error (mismatch_detail, idx, size);
1432 return 0;
1433 }
1434 break;
1435
1436 case AARCH64_OPND_ADDR_PCREL14:
1437 case AARCH64_OPND_ADDR_PCREL19:
1438 case AARCH64_OPND_ADDR_PCREL21:
1439 case AARCH64_OPND_ADDR_PCREL26:
1440 imm = opnd->imm.value;
1441 if (operand_need_shift_by_two (get_operand_from_code (type)))
1442 {
 1443 /* The offset value in a PC-relative branch instruction is always
1444 4-byte aligned and is encoded without the lowest 2 bits. */
1445 if (!value_aligned_p (imm, 4))
1446 {
1447 set_unaligned_error (mismatch_detail, idx, 4);
1448 return 0;
1449 }
1450 /* Right shift by 2 so that we can carry out the following check
1451 canonically. */
1452 imm >>= 2;
1453 }
1454 size = get_operand_fields_width (get_operand_from_code (type));
1455 if (!value_fit_signed_field_p (imm, size))
1456 {
1457 set_other_error (mismatch_detail, idx,
1458 _("immediate out of range"));
1459 return 0;
1460 }
1461 break;
1462
1463 default:
1464 break;
1465 }
1466 break;
1467
1468 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1469 /* The opcode dependent area stores the number of elements in
1470 each structure to be loaded/stored. */
1471 num = get_opcode_dependent_value (opcode);
1472 switch (type)
1473 {
1474 case AARCH64_OPND_LVt:
1475 assert (num >= 1 && num <= 4);
1476 /* Unless LD1/ST1, the number of registers should be equal to that
1477 of the structure elements. */
1478 if (num != 1 && opnd->reglist.num_regs != num)
1479 {
1480 set_reg_list_error (mismatch_detail, idx, num);
1481 return 0;
1482 }
1483 break;
1484 case AARCH64_OPND_LVt_AL:
1485 case AARCH64_OPND_LEt:
1486 assert (num >= 1 && num <= 4);
1487 /* The number of registers should be equal to that of the structure
1488 elements. */
1489 if (opnd->reglist.num_regs != num)
1490 {
1491 set_reg_list_error (mismatch_detail, idx, num);
1492 return 0;
1493 }
1494 break;
1495 default:
1496 break;
1497 }
1498 break;
1499
1500 case AARCH64_OPND_CLASS_IMMEDIATE:
1501 /* Constraint check on immediate operand. */
1502 imm = opnd->imm.value;
1503 /* E.g. imm_0_31 constrains value to be 0..31. */
1504 if (qualifier_value_in_range_constraint_p (qualifier)
1505 && !value_in_range_p (imm, get_lower_bound (qualifier),
1506 get_upper_bound (qualifier)))
1507 {
1508 set_imm_out_of_range_error (mismatch_detail, idx,
1509 get_lower_bound (qualifier),
1510 get_upper_bound (qualifier));
1511 return 0;
1512 }
1513
1514 switch (type)
1515 {
1516 case AARCH64_OPND_AIMM:
1517 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1518 {
1519 set_other_error (mismatch_detail, idx,
1520 _("invalid shift operator"));
1521 return 0;
1522 }
1523 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1524 {
1525 set_other_error (mismatch_detail, idx,
1526 _("shift amount expected to be 0 or 12"));
1527 return 0;
1528 }
1529 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1530 {
1531 set_other_error (mismatch_detail, idx,
1532 _("immediate out of range"));
1533 return 0;
1534 }
1535 break;
1536
1537 case AARCH64_OPND_HALF:
1538 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1539 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1540 {
1541 set_other_error (mismatch_detail, idx,
1542 _("invalid shift operator"));
1543 return 0;
1544 }
1545 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1546 if (!value_aligned_p (opnd->shifter.amount, 16))
1547 {
1548 set_other_error (mismatch_detail, idx,
1549 _("shift amount should be a multiple of 16"));
1550 return 0;
1551 }
1552 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1553 {
1554 set_sft_amount_out_of_range_error (mismatch_detail, idx,
1555 0, size * 8 - 16);
1556 return 0;
1557 }
1558 if (opnd->imm.value < 0)
1559 {
1560 set_other_error (mismatch_detail, idx,
1561 _("negative immediate value not allowed"));
1562 return 0;
1563 }
1564 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1565 {
1566 set_other_error (mismatch_detail, idx,
1567 _("immediate out of range"));
1568 return 0;
1569 }
1570 break;
1571
1572 case AARCH64_OPND_IMM_MOV:
1573 {
1574 int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
1575 imm = opnd->imm.value;
1576 assert (idx == 1);
1577 switch (opcode->op)
1578 {
1579 case OP_MOV_IMM_WIDEN:
1580 imm = ~imm;
1581 /* Fall through... */
1582 case OP_MOV_IMM_WIDE:
1583 if (!aarch64_wide_constant_p (imm, is32, NULL))
1584 {
1585 set_other_error (mismatch_detail, idx,
1586 _("immediate out of range"));
1587 return 0;
1588 }
1589 break;
1590 case OP_MOV_IMM_LOG:
1591 if (!aarch64_logical_immediate_p (imm, is32, NULL))
1592 {
1593 set_other_error (mismatch_detail, idx,
1594 _("immediate out of range"));
1595 return 0;
1596 }
1597 break;
1598 default:
1599 assert (0);
1600 return 0;
1601 }
1602 }
1603 break;
1604
1605 case AARCH64_OPND_NZCV:
1606 case AARCH64_OPND_CCMP_IMM:
1607 case AARCH64_OPND_EXCEPTION:
1608 case AARCH64_OPND_UIMM4:
1609 case AARCH64_OPND_UIMM7:
1610 case AARCH64_OPND_UIMM3_OP1:
1611 case AARCH64_OPND_UIMM3_OP2:
1612 size = get_operand_fields_width (get_operand_from_code (type));
1613 assert (size < 32);
1614 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1615 {
1616 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1617 (1 << size) - 1);
1618 return 0;
1619 }
1620 break;
1621
1622 case AARCH64_OPND_WIDTH:
1623 assert (idx == 3 && opnds[idx-1].type == AARCH64_OPND_IMM
1624 && opnds[0].type == AARCH64_OPND_Rd);
1625 size = get_upper_bound (qualifier);
1626 if (opnd->imm.value + opnds[idx-1].imm.value > size)
1627 /* lsb+width <= reg.size */
1628 {
1629 set_imm_out_of_range_error (mismatch_detail, idx, 1,
1630 size - opnds[idx-1].imm.value);
1631 return 0;
1632 }
1633 break;
1634
1635 case AARCH64_OPND_LIMM:
1636 {
1637 int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
1638 uint64_t uimm = opnd->imm.value;
1639 if (opcode->op == OP_BIC)
1640 uimm = ~uimm;
1641 if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
1642 {
1643 set_other_error (mismatch_detail, idx,
1644 _("immediate out of range"));
1645 return 0;
1646 }
1647 }
1648 break;
1649
1650 case AARCH64_OPND_IMM0:
1651 case AARCH64_OPND_FPIMM0:
1652 if (opnd->imm.value != 0)
1653 {
1654 set_other_error (mismatch_detail, idx,
1655 _("immediate zero expected"));
1656 return 0;
1657 }
1658 break;
1659
1660 case AARCH64_OPND_SHLL_IMM:
1661 assert (idx == 2);
1662 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1663 if (opnd->imm.value != size)
1664 {
1665 set_other_error (mismatch_detail, idx,
1666 _("invalid shift amount"));
1667 return 0;
1668 }
1669 break;
1670
1671 case AARCH64_OPND_IMM_VLSL:
1672 size = aarch64_get_qualifier_esize (qualifier);
1673 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1674 {
1675 set_imm_out_of_range_error (mismatch_detail, idx, 0,
1676 size * 8 - 1);
1677 return 0;
1678 }
1679 break;
1680
1681 case AARCH64_OPND_IMM_VLSR:
1682 size = aarch64_get_qualifier_esize (qualifier);
1683 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1684 {
1685 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1686 return 0;
1687 }
1688 break;
1689
1690 case AARCH64_OPND_SIMD_IMM:
1691 case AARCH64_OPND_SIMD_IMM_SFT:
1692 /* Qualifier check. */
1693 switch (qualifier)
1694 {
1695 case AARCH64_OPND_QLF_LSL:
1696 if (opnd->shifter.kind != AARCH64_MOD_LSL)
1697 {
1698 set_other_error (mismatch_detail, idx,
1699 _("invalid shift operator"));
1700 return 0;
1701 }
1702 break;
1703 case AARCH64_OPND_QLF_MSL:
1704 if (opnd->shifter.kind != AARCH64_MOD_MSL)
1705 {
1706 set_other_error (mismatch_detail, idx,
1707 _("invalid shift operator"));
1708 return 0;
1709 }
1710 break;
1711 case AARCH64_OPND_QLF_NIL:
1712 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1713 {
1714 set_other_error (mismatch_detail, idx,
1715 _("shift is not permitted"));
1716 return 0;
1717 }
1718 break;
1719 default:
1720 assert (0);
1721 return 0;
1722 }
1723 /* Is the immediate valid? */
1724 assert (idx == 1);
1725 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1726 {
1727 /* uimm8 */
1728 if (!value_in_range_p (opnd->imm.value, 0, 255))
1729 {
1730 set_imm_out_of_range_error (mismatch_detail, idx, 0, 255);
1731 return 0;
1732 }
1733 }
1734 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1735 {
1736 /* uimm64 is not
1737 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1738 ffffffffgggggggghhhhhhhh'. */
1739 set_other_error (mismatch_detail, idx,
1740 _("invalid value for immediate"));
1741 return 0;
1742 }
1743 /* Is the shift amount valid? */
1744 switch (opnd->shifter.kind)
1745 {
1746 case AARCH64_MOD_LSL:
1747 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
 1748 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
 1749 {
 1750 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
 1751 (size - 1) * 8);
 1752 return 0;
 1753 }
 1754 if (!value_aligned_p (opnd->shifter.amount, 8))
 1755 {
 1756 set_unaligned_error (mismatch_detail, idx, 8);
1757 return 0;
1758 }
1759 break;
1760 case AARCH64_MOD_MSL:
 1761 /* Only 8 and 16 are valid shift amounts. */
1762 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1763 {
1764 set_other_error (mismatch_detail, idx,
1765 _("shift amount expected to be 0 or 16"));
1766 return 0;
1767 }
1768 break;
1769 default:
1770 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1771 {
1772 set_other_error (mismatch_detail, idx,
1773 _("invalid shift operator"));
1774 return 0;
1775 }
1776 break;
1777 }
1778 break;
1779
1780 case AARCH64_OPND_FPIMM:
1781 case AARCH64_OPND_SIMD_FPIMM:
1782 if (opnd->imm.is_fp == 0)
1783 {
1784 set_other_error (mismatch_detail, idx,
1785 _("floating-point immediate expected"));
1786 return 0;
1787 }
1788 /* The value is expected to be an 8-bit floating-point constant with
1789 sign, 3-bit exponent and normalized 4 bits of precision, encoded
1790 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1791 instruction). */
1792 if (!value_in_range_p (opnd->imm.value, 0, 255))
1793 {
1794 set_other_error (mismatch_detail, idx,
1795 _("immediate out of range"));
1796 return 0;
1797 }
1798 if (opnd->shifter.kind != AARCH64_MOD_NONE)
1799 {
1800 set_other_error (mismatch_detail, idx,
1801 _("invalid shift operator"));
1802 return 0;
1803 }
1804 break;
1805
1806 default:
1807 break;
1808 }
1809 break;
1810
1811 case AARCH64_OPND_CLASS_CP_REG:
1812 /* Cn or Cm: 4-bit opcode field named for historical reasons.
 1813 Valid range: C0 - C15. */
1814 if (opnd->reg.regno > 15)
1815 {
1816 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1817 return 0;
1818 }
1819 break;
1820
1821 case AARCH64_OPND_CLASS_SYSTEM:
1822 switch (type)
1823 {
1824 case AARCH64_OPND_PSTATEFIELD:
1825 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1826 /* MSR SPSel, #uimm4
1827 Uses uimm4 as a control value to select the stack pointer: if
1828 bit 0 is set it selects the current exception level's stack
1829 pointer, if bit 0 is clear it selects shared EL0 stack pointer.
1830 Bits 1 to 3 of uimm4 are reserved and should be zero. */
1831 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1832 {
1833 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1834 return 0;
1835 }
1836 break;
1837 default:
1838 break;
1839 }
1840 break;
1841
1842 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1843 /* Get the upper bound for the element index. */
1844 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1845 /* Index out-of-range. */
1846 if (!value_in_range_p (opnd->reglane.index, 0, num))
1847 {
1848 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1849 return 0;
1850 }
1851 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
 1852 <Vm> is the vector register (V0-V31) or (V0-V15), whose
1853 number is encoded in "size:M:Rm":
1854 size <Vm>
1855 00 RESERVED
1856 01 0:Rm
1857 10 M:Rm
1858 11 RESERVED */
1859 if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1860 && !value_in_range_p (opnd->reglane.regno, 0, 15))
1861 {
1862 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1863 return 0;
1864 }
1865 break;
1866
1867 case AARCH64_OPND_CLASS_MODIFIED_REG:
1868 assert (idx == 1 || idx == 2);
1869 switch (type)
1870 {
1871 case AARCH64_OPND_Rm_EXT:
1872 if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1873 && opnd->shifter.kind != AARCH64_MOD_LSL)
1874 {
1875 set_other_error (mismatch_detail, idx,
1876 _("extend operator expected"));
1877 return 0;
1878 }
1879 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1880 (i.e. SP), in which case it defaults to LSL. The LSL alias is
1881 only valid when "Rd" or "Rn" is '11111', and is preferred in that
1882 case. */
1883 if (!aarch64_stack_pointer_p (opnds + 0)
1884 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1885 {
1886 if (!opnd->shifter.operator_present)
1887 {
1888 set_other_error (mismatch_detail, idx,
1889 _("missing extend operator"));
1890 return 0;
1891 }
1892 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1893 {
1894 set_other_error (mismatch_detail, idx,
1895 _("'LSL' operator not allowed"));
1896 return 0;
1897 }
1898 }
1899 assert (opnd->shifter.operator_present /* Default to LSL. */
1900 || opnd->shifter.kind == AARCH64_MOD_LSL);
1901 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1902 {
1903 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1904 return 0;
1905 }
1906 /* In the 64-bit form, the final register operand is written as Wm
1907 for all but the (possibly omitted) UXTX/LSL and SXTX
1908 operators.
1909 N.B. GAS allows an X register to be used with any operator as a
1910 programming convenience. */
1911 if (qualifier == AARCH64_OPND_QLF_X
1912 && opnd->shifter.kind != AARCH64_MOD_LSL
1913 && opnd->shifter.kind != AARCH64_MOD_UXTX
1914 && opnd->shifter.kind != AARCH64_MOD_SXTX)
1915 {
1916 set_other_error (mismatch_detail, idx, _("W register expected"));
1917 return 0;
1918 }
1919 break;
1920
1921 case AARCH64_OPND_Rm_SFT:
1922 /* ROR is not available to the shifted register operand in
1923 arithmetic instructions. */
1924 if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
1925 {
1926 set_other_error (mismatch_detail, idx,
1927 _("shift operator expected"));
1928 return 0;
1929 }
1930 if (opnd->shifter.kind == AARCH64_MOD_ROR
1931 && opcode->iclass != log_shift)
1932 {
1933 set_other_error (mismatch_detail, idx,
1934 _("'ROR' operator not allowed"));
1935 return 0;
1936 }
1937 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
1938 if (!value_in_range_p (opnd->shifter.amount, 0, num))
1939 {
1940 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
1941 return 0;
1942 }
1943 break;
1944
1945 default:
1946 break;
1947 }
1948 break;
1949
1950 default:
1951 break;
1952 }
1953
1954 return 1;
1955}
1956
1957/* Main entrypoint for the operand constraint checking.
1958
1959 Return 1 if operands of *INST meet the constraint applied by the operand
1960 codes and operand qualifiers; otherwise return 0 and, if MISMATCH_DETAIL is
1961 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
1962 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
1963 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
1964 error kind when it is notified that an instruction does not pass the check).
1965
1966 Un-determined operand qualifiers may get established during the process. */
1967
1968int
1969aarch64_match_operands_constraint (aarch64_inst *inst,
1970 aarch64_operand_error *mismatch_detail)
1971{
1972 int i;
1973
1974 DEBUG_TRACE ("enter");
1975
1976 /* Match operands' qualifier.
1977 *INST has already had qualifiers established for some, if not all, of
1978 its operands; we need to find out whether these established
1979 qualifiers match one of the qualifier sequence in
1980 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
1981 with the corresponding qualifier in such a sequence.
1982 Only basic operand constraint checking is done here; the more thorough
1983 constraint checking will be carried out by operand_general_constraint_met_p,
1984 which has to be called after this in order to get all of the operands'
1985 qualifiers established. */
1986 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
1987 {
1988 DEBUG_TRACE ("FAIL on operand qualifier matching");
1989 if (mismatch_detail)
1990 {
1991 /* Return an error type to indicate that it is the qualifier
1992 matching failure; we don't care about which operand as there
1993 is enough information in the opcode table to reproduce it. */
1994 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
1995 mismatch_detail->index = -1;
1996 mismatch_detail->error = NULL;
1997 }
1998 return 0;
1999 }
2000
2001 /* Match operands' constraint. */
2002 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2003 {
2004 enum aarch64_opnd type = inst->opcode->operands[i];
2005 if (type == AARCH64_OPND_NIL)
2006 break;
2007 if (inst->operands[i].skip)
2008 {
2009 DEBUG_TRACE ("skip the incomplete operand %d", i);
2010 continue;
2011 }
2012 if (operand_general_constraint_met_p (inst->operands, i, type,
2013 inst->opcode, mismatch_detail) == 0)
2014 {
2015 DEBUG_TRACE ("FAIL on operand %d", i);
2016 return 0;
2017 }
2018 }
2019
2020 DEBUG_TRACE ("PASS");
2021
2022 return 1;
2023}
2024
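/* Illustrative usage sketch (hypothetical caller, not part of this file):
   once an instruction has been parsed or decoded into an aarch64_inst,
   the constraint check is typically driven as follows:

     aarch64_inst inst;
     aarch64_operand_error detail;
     ...fill in inst.opcode and inst.operands...
     if (!aarch64_match_operands_constraint (&inst, &detail))
       report_operand_error (&detail);

   where report_operand_error is a hypothetical reporter and DETAIL.KIND is
   expected to carry an error kind other than AARCH64_OPDE_NIL.  */
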
2025/* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2026 Also updates the TYPE of each INST->OPERANDS with the corresponding
2027 value of OPCODE->OPERANDS.
2028
2029 Note that some operand qualifiers may need to be manually cleared by
2030 the caller before it further calls aarch64_opcode_encode;
2031 doing so helps the qualifier matching facilities work
2032 properly. */
2033
2034const aarch64_opcode*
2035aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2036{
2037 int i;
2038 const aarch64_opcode *old = inst->opcode;
2039
2040 inst->opcode = opcode;
2041
2042 /* Update the operand types. */
2043 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2044 {
2045 inst->operands[i].type = opcode->operands[i];
2046 if (opcode->operands[i] == AARCH64_OPND_NIL)
2047 break;
2048 }
2049
2050 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2051
2052 return old;
2053}
2054
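/* Illustrative sketch (hypothetical): a caller that switches INST to an
   alias entry before re-encoding may also reset a qualifier so that the
   matching code can re-establish it:

     aarch64_replace_opcode (inst, alias);   // ALIAS looked up elsewhere
     inst->operands[1].qualifier = AARCH64_OPND_QLF_NIL;
*/
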
2055int
2056aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2057{
2058 int i;
2059 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2060 if (operands[i] == operand)
2061 return i;
2062 else if (operands[i] == AARCH64_OPND_NIL)
2063 break;
2064 return -1;
2065}
2066\f
2067/* [0][0] 32-bit integer regs with sp Wn
2068 [0][1] 64-bit integer regs with sp Xn sf=1
2069 [1][0] 32-bit integer regs with #0 Wn
2070 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2071static const char *int_reg[2][2][32] = {
2072#define R32 "w"
2073#define R64 "x"
2074 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2075 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2076 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2077 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
2078 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2079 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2080 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2081 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
2082 { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
2083 R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2084 R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2085 R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
2086 { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
2087 R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2088 R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2089 R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
2090#undef R64
2091#undef R32
2092};
2093
2094/* Return the integer register name.
2095 If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg. */
2096
2097static inline const char *
2098get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2099{
2100 const int has_zr = sp_reg_p ? 0 : 1;
2101 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2102 return int_reg[has_zr][is_64][regno];
2103}
2104
2105/* Like get_int_reg_name, but IS_64 is always 1. */
2106
2107static inline const char *
2108get_64bit_int_reg_name (int regno, int sp_reg_p)
2109{
2110 const int has_zr = sp_reg_p ? 0 : 1;
2111 return int_reg[has_zr][1][regno];
2112}
2113
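/* For example, with the table above:
     get_int_reg_name (0, AARCH64_OPND_QLF_W, 0)   returns "w0",
     get_int_reg_name (31, AARCH64_OPND_QLF_X, 1)  returns "sp",
     get_int_reg_name (31, AARCH64_OPND_QLF_X, 0)  returns "xzr",
     get_64bit_int_reg_name (31, 1)                returns "sp".  */
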
2114/* Types for expanding an encoded 8-bit value to a floating-point value. */
2115
2116typedef union
2117{
2118 uint64_t i;
2119 double d;
2120} double_conv_t;
2121
2122typedef union
2123{
2124 uint32_t i;
2125 float f;
2126} single_conv_t;
2127
2128/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2129 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2130 (depending on the type of the instruction). IMM8 will be expanded to a
2131 single-precision floating-point value (IS_DP == 0) or a double-precision
2132 floating-point value (IS_DP == 1). The expanded value is returned. */
2133
2134static uint64_t
2135expand_fp_imm (int is_dp, uint32_t imm8)
2136{
2137 uint64_t imm;
2138 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2139
2140 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2141 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2142 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2143 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2144 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2145 if (is_dp)
2146 {
2147 imm = (imm8_7 << (63-32)) /* imm8<7> */
2148 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2149 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2150 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2151 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2152 imm <<= 32;
2153 }
2154 else
2155 {
2156 imm = (imm8_7 << 31) /* imm8<7> */
2157 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2158 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2159 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2160 }
2161
2162 return imm;
2163}
2164
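/* Worked example (for illustration): IMM8 == 0x70, i.e. a:b:c:d:e:f:g:h ==
   0:1:1:1:0:0:0:0, gives imm8_7 == 0, imm8_6 == 1 and imm8_6_0 == 0x70.
   With IS_DP == 0 the expansion is
     (0 << 31) | (0 << 30) | (0xf << 26) | (0x70 << 19) == 0x3f800000,
   which is the single-precision encoding of 1.0 (as used by FMOV <Sd>, #1.0).  */
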
2165/* Produce the string representation of the register list operand *OPND
2166 in the buffer pointed by BUF of size SIZE. */
2167static void
2168print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
2169{
2170 const int num_regs = opnd->reglist.num_regs;
2171 const int first_reg = opnd->reglist.first_regno;
2172 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2173 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2174 char tb[8]; /* Temporary buffer. */
2175
2176 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2177 assert (num_regs >= 1 && num_regs <= 4);
2178
2179 /* Prepare the index if any. */
2180 if (opnd->reglist.has_index)
2181 snprintf (tb, 8, "[%d]", opnd->reglist.index);
2182 else
2183 tb[0] = '\0';
2184
2185 /* The hyphenated form is preferred for disassembly if there are
2186 more than two registers in the list, and the register numbers
2187 are monotonically increasing in increments of one. */
2188 if (num_regs > 2 && last_reg > first_reg)
2189 snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2190 last_reg, qlf_name, tb);
2191 else
2192 {
2193 const int reg0 = first_reg;
2194 const int reg1 = (first_reg + 1) & 0x1f;
2195 const int reg2 = (first_reg + 2) & 0x1f;
2196 const int reg3 = (first_reg + 3) & 0x1f;
2197
2198 switch (num_regs)
2199 {
2200 case 1:
2201 snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2202 break;
2203 case 2:
2204 snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2205 reg1, qlf_name, tb);
2206 break;
2207 case 3:
2208 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2209 reg1, qlf_name, reg2, qlf_name, tb);
2210 break;
2211 case 4:
2212 snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2213 reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2214 reg3, qlf_name, tb);
2215 break;
2216 }
2217 }
2218}
2219
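/* For example (illustrative), with the 16b qualifier and no index:
     first_regno == 1,  num_regs == 4   gives "{v1.16b-v4.16b}",
   while a list that wraps around the end of the register bank uses the
   enumerated form:
     first_regno == 30, num_regs == 3   gives "{v30.16b, v31.16b, v0.16b}".  */
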
2220/* Produce the string representation of the register offset address operand
2221 *OPND in the buffer pointed by BUF of size SIZE. */
2222static void
2223print_register_offset_address (char *buf, size_t size,
2224 const aarch64_opnd_info *opnd)
2225{
2226 const size_t tblen = 16;
2227 char tb[tblen]; /* Temporary buffer. */
2228 bfd_boolean lsl_p = FALSE; /* Is the shift operator LSL? */
2229 bfd_boolean wm_p = FALSE; /* Should Rm be Wm? */
2230 bfd_boolean print_extend_p = TRUE;
2231 bfd_boolean print_amount_p = TRUE;
2232 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2233
2234 switch (opnd->shifter.kind)
2235 {
2236 case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2237 case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2238 case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2239 case AARCH64_MOD_SXTX: break;
2240 default: assert (0);
2241 }
2242
2243 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2244 || !opnd->shifter.amount_present))
2245 {
2246 /* Don't print the shift/extend amount when the amount is zero and
2247 it is not the special case of an 8-bit load/store instruction. */
2248 print_amount_p = FALSE;
2249 /* Likewise, no need to print the shift operator LSL in such a
2250 situation. */
2251 if (lsl_p)
2252 print_extend_p = FALSE;
2253 }
2254
2255 /* Prepare for the extend/shift. */
2256 if (print_extend_p)
2257 {
2258 if (print_amount_p)
2259 snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
2260 else
2261 snprintf (tb, tblen, ",%s", shift_name);
2262 }
2263 else
2264 tb[0] = '\0';
2265
2266 snprintf (buf, size, "[%s,%c%d%s]",
2267 get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2268 wm_p ? 'w' : 'x', opnd->addr.offset.regno, tb);
2269}
2270
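/* For example (illustrative): BASE_REGNO == 1, OFFSET.REGNO == 2 and a
   UXTW shifter with amount 2 give "[x1,w2,uxtw #2]", whereas an LSL
   shifter with a zero amount (for a non-byte access) is omitted entirely,
   giving just "[x1,x2]".  */
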
2271/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2272 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
2273 PC, PCREL_P and ADDRESS are used to pass in and return information about
2274 the PC-relative address calculation, where the PC value is passed in
2275 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
2276 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2277 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2278
2279 The function serves both the disassembler and the assembler diagnostics
2280 issuer, which is the reason why it lives in this file. */
2281
2282void
2283aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2284 const aarch64_opcode *opcode,
2285 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2286 bfd_vma *address)
2287{
2288 int i;
2289 const char *name = NULL;
2290 const aarch64_opnd_info *opnd = opnds + idx;
2291 enum aarch64_modifier_kind kind;
2292 uint64_t addr;
2293
2294 buf[0] = '\0';
2295 if (pcrel_p)
2296 *pcrel_p = 0;
2297
2298 switch (opnd->type)
2299 {
2300 case AARCH64_OPND_Rd:
2301 case AARCH64_OPND_Rn:
2302 case AARCH64_OPND_Rm:
2303 case AARCH64_OPND_Rt:
2304 case AARCH64_OPND_Rt2:
2305 case AARCH64_OPND_Rs:
2306 case AARCH64_OPND_Ra:
2307 case AARCH64_OPND_Rt_SYS:
2308 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2309 the <ic_op>, therefore we use opnd->present to override the
2310 generic optional-ness information. */
2311 if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2312 break;
2313 /* Omit the operand, e.g. RET. */
2314 if (optional_operand_p (opcode, idx)
2315 && opnd->reg.regno == get_optional_operand_default_value (opcode))
2316 break;
2317 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2318 || opnd->qualifier == AARCH64_OPND_QLF_X);
2319 snprintf (buf, size, "%s",
2320 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2321 break;
2322
2323 case AARCH64_OPND_Rd_SP:
2324 case AARCH64_OPND_Rn_SP:
2325 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2326 || opnd->qualifier == AARCH64_OPND_QLF_WSP
2327 || opnd->qualifier == AARCH64_OPND_QLF_X
2328 || opnd->qualifier == AARCH64_OPND_QLF_SP);
2329 snprintf (buf, size, "%s",
2330 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2331 break;
2332
2333 case AARCH64_OPND_Rm_EXT:
2334 kind = opnd->shifter.kind;
2335 assert (idx == 1 || idx == 2);
2336 if ((aarch64_stack_pointer_p (opnds)
2337 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2338 && ((opnd->qualifier == AARCH64_OPND_QLF_W
2339 && opnds[0].qualifier == AARCH64_OPND_QLF_W
2340 && kind == AARCH64_MOD_UXTW)
2341 || (opnd->qualifier == AARCH64_OPND_QLF_X
2342 && kind == AARCH64_MOD_UXTX)))
2343 {
2344 /* 'LSL' is the preferred form in this case. */
2345 kind = AARCH64_MOD_LSL;
2346 if (opnd->shifter.amount == 0)
2347 {
2348 /* Shifter omitted. */
2349 snprintf (buf, size, "%s",
2350 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2351 break;
2352 }
2353 }
2354 if (opnd->shifter.amount)
2355 snprintf (buf, size, "%s, %s #%d",
2356 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2357 aarch64_operand_modifiers[kind].name,
2358 opnd->shifter.amount);
2359 else
2360 snprintf (buf, size, "%s, %s",
2361 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2362 aarch64_operand_modifiers[kind].name);
2363 break;
2364
2365 case AARCH64_OPND_Rm_SFT:
2366 assert (opnd->qualifier == AARCH64_OPND_QLF_W
2367 || opnd->qualifier == AARCH64_OPND_QLF_X);
2368 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2369 snprintf (buf, size, "%s",
2370 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2371 else
2372 snprintf (buf, size, "%s, %s #%d",
2373 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2374 aarch64_operand_modifiers[opnd->shifter.kind].name,
2375 opnd->shifter.amount);
2376 break;
2377
2378 case AARCH64_OPND_Fd:
2379 case AARCH64_OPND_Fn:
2380 case AARCH64_OPND_Fm:
2381 case AARCH64_OPND_Fa:
2382 case AARCH64_OPND_Ft:
2383 case AARCH64_OPND_Ft2:
2384 case AARCH64_OPND_Sd:
2385 case AARCH64_OPND_Sn:
2386 case AARCH64_OPND_Sm:
2387 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
2388 opnd->reg.regno);
2389 break;
2390
2391 case AARCH64_OPND_Vd:
2392 case AARCH64_OPND_Vn:
2393 case AARCH64_OPND_Vm:
2394 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2395 aarch64_get_qualifier_name (opnd->qualifier));
2396 break;
2397
2398 case AARCH64_OPND_Ed:
2399 case AARCH64_OPND_En:
2400 case AARCH64_OPND_Em:
2401 snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno,
2402 aarch64_get_qualifier_name (opnd->qualifier),
2403 opnd->reglane.index);
2404 break;
2405
2406 case AARCH64_OPND_VdD1:
2407 case AARCH64_OPND_VnD1:
2408 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
2409 break;
2410
2411 case AARCH64_OPND_LVn:
2412 case AARCH64_OPND_LVt:
2413 case AARCH64_OPND_LVt_AL:
2414 case AARCH64_OPND_LEt:
2415 print_register_list (buf, size, opnd);
2416 break;
2417
2418 case AARCH64_OPND_Cn:
2419 case AARCH64_OPND_Cm:
2420 snprintf (buf, size, "C%d", opnd->reg.regno);
2421 break;
2422
2423 case AARCH64_OPND_IDX:
2424 case AARCH64_OPND_IMM:
2425 case AARCH64_OPND_WIDTH:
2426 case AARCH64_OPND_UIMM3_OP1:
2427 case AARCH64_OPND_UIMM3_OP2:
2428 case AARCH64_OPND_BIT_NUM:
2429 case AARCH64_OPND_IMM_VLSL:
2430 case AARCH64_OPND_IMM_VLSR:
2431 case AARCH64_OPND_SHLL_IMM:
2432 case AARCH64_OPND_IMM0:
2433 case AARCH64_OPND_IMMR:
2434 case AARCH64_OPND_IMMS:
2435 case AARCH64_OPND_FBITS:
2436 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2437 break;
2438
2439 case AARCH64_OPND_IMM_MOV:
2440 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2441 {
2442 case 4: /* e.g. MOV Wd, #<imm32>. */
2443 {
2444 int imm32 = opnd->imm.value;
2445 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
2446 }
2447 break;
2448 case 8: /* e.g. MOV Xd, #<imm64>. */
2449 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2450 opnd->imm.value, opnd->imm.value);
2451 break;
2452 default: assert (0);
2453 }
2454 break;
2455
2456 case AARCH64_OPND_FPIMM0:
2457 snprintf (buf, size, "#0.0");
2458 break;
2459
2460 case AARCH64_OPND_LIMM:
2461 case AARCH64_OPND_AIMM:
2462 case AARCH64_OPND_HALF:
2463 if (opnd->shifter.amount)
2464 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
2465 opnd->shifter.amount);
2466 else
2467 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2468 break;
2469
2470 case AARCH64_OPND_SIMD_IMM:
2471 case AARCH64_OPND_SIMD_IMM_SFT:
2472 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2473 || opnd->shifter.kind == AARCH64_MOD_NONE)
2474 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2475 else
2476 snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
2477 aarch64_operand_modifiers[opnd->shifter.kind].name,
2478 opnd->shifter.amount);
2479 break;
2480
2481 case AARCH64_OPND_FPIMM:
2482 case AARCH64_OPND_SIMD_FPIMM:
2483 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2484 {
2485 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
2486 {
2487 single_conv_t c;
2488 c.i = expand_fp_imm (0, opnd->imm.value);
2489 snprintf (buf, size, "#%.18e", c.f);
2490 }
2491 break;
2492 case 8: /* e.g. FMOV <Dd>, #<imm>. */
2493 {
2494 double_conv_t c;
2495 c.i = expand_fp_imm (1, opnd->imm.value);
2496 snprintf (buf, size, "#%.18e", c.d);
2497 }
2498 break;
2499 default: assert (0);
2500 }
2501 break;
2502
2503 case AARCH64_OPND_CCMP_IMM:
2504 case AARCH64_OPND_NZCV:
2505 case AARCH64_OPND_EXCEPTION:
2506 case AARCH64_OPND_UIMM4:
2507 case AARCH64_OPND_UIMM7:
2508 if (optional_operand_p (opcode, idx) == TRUE
2509 && (opnd->imm.value ==
2510 (int64_t) get_optional_operand_default_value (opcode)))
2511 /* Omit the operand, e.g. DCPS1. */
2512 break;
2513 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
2514 break;
2515
2516 case AARCH64_OPND_COND:
2517 snprintf (buf, size, "%s", opnd->cond->names[0]);
2518 break;
2519
2520 case AARCH64_OPND_ADDR_ADRP:
2521 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
2522 + opnd->imm.value;
2523 if (pcrel_p)
2524 *pcrel_p = 1;
2525 if (address)
2526 *address = addr;
2527 /* This is not necessary during disassembly, as print_address_func
2528 in the disassemble_info will take care of the printing. But some
2529 other callers may still be interested in getting the string in *BUF,
2530 so here we do snprintf regardless. */
2531 snprintf (buf, size, "#0x%" PRIx64, addr);
2532 break;
2533
2534 case AARCH64_OPND_ADDR_PCREL14:
2535 case AARCH64_OPND_ADDR_PCREL19:
2536 case AARCH64_OPND_ADDR_PCREL21:
2537 case AARCH64_OPND_ADDR_PCREL26:
2538 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
2539 if (pcrel_p)
2540 *pcrel_p = 1;
2541 if (address)
2542 *address = addr;
2543 /* This is not necessary during disassembly, as print_address_func
2544 in the disassemble_info will take care of the printing. But some
2545 other callers may still be interested in getting the string in *BUF,
2546 so here we do snprintf regardless. */
2547 snprintf (buf, size, "#0x%" PRIx64, addr);
2548 break;
2549
2550 case AARCH64_OPND_ADDR_SIMPLE:
2551 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
2552 case AARCH64_OPND_SIMD_ADDR_POST:
2553 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2554 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
2555 {
2556 if (opnd->addr.offset.is_reg)
2557 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
2558 else
2559 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
2560 }
2561 else
2562 snprintf (buf, size, "[%s]", name);
2563 break;
2564
2565 case AARCH64_OPND_ADDR_REGOFF:
2566 print_register_offset_address (buf, size, opnd);
2567 break;
2568
2569 case AARCH64_OPND_ADDR_SIMM7:
2570 case AARCH64_OPND_ADDR_SIMM9:
2571 case AARCH64_OPND_ADDR_SIMM9_2:
2572 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2573 if (opnd->addr.writeback)
2574 {
2575 if (opnd->addr.preind)
2576 snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
2577 else
2578 snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
2579 }
2580 else
2581 {
2582 if (opnd->addr.offset.imm)
2583 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2584 else
2585 snprintf (buf, size, "[%s]", name);
2586 }
2587 break;
2588
2589 case AARCH64_OPND_ADDR_UIMM12:
2590 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2591 if (opnd->addr.offset.imm)
2592 snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2593 else
2594 snprintf (buf, size, "[%s]", name);
2595 break;
2596
2597 case AARCH64_OPND_SYSREG:
2598 for (i = 0; aarch64_sys_regs[i].name; ++i)
2599 if (aarch64_sys_regs[i].value == opnd->sysreg)
2600 break;
2601 if (aarch64_sys_regs[i].name)
2602 snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
2603 else
2604 {
2605 /* Implementation defined system register. */
2606 unsigned int value = opnd->sysreg;
2607 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
2608 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
2609 value & 0x7);
2610 }
2611 break;
2612
2613 case AARCH64_OPND_PSTATEFIELD:
2614 for (i = 0; aarch64_pstatefields[i].name; ++i)
2615 if (aarch64_pstatefields[i].value == opnd->pstatefield)
2616 break;
2617 assert (aarch64_pstatefields[i].name);
2618 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
2619 break;
2620
2621 case AARCH64_OPND_SYSREG_AT:
2622 case AARCH64_OPND_SYSREG_DC:
2623 case AARCH64_OPND_SYSREG_IC:
2624 case AARCH64_OPND_SYSREG_TLBI:
2625 snprintf (buf, size, "%s", opnd->sysins_op->template);
2626 break;
2627
2628 case AARCH64_OPND_BARRIER:
2629 snprintf (buf, size, "%s", opnd->barrier->name);
2630 break;
2631
2632 case AARCH64_OPND_BARRIER_ISB:
2633 /* Operand can be omitted, e.g. in DCPS1. */
2634 if (! optional_operand_p (opcode, idx)
2635 || (opnd->barrier->value
2636 != get_optional_operand_default_value (opcode)))
2637 snprintf (buf, size, "#0x%x", opnd->barrier->value);
2638 break;
2639
2640 case AARCH64_OPND_PRFOP:
2641 snprintf (buf, size, "%s", opnd->prfop->name);
2642 break;
2643
2644 default:
2645 assert (0);
2646 }
2647}
2648\f
2649#define CPENC(op0,op1,crn,crm,op2) \
2650 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2651 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
2652#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2653 /* for 3.9.10 System Instructions */
2654#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
2655
2656#define C0 0
2657#define C1 1
2658#define C2 2
2659#define C3 3
2660#define C4 4
2661#define C5 5
2662#define C6 6
2663#define C7 7
2664#define C8 8
2665#define C9 9
2666#define C10 10
2667#define C11 11
2668#define C12 12
2669#define C13 13
2670#define C14 14
2671#define C15 15
2672
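/* For example (illustrative): CPENC(3,3,C13,C0,2), used for tpidr_el0
   below, packs op0:op1:CRn:CRm:op2 into the value 0xde82; the SYSREG
   printing code above can recover the individual fields from it, giving
   the form s3_3_c13_c0_2 that is used for registers not named in the
   table.  */
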
2673 /* TODO: there are two more issues that need to be resolved:
2674 1. handle read-only and write-only system registers;
2675 2. handle cpu-implementation-defined system registers. */
2676const struct aarch64_name_value_pair aarch64_sys_regs [] =
2677{
2678 { "spsr_el1", CPEN_(0,C0,0) }, /* = spsr_svc */
2679 { "elr_el1", CPEN_(0,C0,1) },
2680 { "sp_el0", CPEN_(0,C1,0) },
2681 { "spsel", CPEN_(0,C2,0) },
2682 { "daif", CPEN_(3,C2,1) },
2683 { "currentel", CPEN_(0,C2,2) }, /* RO */
2684 { "nzcv", CPEN_(3,C2,0) },
2685 { "fpcr", CPEN_(3,C4,0) },
2686 { "fpsr", CPEN_(3,C4,1) },
2687 { "dspsr_el0", CPEN_(3,C5,0) },
2688 { "dlr_el0", CPEN_(3,C5,1) },
2689 { "spsr_el2", CPEN_(4,C0,0) }, /* = spsr_hyp */
2690 { "elr_el2", CPEN_(4,C0,1) },
2691 { "sp_el1", CPEN_(4,C1,0) },
2692 { "spsr_irq", CPEN_(4,C3,0) },
2693 { "spsr_abt", CPEN_(4,C3,1) },
2694 { "spsr_und", CPEN_(4,C3,2) },
2695 { "spsr_fiq", CPEN_(4,C3,3) },
2696 { "spsr_el3", CPEN_(6,C0,0) },
2697 { "elr_el3", CPEN_(6,C0,1) },
2698 { "sp_el2", CPEN_(6,C1,0) },
2699 { "spsr_svc", CPEN_(0,C0,0) }, /* = spsr_el1 */
2700 { "spsr_hyp", CPEN_(4,C0,0) }, /* = spsr_el2 */
2701 { "midr_el1", CPENC(3,0,C0,C0,0) }, /* RO */
2702 { "ctr_el0", CPENC(3,3,C0,C0,1) }, /* RO */
2703 { "mpidr_el1", CPENC(3,0,C0,C0,5) }, /* RO */
2704 { "revidr_el1", CPENC(3,0,C0,C0,6) }, /* RO */
2705 { "aidr_el1", CPENC(3,1,C0,C0,7) }, /* RO */
2706 { "dczid_el0", CPENC(3,3,C0,C0,7) }, /* RO */
2707 { "id_dfr0_el1", CPENC(3,0,C0,C1,2) }, /* RO */
2708 { "id_pfr0_el1", CPENC(3,0,C0,C1,0) }, /* RO */
2709 { "id_pfr1_el1", CPENC(3,0,C0,C1,1) }, /* RO */
2710 { "id_afr0_el1", CPENC(3,0,C0,C1,3) }, /* RO */
2711 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4) }, /* RO */
2712 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5) }, /* RO */
2713 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6) }, /* RO */
2714 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7) }, /* RO */
2715 { "id_isar0_el1", CPENC(3,0,C0,C2,0) }, /* RO */
2716 { "id_isar1_el1", CPENC(3,0,C0,C2,1) }, /* RO */
2717 { "id_isar2_el1", CPENC(3,0,C0,C2,2) }, /* RO */
2718 { "id_isar3_el1", CPENC(3,0,C0,C2,3) }, /* RO */
2719 { "id_isar4_el1", CPENC(3,0,C0,C2,4) }, /* RO */
2720 { "id_isar5_el1", CPENC(3,0,C0,C2,5) }, /* RO */
2721 { "mvfr0_el1", CPENC(3,0,C0,C3,0) }, /* RO */
2722 { "mvfr1_el1", CPENC(3,0,C0,C3,1) }, /* RO */
2723 { "mvfr2_el1", CPENC(3,0,C0,C3,2) }, /* RO */
2724 { "ccsidr_el1", CPENC(3,1,C0,C0,0) }, /* RO */
2725 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0) }, /* RO */
2726 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1) }, /* RO */
2727 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0) }, /* RO */
2728 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1) }, /* RO */
2729 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0) }, /* RO */
2730 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1) }, /* RO */
2731 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0) }, /* RO */
2732 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1) }, /* RO */
2733 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4) }, /* RO */
2734 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5) }, /* RO */
2735 { "clidr_el1", CPENC(3,1,C0,C0,1) }, /* RO */
2736 { "csselr_el1", CPENC(3,2,C0,C0,0) }, /* RO */
2737 { "vpidr_el2", CPENC(3,4,C0,C0,0) },
2738 { "vmpidr_el2", CPENC(3,4,C0,C0,5) },
2739 { "sctlr_el1", CPENC(3,0,C1,C0,0) },
2740 { "sctlr_el2", CPENC(3,4,C1,C0,0) },
2741 { "sctlr_el3", CPENC(3,6,C1,C0,0) },
2742 { "actlr_el1", CPENC(3,0,C1,C0,1) },
2743 { "actlr_el2", CPENC(3,4,C1,C0,1) },
2744 { "actlr_el3", CPENC(3,6,C1,C0,1) },
2745 { "cpacr_el1", CPENC(3,0,C1,C0,2) },
2746 { "cptr_el2", CPENC(3,4,C1,C1,2) },
2747 { "cptr_el3", CPENC(3,6,C1,C1,2) },
2748 { "scr_el3", CPENC(3,6,C1,C1,0) },
2749 { "hcr_el2", CPENC(3,4,C1,C1,0) },
2750 { "mdcr_el2", CPENC(3,4,C1,C1,1) },
2751 { "mdcr_el3", CPENC(3,6,C1,C3,1) },
2752 { "hstr_el2", CPENC(3,4,C1,C1,3) },
2753 { "hacr_el2", CPENC(3,4,C1,C1,7) },
2754 { "ttbr0_el1", CPENC(3,0,C2,C0,0) },
2755 { "ttbr1_el1", CPENC(3,0,C2,C0,1) },
2756 { "ttbr0_el2", CPENC(3,4,C2,C0,0) },
2757 { "ttbr0_el3", CPENC(3,6,C2,C0,0) },
2758 { "vttbr_el2", CPENC(3,4,C2,C1,0) },
2759 { "tcr_el1", CPENC(3,0,C2,C0,2) },
2760 { "tcr_el2", CPENC(3,4,C2,C0,2) },
2761 { "tcr_el3", CPENC(3,6,C2,C0,2) },
2762 { "vtcr_el2", CPENC(3,4,C2,C1,2) },
2763 { "afsr0_el1", CPENC(3,0,C5,C1,0) },
2764 { "afsr1_el1", CPENC(3,0,C5,C1,1) },
2765 { "afsr0_el2", CPENC(3,4,C5,C1,0) },
2766 { "afsr1_el2", CPENC(3,4,C5,C1,1) },
2767 { "afsr0_el3", CPENC(3,6,C5,C1,0) },
2768 { "afsr1_el3", CPENC(3,6,C5,C1,1) },
2769 { "esr_el1", CPENC(3,0,C5,C2,0) },
2770 { "esr_el2", CPENC(3,4,C5,C2,0) },
2771 { "esr_el3", CPENC(3,6,C5,C2,0) },
2772 { "fpexc32_el2", CPENC(3,4,C5,C3,0) },
2773 { "far_el1", CPENC(3,0,C6,C0,0) },
2774 { "far_el2", CPENC(3,4,C6,C0,0) },
2775 { "far_el3", CPENC(3,6,C6,C0,0) },
2776 { "hpfar_el2", CPENC(3,4,C6,C0,4) },
2777 { "par_el1", CPENC(3,0,C7,C4,0) },
2778 { "mair_el1", CPENC(3,0,C10,C2,0) },
2779 { "mair_el2", CPENC(3,4,C10,C2,0) },
2780 { "mair_el3", CPENC(3,6,C10,C2,0) },
2781 { "amair_el1", CPENC(3,0,C10,C3,0) },
2782 { "amair_el2", CPENC(3,4,C10,C3,0) },
2783 { "amair_el3", CPENC(3,6,C10,C3,0) },
2784 { "vbar_el1", CPENC(3,0,C12,C0,0) },
2785 { "vbar_el2", CPENC(3,4,C12,C0,0) },
2786 { "vbar_el3", CPENC(3,6,C12,C0,0) },
2787 { "rvbar_el1", CPENC(3,0,C12,C0,1) }, /* RO */
2788 { "rvbar_el2", CPENC(3,4,C12,C0,1) }, /* RO */
2789 { "rvbar_el3", CPENC(3,6,C12,C0,1) }, /* RO */
2790 { "rmr_el1", CPENC(3,0,C12,C0,2) },
2791 { "rmr_el2", CPENC(3,4,C12,C0,2) },
2792 { "rmr_el3", CPENC(3,6,C12,C0,2) },
2793 { "isr_el1", CPENC(3,0,C12,C1,0) }, /* RO */
2794 { "contextidr_el1", CPENC(3,0,C13,C0,1) },
2795 { "tpidr_el0", CPENC(3,3,C13,C0,2) },
2796 { "tpidrro_el0", CPENC(3,3,C13,C0,3) }, /* RO */
2797 { "tpidr_el1", CPENC(3,0,C13,C0,4) },
2798 { "tpidr_el2", CPENC(3,4,C13,C0,2) },
2799 { "tpidr_el3", CPENC(3,6,C13,C0,2) },
2800 { "teecr32_el1", CPENC(2,2,C0, C0,0) }, /* See section 3.9.7.1 */
2801 { "cntfrq_el0", CPENC(3,3,C14,C0,0) }, /* RO */
2802 { "cntpct_el0", CPENC(3,3,C14,C0,1) }, /* RO */
2803 { "cntvct_el0", CPENC(3,3,C14,C0,2) }, /* RO */
2804 { "cntvoff_el2", CPENC(3,4,C14,C0,3) },
2805 { "cntkctl_el1", CPENC(3,0,C14,C1,0) },
2806 { "cnthctl_el2", CPENC(3,4,C14,C1,0) },
2807 { "cntp_tval_el0", CPENC(3,3,C14,C2,0) },
2808 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1) },
2809 { "cntp_cval_el0", CPENC(3,3,C14,C2,2) },
2810 { "cntv_tval_el0", CPENC(3,3,C14,C3,0) },
2811 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1) },
2812 { "cntv_cval_el0", CPENC(3,3,C14,C3,2) },
2813 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0) },
2814 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1) },
2815 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2) },
2816 { "cntps_tval_el1", CPENC(3,7,C14,C2,0) },
2817 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1) },
2818 { "cntps_cval_el1", CPENC(3,7,C14,C2,2) },
2819 { "dacr32_el2", CPENC(3,4,C3,C0,0) },
2820 { "ifsr32_el2", CPENC(3,4,C5,C0,1) },
2821 { "teehbr32_el1", CPENC(2,2,C1,C0,0) },
2822 { "sder32_el3", CPENC(3,6,C1,C1,1) },
2823 { "mdscr_el1", CPENC(2,0,C0, C2, 2) },
2824 { "mdccsr_el0", CPENC(2,3,C0, C1, 0) }, /* r */
2825 { "mdccint_el1", CPENC(2,0,C0, C2, 0) },
2826 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0) },
2827 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0) }, /* r */
2828 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0) }, /* w */
2829 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2) }, /* r */
2830 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2) }, /* w */
2831 { "oseccr_el1", CPENC(2,0,C0, C6, 2) },
2832 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0) },
2833 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4) },
2834 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4) },
2835 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4) },
2836 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4) },
2837 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4) },
2838 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4) },
2839 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4) },
2840 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4) },
2841 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4) },
2842 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4) },
2843 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4) },
2844 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4) },
2845 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4) },
2846 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4) },
2847 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4) },
2848 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4) },
2849 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5) },
2850 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5) },
2851 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5) },
2852 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5) },
2853 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5) },
2854 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5) },
2855 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5) },
2856 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5) },
2857 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5) },
2858 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5) },
2859 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5) },
2860 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5) },
2861 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5) },
2862 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5) },
2863 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5) },
2864 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5) },
2865 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6) },
2866 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6) },
2867 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6) },
2868 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6) },
2869 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6) },
2870 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6) },
2871 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6) },
2872 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6) },
2873 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6) },
2874 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6) },
2875 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6) },
2876 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6) },
2877 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6) },
2878 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6) },
2879 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6) },
2880 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6) },
2881 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7) },
2882 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7) },
2883 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7) },
2884 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7) },
2885 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7) },
2886 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7) },
2887 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7) },
2888 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7) },
2889 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7) },
2890 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7) },
2891 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7) },
2892 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7) },
2893 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7) },
2894 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7) },
2895 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7) },
2896 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7) },
2897 { "mdrar_el1", CPENC(2,0,C1, C0, 0) }, /* r */
2898 { "oslar_el1", CPENC(2,0,C1, C0, 4) }, /* w */
2899 { "oslsr_el1", CPENC(2,0,C1, C1, 4) }, /* r */
2900 { "osdlr_el1", CPENC(2,0,C1, C3, 4) },
2901 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4) },
2902 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6) },
2903 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6) },
2904 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6) }, /* r */
2905
2906 { "pmcr_el0", CPENC(3,3,C9,C12, 0) },
2907 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1) },
2908 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2) },
2909 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3) },
2910 { "pmswinc_el0", CPENC(3,3,C9,C12, 4) }, /* w */
2911 { "pmselr_el0", CPENC(3,3,C9,C12, 5) },
2912 { "pmceid0_el0", CPENC(3,3,C9,C12, 6) }, /* r */
2913 { "pmceid1_el0", CPENC(3,3,C9,C12, 7) }, /* r */
2914 { "pmccntr_el0", CPENC(3,3,C9,C13, 0) },
2915 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1) },
2916 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2) },
2917 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0) },
2918 { "pmintenset_el1", CPENC(3,0,C9,C14, 1) },
2919 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2) },
2920 { "pmovsset_el0", CPENC(3,3,C9,C14, 3) },
2921 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0) },
2922 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1) },
2923 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2) },
2924 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3) },
2925 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4) },
2926 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5) },
2927 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6) },
2928 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7) },
2929 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0) },
2930 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1) },
2931 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2) },
2932 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3) },
2933 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4) },
2934 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5) },
2935 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6) },
2936 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7) },
2937 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0) },
2938 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1) },
2939 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2) },
2940 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3) },
2941 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4) },
2942 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5) },
2943 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6) },
2944 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7) },
2945 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0) },
2946 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1) },
2947 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2) },
2948 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3) },
2949 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4) },
2950 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5) },
2951 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6) },
2952 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0) },
2953 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1) },
2954 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2) },
2955 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3) },
2956 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4) },
2957 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5) },
2958 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6) },
2959 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7) },
2960 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0) },
2961 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1) },
2962 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2) },
2963 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3) },
2964 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4) },
2965 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5) },
2966 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6) },
2967 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7) },
2968 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0) },
2969 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1) },
2970 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2) },
2971 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3) },
2972 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4) },
2973 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5) },
2974 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6) },
2975 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7) },
2976 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0) },
2977 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1) },
2978 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2) },
2979 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3) },
2980 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4) },
2981 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5) },
2982 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6) },
2983 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7) },
2984 { 0, CPENC(0,0,0,0,0) },
2985};
2986
2987const struct aarch64_name_value_pair aarch64_pstatefields [] =
2988{
2989 { "spsel", 0x05 },
2990 { "daifset", 0x1e },
2991 { "daifclr", 0x1f },
2992 { 0, CPENC(0,0,0,0,0) },
2993};
2994
2995const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
2996{
2997 { "ialluis", CPENS(0,C7,C1,0), 0 },
2998 { "iallu", CPENS(0,C7,C5,0), 0 },
2999 { "ivau", CPENS(3,C7,C5,1), 1 },
3000 { 0, CPENS(0,0,0,0), 0 }
3001};
3002
3003const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3004{
3005 { "zva", CPENS(3,C7,C4,1), 1 },
3006 { "ivac", CPENS(0,C7,C6,1), 1 },
3007 { "isw", CPENS(0,C7,C6,2), 1 },
3008 { "cvac", CPENS(3,C7,C10,1), 1 },
3009 { "csw", CPENS(0,C7,C10,2), 1 },
3010 { "cvau", CPENS(3,C7,C11,1), 1 },
3011 { "civac", CPENS(3,C7,C14,1), 1 },
3012 { "cisw", CPENS(0,C7,C14,2), 1 },
3013 { 0, CPENS(0,0,0,0), 0 }
3014};
3015
3016const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3017{
3018 { "s1e1r", CPENS(0,C7,C8,0), 1 },
3019 { "s1e1w", CPENS(0,C7,C8,1), 1 },
3020 { "s1e0r", CPENS(0,C7,C8,2), 1 },
3021 { "s1e0w", CPENS(0,C7,C8,3), 1 },
3022 { "s12e1r", CPENS(4,C7,C8,4), 1 },
3023 { "s12e1w", CPENS(4,C7,C8,5), 1 },
3024 { "s12e0r", CPENS(4,C7,C8,6), 1 },
3025 { "s12e0w", CPENS(4,C7,C8,7), 1 },
3026 { "s1e2r", CPENS(4,C7,C8,0), 1 },
3027 { "s1e2w", CPENS(4,C7,C8,1), 1 },
3028 { "s1e3r", CPENS(6,C7,C8,0), 1 },
3029 { "s1e3w", CPENS(6,C7,C8,1), 1 },
3030 { 0, CPENS(0,0,0,0), 0 }
3031};
3032
3033const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3034{
3035 { "vmalle1", CPENS(0,C8,C7,0), 0 },
3036 { "vae1", CPENS(0,C8,C7,1), 1 },
3037 { "aside1", CPENS(0,C8,C7,2), 1 },
3038 { "vaae1", CPENS(0,C8,C7,3), 1 },
3039 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3040 { "vae1is", CPENS(0,C8,C3,1), 1 },
3041 { "aside1is", CPENS(0,C8,C3,2), 1 },
3042 { "vaae1is", CPENS(0,C8,C3,3), 1 },
3043 { "ipas2e1is", CPENS(4,C8,C0,1), 1 },
3044 { "ipas2le1is",CPENS(4,C8,C0,5), 1 },
3045 { "ipas2e1", CPENS(4,C8,C4,1), 1 },
3046 { "ipas2le1", CPENS(4,C8,C4,5), 1 },
3047 { "vae2", CPENS(4,C8,C7,1), 1 },
3048 { "vae2is", CPENS(4,C8,C3,1), 1 },
3049 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3050 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3051 { "vae3", CPENS(6,C8,C7,1), 1 },
3052 { "vae3is", CPENS(6,C8,C3,1), 1 },
3053 { "alle2", CPENS(4,C8,C7,0), 0 },
3054 { "alle2is", CPENS(4,C8,C3,0), 0 },
3055 { "alle1", CPENS(4,C8,C7,4), 0 },
3056 { "alle1is", CPENS(4,C8,C3,4), 0 },
3057 { "alle3", CPENS(6,C8,C7,0), 0 },
3058 { "alle3is", CPENS(6,C8,C3,0), 0 },
3059 { "vale1is", CPENS(0,C8,C3,5), 1 },
3060 { "vale2is", CPENS(4,C8,C3,5), 1 },
3061 { "vale3is", CPENS(6,C8,C3,5), 1 },
3062 { "vaale1is", CPENS(0,C8,C3,7), 1 },
3063 { "vale1", CPENS(0,C8,C7,5), 1 },
3064 { "vale2", CPENS(4,C8,C7,5), 1 },
3065 { "vale3", CPENS(6,C8,C7,5), 1 },
3066 { "vaale1", CPENS(0,C8,C7,7), 1 },
3067 { 0, CPENS(0,0,0,0), 0 }
3068};
3069
3070#undef C0
3071#undef C1
3072#undef C2
3073#undef C3
3074#undef C4
3075#undef C5
3076#undef C6
3077#undef C7
3078#undef C8
3079#undef C9
3080#undef C10
3081#undef C11
3082#undef C12
3083#undef C13
3084#undef C14
3085#undef C15
3086
3087/* Include the opcode description table as well as the operand description
3088 table. */
3089#include "aarch64-tbl.h"