1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <assert.h>
23 #include <stdlib.h>
24 #include <stdio.h>
25 #include "bfd_stdint.h"
26 #include <stdarg.h>
27 #include <inttypes.h>
28
29 #include "opintl.h"
30 #include "libiberty.h"
31
32 #include "aarch64-opc.h"
33
34 #ifdef DEBUG_AARCH64
35 int debug_dump = FALSE;
36 #endif /* DEBUG_AARCH64 */
37
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array[32] = {
41 /* 0-7. */
42 "pow2",
43 "vl1",
44 "vl2",
45 "vl3",
46 "vl4",
47 "vl5",
48 "vl6",
49 "vl7",
50 /* 8-15. */
51 "vl8",
52 "vl16",
53 "vl32",
54 "vl64",
55 "vl128",
56 "vl256",
57 0,
58 0,
59 /* 16-23. */
60 0,
61 0,
62 0,
63 0,
64 0,
65 0,
66 0,
67 0,
68 /* 24-31. */
69 0,
70 0,
71 0,
72 0,
73 0,
74 "mul4",
75 "mul3",
76 "all"
77 };
78
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array[16] = {
82 /* 0-7. */
83 "pldl1keep",
84 "pldl1strm",
85 "pldl2keep",
86 "pldl2strm",
87 "pldl3keep",
88 "pldl3strm",
89 0,
90 0,
91 /* 8-15. */
92 "pstl1keep",
93 "pstl1strm",
94 "pstl2keep",
95 "pstl2strm",
96 "pstl3keep",
97 "pstl3strm",
98 0,
99 0
100 };
101
102 /* Helper functions to determine which operand should be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
104
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107 {
108 return ((qualifier >= AARCH64_OPND_QLF_V_8B
109 && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110 : FALSE);
111 }
112
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115 {
116 return ((qualifier >= AARCH64_OPND_QLF_S_B
117 && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118 : FALSE);
119 }
120
121 enum data_pattern
122 {
123 DP_UNKNOWN,
124 DP_VECTOR_3SAME,
125 DP_VECTOR_LONG,
126 DP_VECTOR_WIDE,
127 DP_VECTOR_ACROSS_LANES,
128 };
129
130 static const char significant_operand_index [] =
131 {
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
137 };
138
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
140 the data pattern.
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
143
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146 {
147 if (vector_qualifier_p (qualifiers[0]) == TRUE)
148 {
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers[0] == qualifiers[1]
152 && vector_qualifier_p (qualifiers[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers[0])
154 == aarch64_get_qualifier_esize (qualifiers[1]))
155 && (aarch64_get_qualifier_esize (qualifiers[0])
156 == aarch64_get_qualifier_esize (qualifiers[2])))
157 return DP_VECTOR_3SAME;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
160 or v.8h, v.16b. */
161 if (vector_qualifier_p (qualifiers[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers[0])
164 == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165 return DP_VECTOR_LONG;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers[0] == qualifiers[1]
168 && vector_qualifier_p (qualifiers[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers[0])
171 == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers[0])
173 == aarch64_get_qualifier_esize (qualifiers[1])))
174 return DP_VECTOR_WIDE;
175 }
176 else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177 {
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers[1]) == TRUE
180 && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181 return DP_VECTOR_ACROSS_LANES;
182 }
183
184 return DP_UNKNOWN;
185 }
186
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that avoids calling
190 get_data_pattern each time we need to select an operand. We could
191 either cache the calculated result or statically generate the data;
192 however, it is not obvious that the optimization would bring significant
193 benefit. */
194
195 int
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197 {
198 return
199 significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200 }
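/* Illustrative example (derived from the logic above, not an exhaustive
   specification): for a widening operation whose first qualifier sequence is
   { AARCH64_OPND_QLF_V_8H, AARCH64_OPND_QLF_V_8B, AARCH64_OPND_QLF_V_8B },
   the element size of the first operand (2) is twice that of the second (1),
   so get_data_pattern returns DP_VECTOR_LONG and the size:Q fields are
   encoded/decoded from operand 1.  */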
201 \f
202 const aarch64_field fields[] =
203 {
204 { 0, 0 }, /* NIL. */
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 10, 4 }, /* imm4_3: in adddg/subg instructions. */
247 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
248 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
249 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
250 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
251 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
252 { 5, 14 }, /* imm14: in test bit and branch instructions. */
253 { 5, 16 }, /* imm16: in exception instructions. */
254 { 0, 26 }, /* imm26: in unconditional branch instructions. */
255 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
256 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
257 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
258 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
259 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
260 { 22, 1 }, /* N: in logical (immediate) instructions. */
261 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
262 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
263 { 31, 1 }, /* sf: in integer data processing instructions. */
264 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
265 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
266 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
267 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
268 { 31, 1 }, /* b5: in the test bit and branch instructions. */
269 { 19, 5 }, /* b40: in the test bit and branch instructions. */
270 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
271 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
272 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
273 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
274 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
275 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
276 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
277 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
278 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
279 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
280 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
281 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
282 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
283 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
284 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
285 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
286 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
287 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
288 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
289 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
290 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
291 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
292 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
293 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
294 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
295 { 5, 1 }, /* SVE_i1: single-bit immediate. */
296 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
297 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
298 { 19, 2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19]. */
299 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
300 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
301 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
302 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
303 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
304 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
305 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
306 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
307 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
308 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
309 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
310 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
311 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
312 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
313 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
314 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
315 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
316 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
317 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
318 { 16, 4 }, /* SVE_tsz: triangular size select. */
319 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
320 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
321 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
322 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
323 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
324 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
325 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
326 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
327 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
328 { 22, 1 }, /* sz: 1-bit element size select. */
329 };
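/* A worked reading of the table above (illustrative): each entry gives the
   field's least significant bit position followed by its width in bits, so
   { 10, 3 } for SVE_Pg3 covers bits [12,10] (10 + 3 - 1 = 12), and
   { 5, 19 } for imm19 covers bits [23,5].  */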
330
331 enum aarch64_operand_class
332 aarch64_get_operand_class (enum aarch64_opnd type)
333 {
334 return aarch64_operands[type].op_class;
335 }
336
337 const char *
338 aarch64_get_operand_name (enum aarch64_opnd type)
339 {
340 return aarch64_operands[type].name;
341 }
342
343 /* Get the operand description string.
344 This is usually used for diagnostic purposes. */
345 const char *
346 aarch64_get_operand_desc (enum aarch64_opnd type)
347 {
348 return aarch64_operands[type].desc;
349 }
350
351 /* Table of all conditional affixes. */
352 const aarch64_cond aarch64_conds[16] =
353 {
354 {{"eq", "none"}, 0x0},
355 {{"ne", "any"}, 0x1},
356 {{"cs", "hs", "nlast"}, 0x2},
357 {{"cc", "lo", "ul", "last"}, 0x3},
358 {{"mi", "first"}, 0x4},
359 {{"pl", "nfrst"}, 0x5},
360 {{"vs"}, 0x6},
361 {{"vc"}, 0x7},
362 {{"hi", "pmore"}, 0x8},
363 {{"ls", "plast"}, 0x9},
364 {{"ge", "tcont"}, 0xa},
365 {{"lt", "tstop"}, 0xb},
366 {{"gt"}, 0xc},
367 {{"le"}, 0xd},
368 {{"al"}, 0xe},
369 {{"nv"}, 0xf},
370 };
371
372 const aarch64_cond *
373 get_cond_from_value (aarch64_insn value)
374 {
375 assert (value < 16);
376 return &aarch64_conds[(unsigned int) value];
377 }
378
379 const aarch64_cond *
380 get_inverted_cond (const aarch64_cond *cond)
381 {
382 return &aarch64_conds[cond->value ^ 0x1];
383 }
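/* For example (following the table and the XOR above): inverting "eq" (0x0)
   yields "ne" (0x1), and inverting "ge" (0xa) yields "lt" (0xb); each pair
   differs only in bit 0 of the condition value.  */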
384
385 /* Table describing the operand extension/shifting operators; indexed by
386 enum aarch64_modifier_kind.
387
388 The value column provides the most common values for encoding modifiers,
389 which enables table-driven encoding/decoding for the modifiers. */
390 const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
391 {
392 {"none", 0x0},
393 {"msl", 0x0},
394 {"ror", 0x3},
395 {"asr", 0x2},
396 {"lsr", 0x1},
397 {"lsl", 0x0},
398 {"uxtb", 0x0},
399 {"uxth", 0x1},
400 {"uxtw", 0x2},
401 {"uxtx", 0x3},
402 {"sxtb", 0x4},
403 {"sxth", 0x5},
404 {"sxtw", 0x6},
405 {"sxtx", 0x7},
406 {"mul", 0x0},
407 {"mul vl", 0x0},
408 {NULL, 0},
409 };
410
411 enum aarch64_modifier_kind
412 aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
413 {
414 return desc - aarch64_operand_modifiers;
415 }
416
417 aarch64_insn
418 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
419 {
420 return aarch64_operand_modifiers[kind].value;
421 }
422
423 enum aarch64_modifier_kind
424 aarch64_get_operand_modifier_from_value (aarch64_insn value,
425 bfd_boolean extend_p)
426 {
427 if (extend_p == TRUE)
428 return AARCH64_MOD_UXTB + value;
429 else
430 return AARCH64_MOD_LSL - value;
431 }
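/* Illustrative mapping (assuming enum aarch64_modifier_kind follows the same
   order as the table above, which the table-driven value column relies on):
   with EXTEND_P, a decoded value of 6 maps to AARCH64_MOD_SXTW
   (AARCH64_MOD_UXTB + 6), matching the {"sxtw", 0x6} entry; without
   EXTEND_P, a value of 0 maps to AARCH64_MOD_LSL.  */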
432
433 bfd_boolean
434 aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
435 {
436 return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
437 ? TRUE : FALSE;
438 }
439
440 static inline bfd_boolean
441 aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
442 {
443 return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
444 ? TRUE : FALSE;
445 }
446
447 const struct aarch64_name_value_pair aarch64_barrier_options[16] =
448 {
449 { "#0x00", 0x0 },
450 { "oshld", 0x1 },
451 { "oshst", 0x2 },
452 { "osh", 0x3 },
453 { "#0x04", 0x4 },
454 { "nshld", 0x5 },
455 { "nshst", 0x6 },
456 { "nsh", 0x7 },
457 { "#0x08", 0x8 },
458 { "ishld", 0x9 },
459 { "ishst", 0xa },
460 { "ish", 0xb },
461 { "#0x0c", 0xc },
462 { "ld", 0xd },
463 { "st", 0xe },
464 { "sy", 0xf },
465 };
466
467 /* Table describing the operands supported by the aliases of the HINT
468 instruction.
469
470 The name column is the operand that is accepted for the alias. The value
471 column is the hint number of the alias. The list of operands is terminated
472 by NULL in the name column. */
473
474 const struct aarch64_name_value_pair aarch64_hint_options[] =
475 {
476 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
477 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
478 { "csync", HINT_OPD_CSYNC }, /* PSB CSYNC. */
479 { "c", HINT_OPD_C }, /* BTI C. */
480 { "j", HINT_OPD_J }, /* BTI J. */
481 { "jc", HINT_OPD_JC }, /* BTI JC. */
482 { NULL, HINT_OPD_NULL },
483 };
484
485 /* op -> op: load = 0, instruction = 1, store = 2
486 l -> level: 1-3
487 t -> temporal: temporal (retained) = 0, non-temporal (streaming) = 1 */
488 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
489 const struct aarch64_name_value_pair aarch64_prfops[32] =
490 {
491 { "pldl1keep", B(0, 1, 0) },
492 { "pldl1strm", B(0, 1, 1) },
493 { "pldl2keep", B(0, 2, 0) },
494 { "pldl2strm", B(0, 2, 1) },
495 { "pldl3keep", B(0, 3, 0) },
496 { "pldl3strm", B(0, 3, 1) },
497 { NULL, 0x06 },
498 { NULL, 0x07 },
499 { "plil1keep", B(1, 1, 0) },
500 { "plil1strm", B(1, 1, 1) },
501 { "plil2keep", B(1, 2, 0) },
502 { "plil2strm", B(1, 2, 1) },
503 { "plil3keep", B(1, 3, 0) },
504 { "plil3strm", B(1, 3, 1) },
505 { NULL, 0x0e },
506 { NULL, 0x0f },
507 { "pstl1keep", B(2, 1, 0) },
508 { "pstl1strm", B(2, 1, 1) },
509 { "pstl2keep", B(2, 2, 0) },
510 { "pstl2strm", B(2, 2, 1) },
511 { "pstl3keep", B(2, 3, 0) },
512 { "pstl3strm", B(2, 3, 1) },
513 { NULL, 0x16 },
514 { NULL, 0x17 },
515 { NULL, 0x18 },
516 { NULL, 0x19 },
517 { NULL, 0x1a },
518 { NULL, 0x1b },
519 { NULL, 0x1c },
520 { NULL, 0x1d },
521 { NULL, 0x1e },
522 { NULL, 0x1f },
523 };
524 #undef B
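/* A few expansions of the B() macro above, as an illustrative sanity check:
     B(0, 1, 0) = (0 << 3) | (0 << 1) | 0 = 0x00   ("pldl1keep")
     B(1, 2, 1) = (1 << 3) | (1 << 1) | 1 = 0x0b   ("plil2strm")
     B(2, 3, 1) = (2 << 3) | (2 << 1) | 1 = 0x15   ("pstl3strm")
   so the encoded value doubles as the index into aarch64_prfops.  */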
525 \f
526 /* Utilities on value constraint. */
527
528 static inline int
529 value_in_range_p (int64_t value, int low, int high)
530 {
531 return (value >= low && value <= high) ? 1 : 0;
532 }
533
534 /* Return true if VALUE is a multiple of ALIGN. */
535 static inline int
536 value_aligned_p (int64_t value, int align)
537 {
538 return (value % align) == 0;
539 }
540
541 /* Return non-zero if a signed VALUE fits in a field of WIDTH bits. */
542 static inline int
543 value_fit_signed_field_p (int64_t value, unsigned width)
544 {
545 assert (width < 32);
546 if (width < sizeof (value) * 8)
547 {
548 int64_t lim = (int64_t)1 << (width - 1);
549 if (value >= -lim && value < lim)
550 return 1;
551 }
552 return 0;
553 }
554
555 /* Return non-zero if an unsigned VALUE fits in a field of WIDTH bits. */
556 static inline int
557 value_fit_unsigned_field_p (int64_t value, unsigned width)
558 {
559 assert (width < 32);
560 if (width < sizeof (value) * 8)
561 {
562 int64_t lim = (int64_t)1 << width;
563 if (value >= 0 && value < lim)
564 return 1;
565 }
566 return 0;
567 }
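/* For example (illustrative): with WIDTH == 19, as used for the imm19 field,
   value_fit_signed_field_p accepts values in [-0x40000, 0x3ffff]
   (i.e. -262144 to 262143), while value_fit_unsigned_field_p with
   WIDTH == 19 accepts [0, 0x7ffff].  */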
568
569 /* Return 1 if OPERAND is SP or WSP. */
570 int
571 aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
572 {
573 return ((aarch64_get_operand_class (operand->type)
574 == AARCH64_OPND_CLASS_INT_REG)
575 && operand_maybe_stack_pointer (aarch64_operands + operand->type)
576 && operand->reg.regno == 31);
577 }
578
579 /* Return 1 if OPERAND is XZR or WZR. */
580 int
581 aarch64_zero_register_p (const aarch64_opnd_info *operand)
582 {
583 return ((aarch64_get_operand_class (operand->type)
584 == AARCH64_OPND_CLASS_INT_REG)
585 && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
586 && operand->reg.regno == 31);
587 }
588
589 /* Return true if the operand *OPERAND, which has the operand code
590 OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
591 qualified by the qualifier TARGET. */
592
593 static inline int
594 operand_also_qualified_p (const struct aarch64_opnd_info *operand,
595 aarch64_opnd_qualifier_t target)
596 {
597 switch (operand->qualifier)
598 {
599 case AARCH64_OPND_QLF_W:
600 if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
601 return 1;
602 break;
603 case AARCH64_OPND_QLF_X:
604 if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
605 return 1;
606 break;
607 case AARCH64_OPND_QLF_WSP:
608 if (target == AARCH64_OPND_QLF_W
609 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
610 return 1;
611 break;
612 case AARCH64_OPND_QLF_SP:
613 if (target == AARCH64_OPND_QLF_X
614 && operand_maybe_stack_pointer (aarch64_operands + operand->type))
615 return 1;
616 break;
617 default:
618 break;
619 }
620
621 return 0;
622 }
623
624 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
625 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
626
627 Return NIL if more than one expected qualifier is found. */
628
629 aarch64_opnd_qualifier_t
630 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
631 int idx,
632 const aarch64_opnd_qualifier_t known_qlf,
633 int known_idx)
634 {
635 int i, saved_i;
636
637 /* Special case.
638
639 When the known qualifier is NIL, we have to assume that there is only
640 one qualifier sequence in the *QSEQ_LIST and return the corresponding
641 qualifier directly. One scenario is that for instruction
642 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
643 which has only one possible valid qualifier sequence
644 NIL, S_D
645 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
646 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
647
648 Because the qualifier NIL has dual roles in the qualifier sequence:
649 it can mean no qualifier for the operand, or that the qualifier sequence is
650 not in use (when all qualifiers in the sequence are NILs), we have to
651 handle this special case here. */
652 if (known_qlf == AARCH64_OPND_NIL)
653 {
654 assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
655 return qseq_list[0][idx];
656 }
657
658 for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
659 {
660 if (qseq_list[i][known_idx] == known_qlf)
661 {
662 if (saved_i != -1)
663 /* More than one sequence has KNOWN_QLF at
664 KNOWN_IDX. */
665 return AARCH64_OPND_NIL;
666 saved_i = i;
667 }
668 }
669
670 return qseq_list[saved_i][idx];
671 }
672
673 enum operand_qualifier_kind
674 {
675 OQK_NIL,
676 OQK_OPD_VARIANT,
677 OQK_VALUE_IN_RANGE,
678 OQK_MISC,
679 };
680
681 /* Operand qualifier description. */
682 struct operand_qualifier_data
683 {
684 /* The usage of the three data fields depends on the qualifier kind. */
685 int data0;
686 int data1;
687 int data2;
688 /* Description. */
689 const char *desc;
690 /* Kind. */
691 enum operand_qualifier_kind kind;
692 };
693
694 /* Indexed by the operand qualifier enumerators. */
695 struct operand_qualifier_data aarch64_opnd_qualifiers[] =
696 {
697 {0, 0, 0, "NIL", OQK_NIL},
698
699 /* Operand variant qualifiers.
700 First 3 fields:
701 element size, number of elements and common value for encoding. */
702
703 {4, 1, 0x0, "w", OQK_OPD_VARIANT},
704 {8, 1, 0x1, "x", OQK_OPD_VARIANT},
705 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
706 {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
707
708 {1, 1, 0x0, "b", OQK_OPD_VARIANT},
709 {2, 1, 0x1, "h", OQK_OPD_VARIANT},
710 {4, 1, 0x2, "s", OQK_OPD_VARIANT},
711 {8, 1, 0x3, "d", OQK_OPD_VARIANT},
712 {16, 1, 0x4, "q", OQK_OPD_VARIANT},
713 {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
714
715 {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
716 {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
717 {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
718 {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
719 {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
720 {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
721 {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
722 {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
723 {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
724 {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
725 {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
726
727 {0, 0, 0, "z", OQK_OPD_VARIANT},
728 {0, 0, 0, "m", OQK_OPD_VARIANT},
729
730 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
731 {16, 0, 0, "tag", OQK_OPD_VARIANT},
732
733 /* Qualifiers constraining the value range.
734 First 3 fields:
735 Lower bound, higher bound, unused. */
736
737 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE},
738 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
739 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
740 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
741 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
742 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
743 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
744
745 /* Qualifiers for miscellaneous purposes.
746 First 3 fields:
747 unused, unused and unused. */
748
749 {0, 0, 0, "lsl", 0},
750 {0, 0, 0, "msl", 0},
751
752 {0, 0, 0, "retrieving", 0},
753 };
754
755 static inline bfd_boolean
756 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
757 {
758 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
759 ? TRUE : FALSE;
760 }
761
762 static inline bfd_boolean
763 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
764 {
765 return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
766 ? TRUE : FALSE;
767 }
768
769 const char*
770 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
771 {
772 return aarch64_opnd_qualifiers[qualifier].desc;
773 }
774
775 /* Given an operand qualifier, return the expected data element size
776 of a qualified operand. */
777 unsigned char
778 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
779 {
780 assert (operand_variant_qualifier_p (qualifier) == TRUE);
781 return aarch64_opnd_qualifiers[qualifier].data0;
782 }
783
784 unsigned char
785 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
786 {
787 assert (operand_variant_qualifier_p (qualifier) == TRUE);
788 return aarch64_opnd_qualifiers[qualifier].data1;
789 }
790
791 aarch64_insn
792 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
793 {
794 assert (operand_variant_qualifier_p (qualifier) == TRUE);
795 return aarch64_opnd_qualifiers[qualifier].data2;
796 }
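/* Worked example against the table above (illustrative): for the "4s"
   qualifier, aarch64_get_qualifier_esize returns 4,
   aarch64_get_qualifier_nelem returns 4 and
   aarch64_get_qualifier_standard_value returns 0x5, i.e. the common
   encoding value for a vector of four 32-bit elements.  */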
797
798 static int
799 get_lower_bound (aarch64_opnd_qualifier_t qualifier)
800 {
801 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
802 return aarch64_opnd_qualifiers[qualifier].data0;
803 }
804
805 static int
806 get_upper_bound (aarch64_opnd_qualifier_t qualifier)
807 {
808 assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
809 return aarch64_opnd_qualifiers[qualifier].data1;
810 }
811
812 #ifdef DEBUG_AARCH64
813 void
814 aarch64_verbose (const char *str, ...)
815 {
816 va_list ap;
817 va_start (ap, str);
818 printf ("#### ");
819 vprintf (str, ap);
820 printf ("\n");
821 va_end (ap);
822 }
823
824 static inline void
825 dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
826 {
827 int i;
828 printf ("#### \t");
829 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
830 printf ("%s,", aarch64_get_qualifier_name (*qualifier));
831 printf ("\n");
832 }
833
834 static void
835 dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
836 const aarch64_opnd_qualifier_t *qualifier)
837 {
838 int i;
839 aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
840
841 aarch64_verbose ("dump_match_qualifiers:");
842 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
843 curr[i] = opnd[i].qualifier;
844 dump_qualifier_sequence (curr);
845 aarch64_verbose ("against");
846 dump_qualifier_sequence (qualifier);
847 }
848 #endif /* DEBUG_AARCH64 */
849
850 /* This function checks whether the instruction described by OPCODE is a
851 destructive instruction, based on the usage of its registers. It does not
852 recognize unary destructive instructions. */
853 bfd_boolean
854 aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
855 {
856 int i = 0;
857 const enum aarch64_opnd *opnds = opcode->operands;
858
859 if (opnds[0] == AARCH64_OPND_NIL)
860 return FALSE;
861
862 while (opnds[++i] != AARCH64_OPND_NIL)
863 if (opnds[i] == opnds[0])
864 return TRUE;
865
866 return FALSE;
867 }
868
869 /* TODO: improve this; we could have an extra field at run time to
870 store the number of operands rather than calculating it every time. */
871
872 int
873 aarch64_num_of_operands (const aarch64_opcode *opcode)
874 {
875 int i = 0;
876 const enum aarch64_opnd *opnds = opcode->operands;
877 while (opnds[i++] != AARCH64_OPND_NIL)
878 ;
879 --i;
880 assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
881 return i;
882 }
883
884 /* Find the best-matched qualifier sequence in *QUALIFIERS_LIST for INST.
885 If a match is found, fill the sequence in *RET and return 1; otherwise return 0.
886
887 N.B. on entry, it is very likely that only some operands in *INST
888 have had their qualifiers established.
889
890 If STOP_AT is not -1, the function will only try to match
891 the qualifier sequence for operands before and including the operand
892 of index STOP_AT; and on success *RET will only be filled with the first
893 (STOP_AT+1) qualifiers.
894
895 A couple of examples of the matching algorithm:
896
897 X,W,NIL should match
898 X,W,NIL
899
900 NIL,NIL should match
901 X ,NIL
902
903 Apart from serving the main encoding routine, this can also be called
904 during or after the operand decoding. */
905
906 int
907 aarch64_find_best_match (const aarch64_inst *inst,
908 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
909 int stop_at, aarch64_opnd_qualifier_t *ret)
910 {
911 int found = 0;
912 int i, num_opnds;
913 const aarch64_opnd_qualifier_t *qualifiers;
914
915 num_opnds = aarch64_num_of_operands (inst->opcode);
916 if (num_opnds == 0)
917 {
918 DEBUG_TRACE ("SUCCEED: no operand");
919 return 1;
920 }
921
922 if (stop_at < 0 || stop_at >= num_opnds)
923 stop_at = num_opnds - 1;
924
925 /* For each pattern. */
926 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
927 {
928 int j;
929 qualifiers = *qualifiers_list;
930
931 /* Start as positive. */
932 found = 1;
933
934 DEBUG_TRACE ("%d", i);
935 #ifdef DEBUG_AARCH64
936 if (debug_dump)
937 dump_match_qualifiers (inst->operands, qualifiers);
938 #endif
939
940 /* Most opcodes have far fewer patterns in the list.
941 The first NIL qualifier indicates the end of the list. */
942 if (empty_qualifier_sequence_p (qualifiers) == TRUE)
943 {
944 DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
945 if (i)
946 found = 0;
947 break;
948 }
949
950 for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
951 {
952 if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
953 {
954 /* Either the operand does not have a qualifier, or the qualifier
955 for the operand needs to be deduced from the qualifier
956 sequence.
957 In the latter case, any constraint checking related to
958 the obtained qualifier should be done later in
959 operand_general_constraint_met_p. */
960 continue;
961 }
962 else if (*qualifiers != inst->operands[j].qualifier)
963 {
964 /* Unless the target qualifier can also qualify the operand
965 (which already has a non-nil qualifier), non-equal
966 qualifiers generally do not match. */
967 if (operand_also_qualified_p (inst->operands + j, *qualifiers))
968 continue;
969 else
970 {
971 found = 0;
972 break;
973 }
974 }
975 else
976 continue; /* Equal qualifiers are certainly matched. */
977 }
978
979 /* Qualifiers established. */
980 if (found == 1)
981 break;
982 }
983
984 if (found == 1)
985 {
986 /* Fill the result in *RET. */
987 int j;
988 qualifiers = *qualifiers_list;
989
990 DEBUG_TRACE ("complete qualifiers using list %d", i);
991 #ifdef DEBUG_AARCH64
992 if (debug_dump)
993 dump_qualifier_sequence (qualifiers);
994 #endif
995
996 for (j = 0; j <= stop_at; ++j, ++qualifiers)
997 ret[j] = *qualifiers;
998 for (; j < AARCH64_MAX_OPND_NUM; ++j)
999 ret[j] = AARCH64_OPND_QLF_NIL;
1000
1001 DEBUG_TRACE ("SUCCESS");
1002 return 1;
1003 }
1004
1005 DEBUG_TRACE ("FAIL");
1006 return 0;
1007 }
1008
1009 /* Operand qualifier matching and resolving.
1010
1011 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1012 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1013
1014 If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1015 succeeds. */
1016
1017 static int
1018 match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1019 {
1020 int i, nops;
1021 aarch64_opnd_qualifier_seq_t qualifiers;
1022
1023 if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1024 qualifiers))
1025 {
1026 DEBUG_TRACE ("matching FAIL");
1027 return 0;
1028 }
1029
1030 if (inst->opcode->flags & F_STRICT)
1031 {
1032 /* Require an exact qualifier match, even for NIL qualifiers. */
1033 nops = aarch64_num_of_operands (inst->opcode);
1034 for (i = 0; i < nops; ++i)
1035 if (inst->operands[i].qualifier != qualifiers[i])
1036 return FALSE;
1037 }
1038
1039 /* Update the qualifiers. */
1040 if (update_p == TRUE)
1041 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1042 {
1043 if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1044 break;
1045 DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1046 "update %s with %s for operand %d",
1047 aarch64_get_qualifier_name (inst->operands[i].qualifier),
1048 aarch64_get_qualifier_name (qualifiers[i]), i);
1049 inst->operands[i].qualifier = qualifiers[i];
1050 }
1051
1052 DEBUG_TRACE ("matching SUCCESS");
1053 return 1;
1054 }
1055
1056 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1057 register by MOVZ.
1058
1059 IS32 indicates whether VALUE is a 32-bit immediate or not.
1060 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1061 amount will be returned in *SHIFT_AMOUNT. */
1062
1063 bfd_boolean
1064 aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1065 {
1066 int amount;
1067
1068 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1069
1070 if (is32)
1071 {
1072 /* Allow all zeros or all ones in top 32-bits, so that
1073 32-bit constant expressions like ~0x80000000 are
1074 permitted. */
1075 uint64_t ext = value;
1076 if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1077 /* Immediate out of range. */
1078 return FALSE;
1079 value &= (int64_t) 0xffffffff;
1080 }
1081
1082 /* first, try movz then movn */
1083 amount = -1;
1084 if ((value & ((int64_t) 0xffff << 0)) == value)
1085 amount = 0;
1086 else if ((value & ((int64_t) 0xffff << 16)) == value)
1087 amount = 16;
1088 else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1089 amount = 32;
1090 else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1091 amount = 48;
1092
1093 if (amount == -1)
1094 {
1095 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1096 return FALSE;
1097 }
1098
1099 if (shift_amount != NULL)
1100 *shift_amount = amount;
1101
1102 DEBUG_TRACE ("exit TRUE with amount %d", amount);
1103
1104 return TRUE;
1105 }
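/* A minimal usage sketch for the function above.  The AARCH64_OPC_EXAMPLES
   guard and the function name are hypothetical (not part of the build); the
   expected results follow directly from the single-16-bit-chunk test above.  */
#ifdef AARCH64_OPC_EXAMPLES
static void
example_wide_constant_p (void)
{
  unsigned int shift;

  /* 0xffff0000 has all of its set bits in one 16-bit chunk.  */
  assert (aarch64_wide_constant_p (0xffff0000, /* is32 */ 1, &shift));
  assert (shift == 16);

  /* 0x00010001 spans two 16-bit chunks, so it cannot be a MOVZ payload.  */
  assert (!aarch64_wide_constant_p (0x00010001, /* is32 */ 1, &shift));
}
#endif /* AARCH64_OPC_EXAMPLES */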
1106
1107 /* Build the accepted values for immediate logical SIMD instructions.
1108
1109 The standard encodings of the immediate value are:
1110 N imms immr SIMD size R S
1111 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1112 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1113 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1114 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1115 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1116 0 11110s 00000r 2 UInt(r) UInt(s)
1117 where the all-ones value of S is reserved.
1118
1119 Let's call E the SIMD size.
1120
1121 The immediate value is: S+1 bits '1' rotated to the right by R.
1122
1123 The total number of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1124 (remember S != E - 1). */
1125
1126 #define TOTAL_IMM_NB 5334
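/* Expanding the sum quoted above:
   64*63 + 32*31 + 16*15 + 8*7 + 4*3 + 2*1
   = 4032 + 992 + 240 + 56 + 12 + 2 = 5334.  */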
1127
1128 typedef struct
1129 {
1130 uint64_t imm;
1131 aarch64_insn encoding;
1132 } simd_imm_encoding;
1133
1134 static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1135
1136 static int
1137 simd_imm_encoding_cmp(const void *i1, const void *i2)
1138 {
1139 const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1140 const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1141
1142 if (imm1->imm < imm2->imm)
1143 return -1;
1144 if (imm1->imm > imm2->imm)
1145 return +1;
1146 return 0;
1147 }
1148
1149 /* immediate bitfield standard encoding
1150 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1151 1 ssssss rrrrrr 64 rrrrrr ssssss
1152 0 0sssss 0rrrrr 32 rrrrr sssss
1153 0 10ssss 00rrrr 16 rrrr ssss
1154 0 110sss 000rrr 8 rrr sss
1155 0 1110ss 0000rr 4 rr ss
1156 0 11110s 00000r 2 r s */
1157 static inline int
1158 encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1159 {
1160 return (is64 << 12) | (r << 6) | s;
1161 }
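/* Worked example (illustrative): a 32-bit element (is64 == 0, where no extra
   S prefix bits are needed) with eight consecutive ones (s == 7) rotated
   right by four (r == 4) encodes as (0 << 12) | (4 << 6) | 7 == 0x107.  */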
1162
1163 static void
1164 build_immediate_table (void)
1165 {
1166 uint32_t log_e, e, s, r, s_mask;
1167 uint64_t mask, imm;
1168 int nb_imms;
1169 int is64;
1170
1171 nb_imms = 0;
1172 for (log_e = 1; log_e <= 6; log_e++)
1173 {
1174 /* Get element size. */
1175 e = 1u << log_e;
1176 if (log_e == 6)
1177 {
1178 is64 = 1;
1179 mask = 0xffffffffffffffffull;
1180 s_mask = 0;
1181 }
1182 else
1183 {
1184 is64 = 0;
1185 mask = (1ull << e) - 1;
1186 /* log_e s_mask
1187 1 ((1 << 4) - 1) << 2 = 111100
1188 2 ((1 << 3) - 1) << 3 = 111000
1189 3 ((1 << 2) - 1) << 4 = 110000
1190 4 ((1 << 1) - 1) << 5 = 100000
1191 5 ((1 << 0) - 1) << 6 = 000000 */
1192 s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1193 }
1194 for (s = 0; s < e - 1; s++)
1195 for (r = 0; r < e; r++)
1196 {
1197 /* s+1 consecutive bits to 1 (s < 63) */
1198 imm = (1ull << (s + 1)) - 1;
1199 /* rotate right by r */
1200 if (r != 0)
1201 imm = (imm >> r) | ((imm << (e - r)) & mask);
1202 /* replicate the constant depending on SIMD size */
1203 switch (log_e)
1204 {
1205 case 1: imm = (imm << 2) | imm;
1206 /* Fall through. */
1207 case 2: imm = (imm << 4) | imm;
1208 /* Fall through. */
1209 case 3: imm = (imm << 8) | imm;
1210 /* Fall through. */
1211 case 4: imm = (imm << 16) | imm;
1212 /* Fall through. */
1213 case 5: imm = (imm << 32) | imm;
1214 /* Fall through. */
1215 case 6: break;
1216 default: abort ();
1217 }
1218 simd_immediates[nb_imms].imm = imm;
1219 simd_immediates[nb_imms].encoding =
1220 encode_immediate_bitfield(is64, s | s_mask, r);
1221 nb_imms++;
1222 }
1223 }
1224 assert (nb_imms == TOTAL_IMM_NB);
1225 qsort(simd_immediates, nb_imms,
1226 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1227 }
1228
1229 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1230 be accepted by logical (immediate) instructions
1231 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1232
1233 ESIZE is the number of bytes in the decoded immediate value.
1234 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1235 VALUE will be returned in *ENCODING. */
1236
1237 bfd_boolean
1238 aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1239 {
1240 simd_imm_encoding imm_enc;
1241 const simd_imm_encoding *imm_encoding;
1242 static bfd_boolean initialized = FALSE;
1243 uint64_t upper;
1244 int i;
1245
1246 DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1247 value, esize);
1248
1249 if (!initialized)
1250 {
1251 build_immediate_table ();
1252 initialized = TRUE;
1253 }
1254
1255 /* Allow all zeros or all ones in top bits, so that
1256 constant expressions like ~1 are permitted. */
1257 upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1258 if ((value & ~upper) != value && (value | upper) != value)
1259 return FALSE;
1260
1261 /* Replicate to a full 64-bit value. */
1262 value &= ~upper;
1263 for (i = esize * 8; i < 64; i *= 2)
1264 value |= (value << i);
1265
1266 imm_enc.imm = value;
1267 imm_encoding = (const simd_imm_encoding *)
1268 bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1269 sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1270 if (imm_encoding == NULL)
1271 {
1272 DEBUG_TRACE ("exit with FALSE");
1273 return FALSE;
1274 }
1275 if (encoding != NULL)
1276 *encoding = imm_encoding->encoding;
1277 DEBUG_TRACE ("exit with TRUE");
1278 return TRUE;
1279 }
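/* A small usage sketch for the predicate above, under the same hypothetical
   AARCH64_OPC_EXAMPLES guard as earlier (not part of the build).  The
   expectations only rely on whether the value is a repeating pattern of a
   single rotated run of ones.  */
#ifdef AARCH64_OPC_EXAMPLES
static void
example_logical_immediate_p (void)
{
  aarch64_insn encoding;

  /* 0xf0f0f0f0f0f0f0f0: four ones rotated within every 8-bit element.  */
  assert (aarch64_logical_immediate_p (0xf0f0f0f0f0f0f0f0ULL, 8, &encoding));

  /* 0x1234567812345678 is not a repeated rotated run of ones; reject it.  */
  assert (!aarch64_logical_immediate_p (0x1234567812345678ULL, 8, NULL));
}
#endif /* AARCH64_OPC_EXAMPLES */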
1280
1281 /* If 64-bit immediate IMM is in the format of
1282 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1283 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1284 of value "abcdefgh". Otherwise return -1. */
1285 int
1286 aarch64_shrink_expanded_imm8 (uint64_t imm)
1287 {
1288 int i, ret;
1289 uint32_t byte;
1290
1291 ret = 0;
1292 for (i = 0; i < 8; i++)
1293 {
1294 byte = (imm >> (8 * i)) & 0xff;
1295 if (byte == 0xff)
1296 ret |= 1 << i;
1297 else if (byte != 0x00)
1298 return -1;
1299 }
1300 return ret;
1301 }
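/* Worked example (illustrative): for IMM == 0x00ff00ff0000ff00 the bytes
   from most to least significant are 00 ff 00 ff 00 00 ff 00, i.e.
   a..h = 0,1,0,1,0,0,1,0, so the function returns 0x52; an input such as
   0x00ff00ff00000100 contains a byte that is neither 0x00 nor 0xff and
   yields -1.  */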
1302
1303 /* Utility inline functions for operand_general_constraint_met_p. */
1304
1305 static inline void
1306 set_error (aarch64_operand_error *mismatch_detail,
1307 enum aarch64_operand_error_kind kind, int idx,
1308 const char* error)
1309 {
1310 if (mismatch_detail == NULL)
1311 return;
1312 mismatch_detail->kind = kind;
1313 mismatch_detail->index = idx;
1314 mismatch_detail->error = error;
1315 }
1316
1317 static inline void
1318 set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1319 const char* error)
1320 {
1321 if (mismatch_detail == NULL)
1322 return;
1323 set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1324 }
1325
1326 static inline void
1327 set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1328 int idx, int lower_bound, int upper_bound,
1329 const char* error)
1330 {
1331 if (mismatch_detail == NULL)
1332 return;
1333 set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1334 mismatch_detail->data[0] = lower_bound;
1335 mismatch_detail->data[1] = upper_bound;
1336 }
1337
1338 static inline void
1339 set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1340 int idx, int lower_bound, int upper_bound)
1341 {
1342 if (mismatch_detail == NULL)
1343 return;
1344 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1345 _("immediate value"));
1346 }
1347
1348 static inline void
1349 set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1350 int idx, int lower_bound, int upper_bound)
1351 {
1352 if (mismatch_detail == NULL)
1353 return;
1354 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1355 _("immediate offset"));
1356 }
1357
1358 static inline void
1359 set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1360 int idx, int lower_bound, int upper_bound)
1361 {
1362 if (mismatch_detail == NULL)
1363 return;
1364 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1365 _("register number"));
1366 }
1367
1368 static inline void
1369 set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1370 int idx, int lower_bound, int upper_bound)
1371 {
1372 if (mismatch_detail == NULL)
1373 return;
1374 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1375 _("register element index"));
1376 }
1377
1378 static inline void
1379 set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1380 int idx, int lower_bound, int upper_bound)
1381 {
1382 if (mismatch_detail == NULL)
1383 return;
1384 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1385 _("shift amount"));
1386 }
1387
1388 /* Report that the MUL modifier in operand IDX should be in the range
1389 [LOWER_BOUND, UPPER_BOUND]. */
1390 static inline void
1391 set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1392 int idx, int lower_bound, int upper_bound)
1393 {
1394 if (mismatch_detail == NULL)
1395 return;
1396 set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1397 _("multiplier"));
1398 }
1399
1400 static inline void
1401 set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1402 int alignment)
1403 {
1404 if (mismatch_detail == NULL)
1405 return;
1406 set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1407 mismatch_detail->data[0] = alignment;
1408 }
1409
1410 static inline void
1411 set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1412 int expected_num)
1413 {
1414 if (mismatch_detail == NULL)
1415 return;
1416 set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1417 mismatch_detail->data[0] = expected_num;
1418 }
1419
1420 static inline void
1421 set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1422 const char* error)
1423 {
1424 if (mismatch_detail == NULL)
1425 return;
1426 set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1427 }
1428
1429 /* General constraint checking based on operand code.
1430
1431 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1432 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1433
1434 This function has to be called after the qualifiers for all operands
1435 have been resolved.
1436
1437 A mismatch error message is returned in *MISMATCH_DETAIL upon request,
1438 i.e. when MISMATCH_DETAIL is non-NULL. This avoids generating
1439 error messages during disassembly, where they are not
1440 wanted. We avoid the dynamic construction of strings of error messages
1441 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1442 use a combination of error code, static string and some integer data to
1443 represent an error. */
1444
1445 static int
1446 operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1447 enum aarch64_opnd type,
1448 const aarch64_opcode *opcode,
1449 aarch64_operand_error *mismatch_detail)
1450 {
1451 unsigned num, modifiers, shift;
1452 unsigned char size;
1453 int64_t imm, min_value, max_value;
1454 uint64_t uvalue, mask;
1455 const aarch64_opnd_info *opnd = opnds + idx;
1456 aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1457
1458 assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1459
1460 switch (aarch64_operands[type].op_class)
1461 {
1462 case AARCH64_OPND_CLASS_INT_REG:
1463 /* Check pair reg constraints for cas* instructions. */
1464 if (type == AARCH64_OPND_PAIRREG)
1465 {
1466 assert (idx == 1 || idx == 3);
1467 if (opnds[idx - 1].reg.regno % 2 != 0)
1468 {
1469 set_syntax_error (mismatch_detail, idx - 1,
1470 _("reg pair must start from even reg"));
1471 return 0;
1472 }
1473 if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1474 {
1475 set_syntax_error (mismatch_detail, idx,
1476 _("reg pair must be contiguous"));
1477 return 0;
1478 }
1479 break;
1480 }
1481
1482 /* <Xt> may be optional in some IC and TLBI instructions. */
1483 if (type == AARCH64_OPND_Rt_SYS)
1484 {
1485 assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1486 == AARCH64_OPND_CLASS_SYSTEM));
1487 if (opnds[1].present
1488 && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1489 {
1490 set_other_error (mismatch_detail, idx, _("extraneous register"));
1491 return 0;
1492 }
1493 if (!opnds[1].present
1494 && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1495 {
1496 set_other_error (mismatch_detail, idx, _("missing register"));
1497 return 0;
1498 }
1499 }
1500 switch (qualifier)
1501 {
1502 case AARCH64_OPND_QLF_WSP:
1503 case AARCH64_OPND_QLF_SP:
1504 if (!aarch64_stack_pointer_p (opnd))
1505 {
1506 set_other_error (mismatch_detail, idx,
1507 _("stack pointer register expected"));
1508 return 0;
1509 }
1510 break;
1511 default:
1512 break;
1513 }
1514 break;
1515
1516 case AARCH64_OPND_CLASS_SVE_REG:
1517 switch (type)
1518 {
1519 case AARCH64_OPND_SVE_Zm3_INDEX:
1520 case AARCH64_OPND_SVE_Zm3_22_INDEX:
1521 case AARCH64_OPND_SVE_Zm3_11_INDEX:
1522 case AARCH64_OPND_SVE_Zm4_INDEX:
1523 size = get_operand_fields_width (get_operand_from_code (type));
1524 shift = get_operand_specific_data (&aarch64_operands[type]);
1525 mask = (1 << shift) - 1;
1526 if (opnd->reg.regno > mask)
1527 {
1528 assert (mask == 7 || mask == 15);
1529 set_other_error (mismatch_detail, idx,
1530 mask == 15
1531 ? _("z0-z15 expected")
1532 : _("z0-z7 expected"));
1533 return 0;
1534 }
1535 mask = (1 << (size - shift)) - 1;
1536 if (!value_in_range_p (opnd->reglane.index, 0, mask))
1537 {
1538 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1539 return 0;
1540 }
1541 break;
1542
1543 case AARCH64_OPND_SVE_Zn_INDEX:
1544 size = aarch64_get_qualifier_esize (opnd->qualifier);
1545 if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1546 {
1547 set_elem_idx_out_of_range_error (mismatch_detail, idx,
1548 0, 64 / size - 1);
1549 return 0;
1550 }
1551 break;
1552
1553 case AARCH64_OPND_SVE_ZnxN:
1554 case AARCH64_OPND_SVE_ZtxN:
1555 if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1556 {
1557 set_other_error (mismatch_detail, idx,
1558 _("invalid register list"));
1559 return 0;
1560 }
1561 break;
1562
1563 default:
1564 break;
1565 }
1566 break;
1567
1568 case AARCH64_OPND_CLASS_PRED_REG:
1569 if (opnd->reg.regno >= 8
1570 && get_operand_fields_width (get_operand_from_code (type)) == 3)
1571 {
1572 set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1573 return 0;
1574 }
1575 break;
1576
1577 case AARCH64_OPND_CLASS_COND:
1578 if (type == AARCH64_OPND_COND1
1579 && (opnds[idx].cond->value & 0xe) == 0xe)
1580 {
1581 /* Don't allow AL or NV. */
1582 set_syntax_error (mismatch_detail, idx, NULL);
1583 }
1584 break;
1585
1586 case AARCH64_OPND_CLASS_ADDRESS:
1587 /* Check writeback. */
1588 switch (opcode->iclass)
1589 {
1590 case ldst_pos:
1591 case ldst_unscaled:
1592 case ldstnapair_offs:
1593 case ldstpair_off:
1594 case ldst_unpriv:
1595 if (opnd->addr.writeback == 1)
1596 {
1597 set_syntax_error (mismatch_detail, idx,
1598 _("unexpected address writeback"));
1599 return 0;
1600 }
1601 break;
1602 case ldst_imm10:
1603 if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1604 {
1605 set_syntax_error (mismatch_detail, idx,
1606 _("unexpected address writeback"));
1607 return 0;
1608 }
1609 break;
1610 case ldst_imm9:
1611 case ldstpair_indexed:
1612 case asisdlsep:
1613 case asisdlsop:
1614 if (opnd->addr.writeback == 0)
1615 {
1616 set_syntax_error (mismatch_detail, idx,
1617 _("address writeback expected"));
1618 return 0;
1619 }
1620 break;
1621 default:
1622 assert (opnd->addr.writeback == 0);
1623 break;
1624 }
1625 switch (type)
1626 {
1627 case AARCH64_OPND_ADDR_SIMM7:
1628 /* Scaled signed 7-bit immediate offset. */
1629 /* Get the size of the data element that is accessed, which may be
1630 different from that of the source register size,
1631 e.g. in strb/ldrb. */
1632 size = aarch64_get_qualifier_esize (opnd->qualifier);
1633 if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1634 {
1635 set_offset_out_of_range_error (mismatch_detail, idx,
1636 -64 * size, 63 * size);
1637 return 0;
1638 }
1639 if (!value_aligned_p (opnd->addr.offset.imm, size))
1640 {
1641 set_unaligned_error (mismatch_detail, idx, size);
1642 return 0;
1643 }
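	  /* For example (illustrative): for an X-register pair the element
	     size is 8, so the offset must lie in [-512, 504] and be a
	     multiple of 8.  */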
1644 break;
1645 case AARCH64_OPND_ADDR_OFFSET:
1646 case AARCH64_OPND_ADDR_SIMM9:
1647 /* Unscaled signed 9-bit immediate offset. */
1648 if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1649 {
1650 set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1651 return 0;
1652 }
1653 break;
1654
1655 case AARCH64_OPND_ADDR_SIMM9_2:
1656 /* Unscaled signed 9-bit immediate offset, which has to be negative
1657 or unaligned. */
1658 size = aarch64_get_qualifier_esize (qualifier);
1659 if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1660 && !value_aligned_p (opnd->addr.offset.imm, size))
1661 || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1662 return 1;
1663 set_other_error (mismatch_detail, idx,
1664 _("negative or unaligned offset expected"));
1665 return 0;
1666
1667 case AARCH64_OPND_ADDR_SIMM10:
1668 /* Scaled signed 10-bit immediate offset. */
1669 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1670 {
1671 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1672 return 0;
1673 }
1674 if (!value_aligned_p (opnd->addr.offset.imm, 8))
1675 {
1676 set_unaligned_error (mismatch_detail, idx, 8);
1677 return 0;
1678 }
1679 break;
1680
1681 case AARCH64_OPND_ADDR_SIMM11:
1682 /* Signed 11-bit immediate offset (multiple of 16). */
1683 if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1684 {
1685 set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1686 return 0;
1687 }
1688
1689 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1690 {
1691 set_unaligned_error (mismatch_detail, idx, 16);
1692 return 0;
1693 }
1694 break;
1695
1696 case AARCH64_OPND_ADDR_SIMM13:
1697 /* Signed 13-bit immediate offset (multiple of 16). */
1698 if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1699 {
1700 set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1701 return 0;
1702 }
1703
1704 if (!value_aligned_p (opnd->addr.offset.imm, 16))
1705 {
1706 set_unaligned_error (mismatch_detail, idx, 16);
1707 return 0;
1708 }
1709 break;
1710
1711 case AARCH64_OPND_SIMD_ADDR_POST:
1712 /* AdvSIMD load/store multiple structures, post-index. */
1713 assert (idx == 1);
1714 if (opnd->addr.offset.is_reg)
1715 {
1716 if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1717 return 1;
1718 else
1719 {
1720 set_other_error (mismatch_detail, idx,
1721 _("invalid register offset"));
1722 return 0;
1723 }
1724 }
1725 else
1726 {
1727 const aarch64_opnd_info *prev = &opnds[idx-1];
1728 unsigned num_bytes; /* total number of bytes transferred. */
1729 /* The opcode dependent area stores the number of elements in
1730 each structure to be loaded/stored. */
1731 int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1732 if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1733 /* Special handling of loading a single structure to all lanes. */
1734 num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1735 * aarch64_get_qualifier_esize (prev->qualifier);
1736 else
1737 num_bytes = prev->reglist.num_regs
1738 * aarch64_get_qualifier_esize (prev->qualifier)
1739 * aarch64_get_qualifier_nelem (prev->qualifier);
1740 if ((int) num_bytes != opnd->addr.offset.imm)
1741 {
1742 set_other_error (mismatch_detail, idx,
1743 _("invalid post-increment amount"));
1744 return 0;
1745 }
1746 }
1747 break;
1748
1749 case AARCH64_OPND_ADDR_REGOFF:
1750 /* Get the size of the data element that is accessed, which may be
1751 different from that of the source register size,
1752 e.g. in strb/ldrb. */
1753 size = aarch64_get_qualifier_esize (opnd->qualifier);
1754 /* It is either no shift or shift by the binary logarithm of SIZE. */
1755 if (opnd->shifter.amount != 0
1756 && opnd->shifter.amount != (int)get_logsz (size))
1757 {
1758 set_other_error (mismatch_detail, idx,
1759 _("invalid shift amount"));
1760 return 0;
1761 }
1762 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1763 operators. */
1764 switch (opnd->shifter.kind)
1765 {
1766 case AARCH64_MOD_UXTW:
1767 case AARCH64_MOD_LSL:
1768 case AARCH64_MOD_SXTW:
1769 case AARCH64_MOD_SXTX: break;
1770 default:
1771 set_other_error (mismatch_detail, idx,
1772 _("invalid extend/shift operator"));
1773 return 0;
1774 }
1775 break;
1776
1777 case AARCH64_OPND_ADDR_UIMM12:
1778 imm = opnd->addr.offset.imm;
1779 /* Get the size of the data element that is accessed, which may be
1780 different from that of the source register size,
1781 e.g. in strb/ldrb. */
1782 size = aarch64_get_qualifier_esize (qualifier);
1783 if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1784 {
1785 set_offset_out_of_range_error (mismatch_detail, idx,
1786 0, 4095 * size);
1787 return 0;
1788 }
1789 if (!value_aligned_p (opnd->addr.offset.imm, size))
1790 {
1791 set_unaligned_error (mismatch_detail, idx, size);
1792 return 0;
1793 }
1794 break;
1795
1796 case AARCH64_OPND_ADDR_PCREL14:
1797 case AARCH64_OPND_ADDR_PCREL19:
1798 case AARCH64_OPND_ADDR_PCREL21:
1799 case AARCH64_OPND_ADDR_PCREL26:
1800 imm = opnd->imm.value;
1801 if (operand_need_shift_by_two (get_operand_from_code (type)))
1802 {
1803 /* The offset value in a PC-relative branch instruction is always
1804 4-byte aligned and is encoded without the lowest 2 bits. */
1805 if (!value_aligned_p (imm, 4))
1806 {
1807 set_unaligned_error (mismatch_detail, idx, 4);
1808 return 0;
1809 }
1810 /* Right shift by 2 so that we can carry out the following check
1811 canonically. */
1812 imm >>= 2;
1813 }
1814 size = get_operand_fields_width (get_operand_from_code (type));
1815 if (!value_fit_signed_field_p (imm, size))
1816 {
1817 set_other_error (mismatch_detail, idx,
1818 _("immediate out of range"));
1819 return 0;
1820 }
1821 break;
1822
1823 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1824 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1825 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1826 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1827 min_value = -8;
1828 max_value = 7;
1829 sve_imm_offset_vl:
1830 assert (!opnd->addr.offset.is_reg);
1831 assert (opnd->addr.preind);
1832 num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1833 min_value *= num;
1834 max_value *= num;
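     /* Illustrative, presumed example: for the ..._S4x2xVL form NUM would
        be 2, giving a range of [-16, 14] in which the immediate must also
        be a multiple of 2 (the exact NUM comes from the operand-specific
        data table).  */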
1835 if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1836 || (opnd->shifter.operator_present
1837 && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1838 {
1839 set_other_error (mismatch_detail, idx,
1840 _("invalid addressing mode"));
1841 return 0;
1842 }
1843 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1844 {
1845 set_offset_out_of_range_error (mismatch_detail, idx,
1846 min_value, max_value);
1847 return 0;
1848 }
1849 if (!value_aligned_p (opnd->addr.offset.imm, num))
1850 {
1851 set_unaligned_error (mismatch_detail, idx, num);
1852 return 0;
1853 }
1854 break;
1855
1856 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1857 min_value = -32;
1858 max_value = 31;
1859 goto sve_imm_offset_vl;
1860
1861 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1862 min_value = -256;
1863 max_value = 255;
1864 goto sve_imm_offset_vl;
1865
1866 case AARCH64_OPND_SVE_ADDR_RI_U6:
1867 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1868 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1869 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1870 min_value = 0;
1871 max_value = 63;
1872 sve_imm_offset:
1873 assert (!opnd->addr.offset.is_reg);
1874 assert (opnd->addr.preind);
1875 num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1876 min_value *= num;
1877 max_value *= num;
1878 if (opnd->shifter.operator_present
1879 || opnd->shifter.amount_present)
1880 {
1881 set_other_error (mismatch_detail, idx,
1882 _("invalid addressing mode"));
1883 return 0;
1884 }
1885 if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1886 {
1887 set_offset_out_of_range_error (mismatch_detail, idx,
1888 min_value, max_value);
1889 return 0;
1890 }
1891 if (!value_aligned_p (opnd->addr.offset.imm, num))
1892 {
1893 set_unaligned_error (mismatch_detail, idx, num);
1894 return 0;
1895 }
1896 break;
1897
1898 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1899 min_value = -8;
1900 max_value = 7;
1901 goto sve_imm_offset;
1902
1903 case AARCH64_OPND_SVE_ADDR_ZX:
1904 /* Everything is already ensured by parse_operands or
1905 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1906 argument type). */
1907 assert (opnd->addr.offset.is_reg);
1908 assert (opnd->addr.preind);
1909 assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1910 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1911 assert (opnd->shifter.operator_present == 0);
1912 break;
1913
1914 case AARCH64_OPND_SVE_ADDR_R:
1915 case AARCH64_OPND_SVE_ADDR_RR:
1916 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1917 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1918 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1919 case AARCH64_OPND_SVE_ADDR_RX:
1920 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1921 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1922 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1923 case AARCH64_OPND_SVE_ADDR_RZ:
1924 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1925 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1926 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1927 modifiers = 1 << AARCH64_MOD_LSL;
1928 sve_rr_operand:
1929 assert (opnd->addr.offset.is_reg);
1930 assert (opnd->addr.preind);
1931 if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1932 && opnd->addr.offset.regno == 31)
1933 {
1934 set_other_error (mismatch_detail, idx,
1935 _("index register xzr is not allowed"));
1936 return 0;
1937 }
1938 if (((1 << opnd->shifter.kind) & modifiers) == 0
1939 || (opnd->shifter.amount
1940 != get_operand_specific_data (&aarch64_operands[type])))
1941 {
1942 set_other_error (mismatch_detail, idx,
1943 _("invalid addressing mode"));
1944 return 0;
1945 }
1946 break;
1947
1948 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1949 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1950 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1951 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1952 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1953 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1954 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1955 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1956 modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1957 goto sve_rr_operand;
1958
1959 case AARCH64_OPND_SVE_ADDR_ZI_U5:
1960 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1961 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1962 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1963 min_value = 0;
1964 max_value = 31;
1965 goto sve_imm_offset;
1966
1967 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1968 modifiers = 1 << AARCH64_MOD_LSL;
1969 sve_zz_operand:
1970 assert (opnd->addr.offset.is_reg);
1971 assert (opnd->addr.preind);
1972 if (((1 << opnd->shifter.kind) & modifiers) == 0
1973 || opnd->shifter.amount < 0
1974 || opnd->shifter.amount > 3)
1975 {
1976 set_other_error (mismatch_detail, idx,
1977 _("invalid addressing mode"));
1978 return 0;
1979 }
1980 break;
1981
1982 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1983 modifiers = (1 << AARCH64_MOD_SXTW);
1984 goto sve_zz_operand;
1985
1986 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1987 modifiers = 1 << AARCH64_MOD_UXTW;
1988 goto sve_zz_operand;
1989
1990 default:
1991 break;
1992 }
1993 break;
1994
1995 case AARCH64_OPND_CLASS_SIMD_REGLIST:
1996 if (type == AARCH64_OPND_LEt)
1997 {
1998 /* Get the upper bound for the element index. */
1999 num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2000 if (!value_in_range_p (opnd->reglist.index, 0, num))
2001 {
2002 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2003 return 0;
2004 }
2005 }
2006 /* The opcode dependent area stores the number of elements in
2007 each structure to be loaded/stored. */
2008 num = get_opcode_dependent_value (opcode);
2009 switch (type)
2010 {
2011 case AARCH64_OPND_LVt:
2012 assert (num >= 1 && num <= 4);
2013 /* Unless it is LD1/ST1, the number of registers should equal the
2014 number of structure elements. */
2015 if (num != 1 && opnd->reglist.num_regs != num)
2016 {
2017 set_reg_list_error (mismatch_detail, idx, num);
2018 return 0;
2019 }
2020 break;
2021 case AARCH64_OPND_LVt_AL:
2022 case AARCH64_OPND_LEt:
2023 assert (num >= 1 && num <= 4);
2024 /* The number of registers should be equal to that of the structure
2025 elements. */
2026 if (opnd->reglist.num_regs != num)
2027 {
2028 set_reg_list_error (mismatch_detail, idx, num);
2029 return 0;
2030 }
2031 break;
2032 default:
2033 break;
2034 }
2035 break;
2036
2037 case AARCH64_OPND_CLASS_IMMEDIATE:
2038 /* Constraint check on immediate operand. */
2039 imm = opnd->imm.value;
2040 /* E.g. imm_0_31 constrains value to be 0..31. */
2041 if (qualifier_value_in_range_constraint_p (qualifier)
2042 && !value_in_range_p (imm, get_lower_bound (qualifier),
2043 get_upper_bound (qualifier)))
2044 {
2045 set_imm_out_of_range_error (mismatch_detail, idx,
2046 get_lower_bound (qualifier),
2047 get_upper_bound (qualifier));
2048 return 0;
2049 }
2050
2051 switch (type)
2052 {
2053 case AARCH64_OPND_AIMM:
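     /* Arithmetic immediate: a 12-bit unsigned value with an optional
        LSL #12, e.g. ADD X0, X1, #0xfff, LSL #12 (illustrative example).  */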
2054 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2055 {
2056 set_other_error (mismatch_detail, idx,
2057 _("invalid shift operator"));
2058 return 0;
2059 }
2060 if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2061 {
2062 set_other_error (mismatch_detail, idx,
2063 _("shift amount must be 0 or 12"));
2064 return 0;
2065 }
2066 if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2067 {
2068 set_other_error (mismatch_detail, idx,
2069 _("immediate out of range"));
2070 return 0;
2071 }
2072 break;
2073
2074 case AARCH64_OPND_HALF:
2075 assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2076 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2077 {
2078 set_other_error (mismatch_detail, idx,
2079 _("invalid shift operator"));
2080 return 0;
2081 }
2082 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
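     /* Illustrative note: the checks below accept LSL #0/#16 for W
        registers and LSL #0/#16/#32/#48 for X registers,
        e.g. MOVK X0, #0x1234, LSL #48.  */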
2083 if (!value_aligned_p (opnd->shifter.amount, 16))
2084 {
2085 set_other_error (mismatch_detail, idx,
2086 _("shift amount must be a multiple of 16"));
2087 return 0;
2088 }
2089 if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2090 {
2091 set_sft_amount_out_of_range_error (mismatch_detail, idx,
2092 0, size * 8 - 16);
2093 return 0;
2094 }
2095 if (opnd->imm.value < 0)
2096 {
2097 set_other_error (mismatch_detail, idx,
2098 _("negative immediate value not allowed"));
2099 return 0;
2100 }
2101 if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2102 {
2103 set_other_error (mismatch_detail, idx,
2104 _("immediate out of range"));
2105 return 0;
2106 }
2107 break;
2108
2109 case AARCH64_OPND_IMM_MOV:
2110 {
2111 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2112 imm = opnd->imm.value;
2113 assert (idx == 1);
2114 switch (opcode->op)
2115 {
2116 case OP_MOV_IMM_WIDEN:
2117 imm = ~imm;
2118 /* Fall through. */
2119 case OP_MOV_IMM_WIDE:
2120 if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2121 {
2122 set_other_error (mismatch_detail, idx,
2123 _("immediate out of range"));
2124 return 0;
2125 }
2126 break;
2127 case OP_MOV_IMM_LOG:
2128 if (!aarch64_logical_immediate_p (imm, esize, NULL))
2129 {
2130 set_other_error (mismatch_detail, idx,
2131 _("immediate out of range"));
2132 return 0;
2133 }
2134 break;
2135 default:
2136 assert (0);
2137 return 0;
2138 }
2139 }
2140 break;
2141
2142 case AARCH64_OPND_NZCV:
2143 case AARCH64_OPND_CCMP_IMM:
2144 case AARCH64_OPND_EXCEPTION:
2145 case AARCH64_OPND_TME_UIMM16:
2146 case AARCH64_OPND_UIMM4:
2147 case AARCH64_OPND_UIMM4_ADDG:
2148 case AARCH64_OPND_UIMM7:
2149 case AARCH64_OPND_UIMM3_OP1:
2150 case AARCH64_OPND_UIMM3_OP2:
2151 case AARCH64_OPND_SVE_UIMM3:
2152 case AARCH64_OPND_SVE_UIMM7:
2153 case AARCH64_OPND_SVE_UIMM8:
2154 case AARCH64_OPND_SVE_UIMM8_53:
2155 size = get_operand_fields_width (get_operand_from_code (type));
2156 assert (size < 32);
2157 if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2158 {
2159 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2160 (1 << size) - 1);
2161 return 0;
2162 }
2163 break;
2164
2165 case AARCH64_OPND_UIMM10:
2166 /* Scaled unsigned 10-bit immediate offset. */
2167 if (!value_in_range_p (opnd->imm.value, 0, 1008))
2168 {
2169 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2170 return 0;
2171 }
2172
2173 if (!value_aligned_p (opnd->imm.value, 16))
2174 {
2175 set_unaligned_error (mismatch_detail, idx, 16);
2176 return 0;
2177 }
2178 break;
2179
2180 case AARCH64_OPND_SIMM5:
2181 case AARCH64_OPND_SVE_SIMM5:
2182 case AARCH64_OPND_SVE_SIMM5B:
2183 case AARCH64_OPND_SVE_SIMM6:
2184 case AARCH64_OPND_SVE_SIMM8:
2185 size = get_operand_fields_width (get_operand_from_code (type));
2186 assert (size < 32);
2187 if (!value_fit_signed_field_p (opnd->imm.value, size))
2188 {
2189 set_imm_out_of_range_error (mismatch_detail, idx,
2190 -(1 << (size - 1)),
2191 (1 << (size - 1)) - 1);
2192 return 0;
2193 }
2194 break;
2195
2196 case AARCH64_OPND_WIDTH:
2197 assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2198 && opnds[0].type == AARCH64_OPND_Rd);
2199 size = get_upper_bound (qualifier);
2200 if (opnd->imm.value + opnds[idx-1].imm.value > size)
2201 /* lsb+width <= reg.size */
2202 {
2203 set_imm_out_of_range_error (mismatch_detail, idx, 1,
2204 size - opnds[idx-1].imm.value);
2205 return 0;
2206 }
2207 break;
2208
2209 case AARCH64_OPND_LIMM:
2210 case AARCH64_OPND_SVE_LIMM:
2211 {
2212 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2213 uint64_t uimm = opnd->imm.value;
2214 if (opcode->op == OP_BIC)
2215 uimm = ~uimm;
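     /* Illustrative example: 0x00ff00ff00ff00ff is a valid logical
        immediate (a replicated run of contiguous ones), whereas 0x1234
        is not.  */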
2216 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2217 {
2218 set_other_error (mismatch_detail, idx,
2219 _("immediate out of range"));
2220 return 0;
2221 }
2222 }
2223 break;
2224
2225 case AARCH64_OPND_IMM0:
2226 case AARCH64_OPND_FPIMM0:
2227 if (opnd->imm.value != 0)
2228 {
2229 set_other_error (mismatch_detail, idx,
2230 _("immediate zero expected"));
2231 return 0;
2232 }
2233 break;
2234
2235 case AARCH64_OPND_IMM_ROT1:
2236 case AARCH64_OPND_IMM_ROT2:
2237 case AARCH64_OPND_SVE_IMM_ROT2:
2238 if (opnd->imm.value != 0
2239 && opnd->imm.value != 90
2240 && opnd->imm.value != 180
2241 && opnd->imm.value != 270)
2242 {
2243 set_other_error (mismatch_detail, idx,
2244 _("rotate expected to be 0, 90, 180 or 270"));
2245 return 0;
2246 }
2247 break;
2248
2249 case AARCH64_OPND_IMM_ROT3:
2250 case AARCH64_OPND_SVE_IMM_ROT1:
2251 case AARCH64_OPND_SVE_IMM_ROT3:
2252 if (opnd->imm.value != 90 && opnd->imm.value != 270)
2253 {
2254 set_other_error (mismatch_detail, idx,
2255 _("rotate expected to be 90 or 270"));
2256 return 0;
2257 }
2258 break;
2259
2260 case AARCH64_OPND_SHLL_IMM:
2261 assert (idx == 2);
2262 size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2263 if (opnd->imm.value != size)
2264 {
2265 set_other_error (mismatch_detail, idx,
2266 _("invalid shift amount"));
2267 return 0;
2268 }
2269 break;
2270
2271 case AARCH64_OPND_IMM_VLSL:
2272 size = aarch64_get_qualifier_esize (qualifier);
2273 if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2274 {
2275 set_imm_out_of_range_error (mismatch_detail, idx, 0,
2276 size * 8 - 1);
2277 return 0;
2278 }
2279 break;
2280
2281 case AARCH64_OPND_IMM_VLSR:
2282 size = aarch64_get_qualifier_esize (qualifier);
2283 if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2284 {
2285 set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2286 return 0;
2287 }
2288 break;
2289
2290 case AARCH64_OPND_SIMD_IMM:
2291 case AARCH64_OPND_SIMD_IMM_SFT:
2292 /* Qualifier check. */
2293 switch (qualifier)
2294 {
2295 case AARCH64_OPND_QLF_LSL:
2296 if (opnd->shifter.kind != AARCH64_MOD_LSL)
2297 {
2298 set_other_error (mismatch_detail, idx,
2299 _("invalid shift operator"));
2300 return 0;
2301 }
2302 break;
2303 case AARCH64_OPND_QLF_MSL:
2304 if (opnd->shifter.kind != AARCH64_MOD_MSL)
2305 {
2306 set_other_error (mismatch_detail, idx,
2307 _("invalid shift operator"));
2308 return 0;
2309 }
2310 break;
2311 case AARCH64_OPND_QLF_NIL:
2312 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2313 {
2314 set_other_error (mismatch_detail, idx,
2315 _("shift is not permitted"));
2316 return 0;
2317 }
2318 break;
2319 default:
2320 assert (0);
2321 return 0;
2322 }
2323 /* Is the immediate valid? */
2324 assert (idx == 1);
2325 if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2326 {
2327 /* uimm8 or simm8 */
2328 if (!value_in_range_p (opnd->imm.value, -128, 255))
2329 {
2330 set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2331 return 0;
2332 }
2333 }
2334 else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2335 {
2336 /* uimm64 is not
2337 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2338 ffffffffgggggggghhhhhhhh'. */
2339 set_other_error (mismatch_detail, idx,
2340 _("invalid value for immediate"));
2341 return 0;
2342 }
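     /* Illustrative example: a 64-bit value such as 0xff00ff00ff00ff00
        passes the check above because every byte is either 0x00 or 0xff.  */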
2343 /* Is the shift amount valid? */
2344 switch (opnd->shifter.kind)
2345 {
2346 case AARCH64_MOD_LSL:
2347 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2348 if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2349 {
2350 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2351 (size - 1) * 8);
2352 return 0;
2353 }
2354 if (!value_aligned_p (opnd->shifter.amount, 8))
2355 {
2356 set_unaligned_error (mismatch_detail, idx, 8);
2357 return 0;
2358 }
2359 break;
2360 case AARCH64_MOD_MSL:
2361 /* Only 8 and 16 are valid shift amounts. */
2362 if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2363 {
2364 set_other_error (mismatch_detail, idx,
2365 _("shift amount must be 0 or 16"));
2366 return 0;
2367 }
2368 break;
2369 default:
2370 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2371 {
2372 set_other_error (mismatch_detail, idx,
2373 _("invalid shift operator"));
2374 return 0;
2375 }
2376 break;
2377 }
2378 break;
2379
2380 case AARCH64_OPND_FPIMM:
2381 case AARCH64_OPND_SIMD_FPIMM:
2382 case AARCH64_OPND_SVE_FPIMM8:
2383 if (opnd->imm.is_fp == 0)
2384 {
2385 set_other_error (mismatch_detail, idx,
2386 _("floating-point immediate expected"));
2387 return 0;
2388 }
2389 /* The value is expected to be an 8-bit floating-point constant with
2390 sign, 3-bit exponent and normalized 4 bits of precision, encoded
2391 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2392 instruction). */
2393 if (!value_in_range_p (opnd->imm.value, 0, 255))
2394 {
2395 set_other_error (mismatch_detail, idx,
2396 _("immediate out of range"));
2397 return 0;
2398 }
2399 if (opnd->shifter.kind != AARCH64_MOD_NONE)
2400 {
2401 set_other_error (mismatch_detail, idx,
2402 _("invalid shift operator"));
2403 return 0;
2404 }
2405 break;
2406
2407 case AARCH64_OPND_SVE_AIMM:
2408 min_value = 0;
2409 sve_aimm:
2410 assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2411 size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2412 mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
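     /* The mask covers the low SIZE * 8 bits; the shift is split in two so
        that SIZE == 8 does not shift a 64-bit value by 64 (undefined).  */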
2413 uvalue = opnd->imm.value;
2414 shift = opnd->shifter.amount;
2415 if (size == 1)
2416 {
2417 if (shift != 0)
2418 {
2419 set_other_error (mismatch_detail, idx,
2420 _("no shift amount allowed for"
2421 " 8-bit constants"));
2422 return 0;
2423 }
2424 }
2425 else
2426 {
2427 if (shift != 0 && shift != 8)
2428 {
2429 set_other_error (mismatch_detail, idx,
2430 _("shift amount must be 0 or 8"));
2431 return 0;
2432 }
2433 if (shift == 0 && (uvalue & 0xff) == 0)
2434 {
2435 shift = 8;
2436 uvalue = (int64_t) uvalue / 256;
2437 }
2438 }
2439 mask >>= shift;
2440 if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2441 {
2442 set_other_error (mismatch_detail, idx,
2443 _("immediate too big for element size"));
2444 return 0;
2445 }
2446 uvalue = (uvalue - min_value) & mask;
2447 if (uvalue > 0xff)
2448 {
2449 set_other_error (mismatch_detail, idx,
2450 _("invalid arithmetic immediate"));
2451 return 0;
2452 }
2453 break;
2454
2455 case AARCH64_OPND_SVE_ASIMM:
2456 min_value = -128;
2457 goto sve_aimm;
2458
2459 case AARCH64_OPND_SVE_I1_HALF_ONE:
2460 assert (opnd->imm.is_fp);
2461 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2462 {
2463 set_other_error (mismatch_detail, idx,
2464 _("floating-point value must be 0.5 or 1.0"));
2465 return 0;
2466 }
2467 break;
2468
2469 case AARCH64_OPND_SVE_I1_HALF_TWO:
2470 assert (opnd->imm.is_fp);
2471 if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2472 {
2473 set_other_error (mismatch_detail, idx,
2474 _("floating-point value must be 0.5 or 2.0"));
2475 return 0;
2476 }
2477 break;
2478
2479 case AARCH64_OPND_SVE_I1_ZERO_ONE:
2480 assert (opnd->imm.is_fp);
2481 if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2482 {
2483 set_other_error (mismatch_detail, idx,
2484 _("floating-point value must be 0.0 or 1.0"));
2485 return 0;
2486 }
2487 break;
2488
2489 case AARCH64_OPND_SVE_INV_LIMM:
2490 {
2491 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2492 uint64_t uimm = ~opnd->imm.value;
2493 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2494 {
2495 set_other_error (mismatch_detail, idx,
2496 _("immediate out of range"));
2497 return 0;
2498 }
2499 }
2500 break;
2501
2502 case AARCH64_OPND_SVE_LIMM_MOV:
2503 {
2504 int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2505 uint64_t uimm = opnd->imm.value;
2506 if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2507 {
2508 set_other_error (mismatch_detail, idx,
2509 _("immediate out of range"));
2510 return 0;
2511 }
2512 if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2513 {
2514 set_other_error (mismatch_detail, idx,
2515 _("invalid replicated MOV immediate"));
2516 return 0;
2517 }
2518 }
2519 break;
2520
2521 case AARCH64_OPND_SVE_PATTERN_SCALED:
2522 assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2523 if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2524 {
2525 set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2526 return 0;
2527 }
2528 break;
2529
2530 case AARCH64_OPND_SVE_SHLIMM_PRED:
2531 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2532 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2533 if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2534 {
2535 set_imm_out_of_range_error (mismatch_detail, idx,
2536 0, 8 * size - 1);
2537 return 0;
2538 }
2539 break;
2540
2541 case AARCH64_OPND_SVE_SHRIMM_PRED:
2542 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2543 size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2544 if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2545 {
2546 set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2547 return 0;
2548 }
2549 break;
2550
2551 default:
2552 break;
2553 }
2554 break;
2555
2556 case AARCH64_OPND_CLASS_SYSTEM:
2557 switch (type)
2558 {
2559 case AARCH64_OPND_PSTATEFIELD:
2560 assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2561 /* MSR UAO, #uimm4
2562 MSR PAN, #uimm4
2563 MSR SSBS, #uimm4
2564 The immediate must be #0 or #1. */
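     /* E.g. MSR PAN, #1 is accepted, while MSR PAN, #2 is rejected by the
        check below (illustrative example).  */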
2565 if ((opnd->pstatefield == 0x03 /* UAO. */
2566 || opnd->pstatefield == 0x04 /* PAN. */
2567 || opnd->pstatefield == 0x19 /* SSBS. */
2568 || opnd->pstatefield == 0x1a) /* DIT. */
2569 && opnds[1].imm.value > 1)
2570 {
2571 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2572 return 0;
2573 }
2574 /* MSR SPSel, #uimm4
2575 Uses uimm4 as a control value to select the stack pointer: if
2576 bit 0 is set it selects the current exception level's stack
2577 pointer; if bit 0 is clear it selects the shared EL0 stack pointer.
2578 Bits 1 to 3 of uimm4 are reserved and should be zero. */
2579 if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2580 {
2581 set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2582 return 0;
2583 }
2584 break;
2585 default:
2586 break;
2587 }
2588 break;
2589
2590 case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2591 /* Get the upper bound for the element index. */
2592 if (opcode->op == OP_FCMLA_ELEM)
2593 /* FCMLA index range depends on the vector size of other operands
2594 and is halved because complex numbers take two elements. */
2595 num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2596 * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2597 else
2598 num = 16;
2599 num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2600 assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2601
2602 /* Index out-of-range. */
2603 if (!value_in_range_p (opnd->reglane.index, 0, num))
2604 {
2605 set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2606 return 0;
2607 }
2608 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2609 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
2610 number is encoded in "size:M:Rm":
2611 size <Vm>
2612 00 RESERVED
2613 01 0:Rm
2614 10 M:Rm
2615 11 RESERVED */
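     /* E.g. in the by-element .H forms only V0-V15 may be used as <Vm>, so
        FMLA V0.4H, V1.4H, V16.H[1] is rejected here (illustrative
        example).  */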
2616 if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2617 && !value_in_range_p (opnd->reglane.regno, 0, 15))
2618 {
2619 set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2620 return 0;
2621 }
2622 break;
2623
2624 case AARCH64_OPND_CLASS_MODIFIED_REG:
2625 assert (idx == 1 || idx == 2);
2626 switch (type)
2627 {
2628 case AARCH64_OPND_Rm_EXT:
2629 if (!aarch64_extend_operator_p (opnd->shifter.kind)
2630 && opnd->shifter.kind != AARCH64_MOD_LSL)
2631 {
2632 set_other_error (mismatch_detail, idx,
2633 _("extend operator expected"));
2634 return 0;
2635 }
2636 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2637 (i.e. SP), in which case it defaults to LSL. The LSL alias is
2638 only valid when "Rd" or "Rn" is '11111', and is preferred in that
2639 case. */
2640 if (!aarch64_stack_pointer_p (opnds + 0)
2641 && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2642 {
2643 if (!opnd->shifter.operator_present)
2644 {
2645 set_other_error (mismatch_detail, idx,
2646 _("missing extend operator"));
2647 return 0;
2648 }
2649 else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2650 {
2651 set_other_error (mismatch_detail, idx,
2652 _("'LSL' operator not allowed"));
2653 return 0;
2654 }
2655 }
2656 assert (opnd->shifter.operator_present /* Default to LSL. */
2657 || opnd->shifter.kind == AARCH64_MOD_LSL);
2658 if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2659 {
2660 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2661 return 0;
2662 }
2663 /* In the 64-bit form, the final register operand is written as Wm
2664 for all but the (possibly omitted) UXTX/LSL and SXTX
2665 operators.
2666 N.B. GAS allows X register to be used with any operator as a
2667 programming convenience. */
2668 if (qualifier == AARCH64_OPND_QLF_X
2669 && opnd->shifter.kind != AARCH64_MOD_LSL
2670 && opnd->shifter.kind != AARCH64_MOD_UXTX
2671 && opnd->shifter.kind != AARCH64_MOD_SXTX)
2672 {
2673 set_other_error (mismatch_detail, idx, _("W register expected"));
2674 return 0;
2675 }
2676 break;
2677
2678 case AARCH64_OPND_Rm_SFT:
2679 /* ROR is not available to the shifted register operand in
2680 arithmetic instructions. */
2681 if (!aarch64_shift_operator_p (opnd->shifter.kind))
2682 {
2683 set_other_error (mismatch_detail, idx,
2684 _("shift operator expected"));
2685 return 0;
2686 }
2687 if (opnd->shifter.kind == AARCH64_MOD_ROR
2688 && opcode->iclass != log_shift)
2689 {
2690 set_other_error (mismatch_detail, idx,
2691 _("'ROR' operator not allowed"));
2692 return 0;
2693 }
2694 num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2695 if (!value_in_range_p (opnd->shifter.amount, 0, num))
2696 {
2697 set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2698 return 0;
2699 }
2700 break;
2701
2702 default:
2703 break;
2704 }
2705 break;
2706
2707 default:
2708 break;
2709 }
2710
2711 return 1;
2712 }
2713
2714 /* Main entrypoint for the operand constraint checking.
2715
2716 Return 1 if operands of *INST meet the constraint applied by the operand
2717 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2718 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
2719 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2720 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2721 error kind when it is notified that an instruction does not pass the check).
2722
2723 Un-determined operand qualifiers may get established during the process. */
2724
2725 int
2726 aarch64_match_operands_constraint (aarch64_inst *inst,
2727 aarch64_operand_error *mismatch_detail)
2728 {
2729 int i;
2730
2731 DEBUG_TRACE ("enter");
2732
2733 /* Check for cases where a source register needs to be the same as the
2734 destination register. Do this before matching qualifiers since if
2735 an instruction has both invalid tying and invalid qualifiers,
2736 the error about qualifiers would suggest several alternative
2737 instructions that also have invalid tying. */
2738 i = inst->opcode->tied_operand;
2739 if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2740 {
2741 if (mismatch_detail)
2742 {
2743 mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2744 mismatch_detail->index = i;
2745 mismatch_detail->error = NULL;
2746 }
2747 return 0;
2748 }
2749
2750 /* Match operands' qualifiers.
2751 *INST has already had qualifiers established for some, if not all, of
2752 its operands; we need to find out whether these established
2753 qualifiers match one of the qualifier sequences in
2754 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
2755 with the corresponding qualifier in such a sequence.
2756 Only basic operand constraint checking is done here; the more thorough
2757 constraint checking will be carried out by operand_general_constraint_met_p,
2758 which has to be called after this in order to get all of the operands'
2759 qualifiers established. */
2760 if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2761 {
2762 DEBUG_TRACE ("FAIL on operand qualifier matching");
2763 if (mismatch_detail)
2764 {
2765 /* Return an error type to indicate that it is a qualifier
2766 matching failure; we don't care about which operand as there
2767 is enough information in the opcode table to reproduce it. */
2768 mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2769 mismatch_detail->index = -1;
2770 mismatch_detail->error = NULL;
2771 }
2772 return 0;
2773 }
2774
2775 /* Match operands' constraint. */
2776 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2777 {
2778 enum aarch64_opnd type = inst->opcode->operands[i];
2779 if (type == AARCH64_OPND_NIL)
2780 break;
2781 if (inst->operands[i].skip)
2782 {
2783 DEBUG_TRACE ("skip the incomplete operand %d", i);
2784 continue;
2785 }
2786 if (operand_general_constraint_met_p (inst->operands, i, type,
2787 inst->opcode, mismatch_detail) == 0)
2788 {
2789 DEBUG_TRACE ("FAIL on operand %d", i);
2790 return 0;
2791 }
2792 }
2793
2794 DEBUG_TRACE ("PASS");
2795
2796 return 1;
2797 }
2798
2799 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2800 Also updates the TYPE of each INST->OPERANDS with the corresponding
2801 value of OPCODE->OPERANDS.
2802
2803 Note that some operand qualifiers may need to be manually cleared by
2804 the caller before it further calls aarch64_opcode_encode; doing
2805 this helps the qualifier matching facilities work
2806 properly. */
2807
2808 const aarch64_opcode*
2809 aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2810 {
2811 int i;
2812 const aarch64_opcode *old = inst->opcode;
2813
2814 inst->opcode = opcode;
2815
2816 /* Update the operand types. */
2817 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2818 {
2819 inst->operands[i].type = opcode->operands[i];
2820 if (opcode->operands[i] == AARCH64_OPND_NIL)
2821 break;
2822 }
2823
2824 DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2825
2826 return old;
2827 }
2828
2829 int
2830 aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2831 {
2832 int i;
2833 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2834 if (operands[i] == operand)
2835 return i;
2836 else if (operands[i] == AARCH64_OPND_NIL)
2837 break;
2838 return -1;
2839 }
2840 \f
2841 /* R0...R30, followed by FOR31. */
2842 #define BANK(R, FOR31) \
2843 { R (0), R (1), R (2), R (3), R (4), R (5), R (6), R (7), \
2844 R (8), R (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2845 R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2846 R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }
2847 /* [0][0] 32-bit integer regs with sp Wn
2848 [0][1] 64-bit integer regs with sp Xn sf=1
2849 [1][0] 32-bit integer regs with #0 Wn
2850 [1][1] 64-bit integer regs with #0 Xn sf=1 */
2851 static const char *int_reg[2][2][32] = {
2852 #define R32(X) "w" #X
2853 #define R64(X) "x" #X
2854 { BANK (R32, "wsp"), BANK (R64, "sp") },
2855 { BANK (R32, "wzr"), BANK (R64, "xzr") }
2856 #undef R64
2857 #undef R32
2858 };
2859
2860 /* Names of the SVE vector registers, first with .S suffixes,
2861 then with .D suffixes. */
2862
2863 static const char *sve_reg[2][32] = {
2864 #define ZS(X) "z" #X ".s"
2865 #define ZD(X) "z" #X ".d"
2866 BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2867 #undef ZD
2868 #undef ZS
2869 };
2870 #undef BANK
2871
2872 /* Return the integer register name.
2873 If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg. */
2874
2875 static inline const char *
2876 get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2877 {
2878 const int has_zr = sp_reg_p ? 0 : 1;
2879 const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2880 return int_reg[has_zr][is_64][regno];
2881 }
2882
2883 /* Like get_int_reg_name, but IS_64 is always 1. */
2884
2885 static inline const char *
2886 get_64bit_int_reg_name (int regno, int sp_reg_p)
2887 {
2888 const int has_zr = sp_reg_p ? 0 : 1;
2889 return int_reg[has_zr][1][regno];
2890 }
2891
2892 /* Get the name of the integer offset register in OPND, using the shift type
2893 to decide whether it's a word or doubleword. */
2894
2895 static inline const char *
2896 get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2897 {
2898 switch (opnd->shifter.kind)
2899 {
2900 case AARCH64_MOD_UXTW:
2901 case AARCH64_MOD_SXTW:
2902 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2903
2904 case AARCH64_MOD_LSL:
2905 case AARCH64_MOD_SXTX:
2906 return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2907
2908 default:
2909 abort ();
2910 }
2911 }
2912
2913 /* Get the name of the SVE vector register with number REGNO, using
2914 QUALIFIER to decide whether the suffix should be .S or .D. */
2915
2916 static inline const char *
2917 get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2918 {
2919 assert (qualifier == AARCH64_OPND_QLF_S_S
2920 || qualifier == AARCH64_OPND_QLF_S_D);
2921 return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2922 }
2923
2924 /* Types for expanding an encoded 8-bit value to a floating-point value. */
2925
2926 typedef union
2927 {
2928 uint64_t i;
2929 double d;
2930 } double_conv_t;
2931
2932 typedef union
2933 {
2934 uint32_t i;
2935 float f;
2936 } single_conv_t;
2937
2938 typedef union
2939 {
2940 uint32_t i;
2941 float f;
2942 } half_conv_t;
2943
2944 /* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2945 normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2946 (depending on the type of the instruction). IMM8 will be expanded to a
2947 single-precision floating-point value (SIZE == 4) or a double-precision
2948 floating-point value (SIZE == 8). A half-precision floating-point value
2949 (SIZE == 2) is expanded to a single-precision floating-point value. The
2950 expanded value is returned. */
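   /* Worked example (single precision): IMM8 == 0x70 has imm8<7> == 0,
      imm8<6> == 1 and imm8<6:0> == 0x70, so the code below produces
      0x3f800000, i.e. 1.0f.  */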
2951
2952 static uint64_t
2953 expand_fp_imm (int size, uint32_t imm8)
2954 {
2955 uint64_t imm = 0;
2956 uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2957
2958 imm8_7 = (imm8 >> 7) & 0x01; /* imm8<7> */
2959 imm8_6_0 = imm8 & 0x7f; /* imm8<6:0> */
2960 imm8_6 = imm8_6_0 >> 6; /* imm8<6> */
2961 imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2962 | (imm8_6 << 1) | imm8_6; /* Replicate(imm8<6>,4) */
2963 if (size == 8)
2964 {
2965 imm = (imm8_7 << (63-32)) /* imm8<7> */
2966 | ((imm8_6 ^ 1) << (62-32)) /* NOT(imm8<6>) */
2967 | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2968 | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2969 | (imm8_6_0 << (48-32)); /* imm8<6>:imm8<5:0> */
2970 imm <<= 32;
2971 }
2972 else if (size == 4 || size == 2)
2973 {
2974 imm = (imm8_7 << 31) /* imm8<7> */
2975 | ((imm8_6 ^ 1) << 30) /* NOT(imm8<6>) */
2976 | (imm8_6_repl4 << 26) /* Replicate(imm8<6>,4) */
2977 | (imm8_6_0 << 19); /* imm8<6>:imm8<5:0> */
2978 }
2979 else
2980 {
2981 /* An unsupported size. */
2982 assert (0);
2983 }
2984
2985 return imm;
2986 }
2987
2988 /* Produce the string representation of the register list operand *OPND
2989 in the buffer pointed to by BUF of size SIZE. PREFIX is the part of
2990 the register name that comes before the register number, such as "v". */
2991 static void
2992 print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2993 const char *prefix)
2994 {
2995 const int num_regs = opnd->reglist.num_regs;
2996 const int first_reg = opnd->reglist.first_regno;
2997 const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2998 const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2999 char tb[8]; /* Temporary buffer. */
3000
3001 assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3002 assert (num_regs >= 1 && num_regs <= 4);
3003
3004 /* Prepare the index if any. */
3005 if (opnd->reglist.has_index)
3006 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3007 snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
3008 else
3009 tb[0] = '\0';
3010
3011 /* The hyphenated form is preferred for disassembly if there are
3012 more than two registers in the list, and the register numbers
3013 are monotonically increasing in increments of one. */
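     /* E.g. a four-register list starting at v0 with a 16B qualifier is
        printed as {v0.16b-v3.16b} rather than as four comma-separated
        registers (illustrative example).  */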
3014 if (num_regs > 2 && last_reg > first_reg)
3015 snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
3016 prefix, last_reg, qlf_name, tb);
3017 else
3018 {
3019 const int reg0 = first_reg;
3020 const int reg1 = (first_reg + 1) & 0x1f;
3021 const int reg2 = (first_reg + 2) & 0x1f;
3022 const int reg3 = (first_reg + 3) & 0x1f;
3023
3024 switch (num_regs)
3025 {
3026 case 1:
3027 snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3028 break;
3029 case 2:
3030 snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3031 prefix, reg1, qlf_name, tb);
3032 break;
3033 case 3:
3034 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3035 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3036 prefix, reg2, qlf_name, tb);
3037 break;
3038 case 4:
3039 snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3040 prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3041 prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3042 break;
3043 }
3044 }
3045 }
3046
3047 /* Print the register+immediate address in OPND to BUF, which has SIZE
3048 characters. BASE is the name of the base register. */
3049
3050 static void
3051 print_immediate_offset_address (char *buf, size_t size,
3052 const aarch64_opnd_info *opnd,
3053 const char *base)
3054 {
3055 if (opnd->addr.writeback)
3056 {
3057 if (opnd->addr.preind)
3058 snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3059 else
3060 snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3061 }
3062 else
3063 {
3064 if (opnd->shifter.operator_present)
3065 {
3066 assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3067 snprintf (buf, size, "[%s, #%d, mul vl]",
3068 base, opnd->addr.offset.imm);
3069 }
3070 else if (opnd->addr.offset.imm)
3071 snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3072 else
3073 snprintf (buf, size, "[%s]", base);
3074 }
3075 }
3076
3077 /* Produce the string representation of the register offset address operand
3078 *OPND in the buffer pointed to by BUF of size SIZE. BASE and OFFSET are
3079 the names of the base and offset registers. */
3080 static void
3081 print_register_offset_address (char *buf, size_t size,
3082 const aarch64_opnd_info *opnd,
3083 const char *base, const char *offset)
3084 {
3085 char tb[16]; /* Temporary buffer. */
3086 bfd_boolean print_extend_p = TRUE;
3087 bfd_boolean print_amount_p = TRUE;
3088 const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3089
3090 if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3091 || !opnd->shifter.amount_present))
3092 {
3093 /* Don't print the shift/extend amount when the amount is zero and
3094 it is not the special case of an 8-bit load/store instruction. */
3095 print_amount_p = FALSE;
3096 /* Likewise, no need to print the shift operator LSL in such a
3097 situation. */
3098 if (opnd->shifter.kind == AARCH64_MOD_LSL)
3099 print_extend_p = FALSE;
3100 }
3101
3102 /* Prepare for the extend/shift. */
3103 if (print_extend_p)
3104 {
3105 if (print_amount_p)
3106 snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3107 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3108 (opnd->shifter.amount % 100));
3109 else
3110 snprintf (tb, sizeof (tb), ", %s", shift_name);
3111 }
3112 else
3113 tb[0] = '\0';
3114
3115 snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3116 }
3117
3118 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3119 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
3120 PC, PCREL_P and ADDRESS are used to pass in and return information about
3121 the PC-relative address calculation, where the PC value is passed in
3122 PC. If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3123 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3124 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3125
3126 The function serves both the disassembler and the assembler diagnostics
3127 issuer, which is the reason why it lives in this file. */
3128
3129 void
3130 aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3131 const aarch64_opcode *opcode,
3132 const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3133 bfd_vma *address, char** notes)
3134 {
3135 unsigned int i, num_conds;
3136 const char *name = NULL;
3137 const aarch64_opnd_info *opnd = opnds + idx;
3138 enum aarch64_modifier_kind kind;
3139 uint64_t addr, enum_value;
3140
3141 buf[0] = '\0';
3142 if (pcrel_p)
3143 *pcrel_p = 0;
3144
3145 switch (opnd->type)
3146 {
3147 case AARCH64_OPND_Rd:
3148 case AARCH64_OPND_Rn:
3149 case AARCH64_OPND_Rm:
3150 case AARCH64_OPND_Rt:
3151 case AARCH64_OPND_Rt2:
3152 case AARCH64_OPND_Rs:
3153 case AARCH64_OPND_Ra:
3154 case AARCH64_OPND_Rt_SYS:
3155 case AARCH64_OPND_PAIRREG:
3156 case AARCH64_OPND_SVE_Rm:
3157 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3158 the <ic_op>, therefore we use opnd->present to override the
3159 generic optional-ness information. */
3160 if (opnd->type == AARCH64_OPND_Rt_SYS)
3161 {
3162 if (!opnd->present)
3163 break;
3164 }
3165 /* Omit the operand, e.g. RET. */
3166 else if (optional_operand_p (opcode, idx)
3167 && (opnd->reg.regno
3168 == get_optional_operand_default_value (opcode)))
3169 break;
3170 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3171 || opnd->qualifier == AARCH64_OPND_QLF_X);
3172 snprintf (buf, size, "%s",
3173 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3174 break;
3175
3176 case AARCH64_OPND_Rd_SP:
3177 case AARCH64_OPND_Rn_SP:
3178 case AARCH64_OPND_Rt_SP:
3179 case AARCH64_OPND_SVE_Rn_SP:
3180 case AARCH64_OPND_Rm_SP:
3181 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3182 || opnd->qualifier == AARCH64_OPND_QLF_WSP
3183 || opnd->qualifier == AARCH64_OPND_QLF_X
3184 || opnd->qualifier == AARCH64_OPND_QLF_SP);
3185 snprintf (buf, size, "%s",
3186 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3187 break;
3188
3189 case AARCH64_OPND_Rm_EXT:
3190 kind = opnd->shifter.kind;
3191 assert (idx == 1 || idx == 2);
3192 if ((aarch64_stack_pointer_p (opnds)
3193 || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3194 && ((opnd->qualifier == AARCH64_OPND_QLF_W
3195 && opnds[0].qualifier == AARCH64_OPND_QLF_W
3196 && kind == AARCH64_MOD_UXTW)
3197 || (opnd->qualifier == AARCH64_OPND_QLF_X
3198 && kind == AARCH64_MOD_UXTX)))
3199 {
3200 /* 'LSL' is the preferred form in this case. */
3201 kind = AARCH64_MOD_LSL;
3202 if (opnd->shifter.amount == 0)
3203 {
3204 /* Shifter omitted. */
3205 snprintf (buf, size, "%s",
3206 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3207 break;
3208 }
3209 }
3210 if (opnd->shifter.amount)
3211 snprintf (buf, size, "%s, %s #%" PRIi64,
3212 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3213 aarch64_operand_modifiers[kind].name,
3214 opnd->shifter.amount);
3215 else
3216 snprintf (buf, size, "%s, %s",
3217 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3218 aarch64_operand_modifiers[kind].name);
3219 break;
3220
3221 case AARCH64_OPND_Rm_SFT:
3222 assert (opnd->qualifier == AARCH64_OPND_QLF_W
3223 || opnd->qualifier == AARCH64_OPND_QLF_X);
3224 if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3225 snprintf (buf, size, "%s",
3226 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3227 else
3228 snprintf (buf, size, "%s, %s #%" PRIi64,
3229 get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3230 aarch64_operand_modifiers[opnd->shifter.kind].name,
3231 opnd->shifter.amount);
3232 break;
3233
3234 case AARCH64_OPND_Fd:
3235 case AARCH64_OPND_Fn:
3236 case AARCH64_OPND_Fm:
3237 case AARCH64_OPND_Fa:
3238 case AARCH64_OPND_Ft:
3239 case AARCH64_OPND_Ft2:
3240 case AARCH64_OPND_Sd:
3241 case AARCH64_OPND_Sn:
3242 case AARCH64_OPND_Sm:
3243 case AARCH64_OPND_SVE_VZn:
3244 case AARCH64_OPND_SVE_Vd:
3245 case AARCH64_OPND_SVE_Vm:
3246 case AARCH64_OPND_SVE_Vn:
3247 snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3248 opnd->reg.regno);
3249 break;
3250
3251 case AARCH64_OPND_Va:
3252 case AARCH64_OPND_Vd:
3253 case AARCH64_OPND_Vn:
3254 case AARCH64_OPND_Vm:
3255 snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3256 aarch64_get_qualifier_name (opnd->qualifier));
3257 break;
3258
3259 case AARCH64_OPND_Ed:
3260 case AARCH64_OPND_En:
3261 case AARCH64_OPND_Em:
3262 case AARCH64_OPND_Em16:
3263 case AARCH64_OPND_SM3_IMM2:
3264 snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3265 aarch64_get_qualifier_name (opnd->qualifier),
3266 opnd->reglane.index);
3267 break;
3268
3269 case AARCH64_OPND_VdD1:
3270 case AARCH64_OPND_VnD1:
3271 snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3272 break;
3273
3274 case AARCH64_OPND_LVn:
3275 case AARCH64_OPND_LVt:
3276 case AARCH64_OPND_LVt_AL:
3277 case AARCH64_OPND_LEt:
3278 print_register_list (buf, size, opnd, "v");
3279 break;
3280
3281 case AARCH64_OPND_SVE_Pd:
3282 case AARCH64_OPND_SVE_Pg3:
3283 case AARCH64_OPND_SVE_Pg4_5:
3284 case AARCH64_OPND_SVE_Pg4_10:
3285 case AARCH64_OPND_SVE_Pg4_16:
3286 case AARCH64_OPND_SVE_Pm:
3287 case AARCH64_OPND_SVE_Pn:
3288 case AARCH64_OPND_SVE_Pt:
3289 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3290 snprintf (buf, size, "p%d", opnd->reg.regno);
3291 else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3292 || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3293 snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3294 aarch64_get_qualifier_name (opnd->qualifier));
3295 else
3296 snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3297 aarch64_get_qualifier_name (opnd->qualifier));
3298 break;
3299
3300 case AARCH64_OPND_SVE_Za_5:
3301 case AARCH64_OPND_SVE_Za_16:
3302 case AARCH64_OPND_SVE_Zd:
3303 case AARCH64_OPND_SVE_Zm_5:
3304 case AARCH64_OPND_SVE_Zm_16:
3305 case AARCH64_OPND_SVE_Zn:
3306 case AARCH64_OPND_SVE_Zt:
3307 if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3308 snprintf (buf, size, "z%d", opnd->reg.regno);
3309 else
3310 snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3311 aarch64_get_qualifier_name (opnd->qualifier));
3312 break;
3313
3314 case AARCH64_OPND_SVE_ZnxN:
3315 case AARCH64_OPND_SVE_ZtxN:
3316 print_register_list (buf, size, opnd, "z");
3317 break;
3318
3319 case AARCH64_OPND_SVE_Zm3_INDEX:
3320 case AARCH64_OPND_SVE_Zm3_22_INDEX:
3321 case AARCH64_OPND_SVE_Zm3_11_INDEX:
3322 case AARCH64_OPND_SVE_Zm4_INDEX:
3323 case AARCH64_OPND_SVE_Zn_INDEX:
3324 snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3325 aarch64_get_qualifier_name (opnd->qualifier),
3326 opnd->reglane.index);
3327 break;
3328
3329 case AARCH64_OPND_CRn:
3330 case AARCH64_OPND_CRm:
3331 snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3332 break;
3333
3334 case AARCH64_OPND_IDX:
3335 case AARCH64_OPND_MASK:
3336 case AARCH64_OPND_IMM:
3337 case AARCH64_OPND_IMM_2:
3338 case AARCH64_OPND_WIDTH:
3339 case AARCH64_OPND_UIMM3_OP1:
3340 case AARCH64_OPND_UIMM3_OP2:
3341 case AARCH64_OPND_BIT_NUM:
3342 case AARCH64_OPND_IMM_VLSL:
3343 case AARCH64_OPND_IMM_VLSR:
3344 case AARCH64_OPND_SHLL_IMM:
3345 case AARCH64_OPND_IMM0:
3346 case AARCH64_OPND_IMMR:
3347 case AARCH64_OPND_IMMS:
3348 case AARCH64_OPND_FBITS:
3349 case AARCH64_OPND_TME_UIMM16:
3350 case AARCH64_OPND_SIMM5:
3351 case AARCH64_OPND_SVE_SHLIMM_PRED:
3352 case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3353 case AARCH64_OPND_SVE_SHRIMM_PRED:
3354 case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3355 case AARCH64_OPND_SVE_SIMM5:
3356 case AARCH64_OPND_SVE_SIMM5B:
3357 case AARCH64_OPND_SVE_SIMM6:
3358 case AARCH64_OPND_SVE_SIMM8:
3359 case AARCH64_OPND_SVE_UIMM3:
3360 case AARCH64_OPND_SVE_UIMM7:
3361 case AARCH64_OPND_SVE_UIMM8:
3362 case AARCH64_OPND_SVE_UIMM8_53:
3363 case AARCH64_OPND_IMM_ROT1:
3364 case AARCH64_OPND_IMM_ROT2:
3365 case AARCH64_OPND_IMM_ROT3:
3366 case AARCH64_OPND_SVE_IMM_ROT1:
3367 case AARCH64_OPND_SVE_IMM_ROT2:
3368 case AARCH64_OPND_SVE_IMM_ROT3:
3369 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3370 break;
3371
3372 case AARCH64_OPND_SVE_I1_HALF_ONE:
3373 case AARCH64_OPND_SVE_I1_HALF_TWO:
3374 case AARCH64_OPND_SVE_I1_ZERO_ONE:
3375 {
3376 single_conv_t c;
3377 c.i = opnd->imm.value;
3378 snprintf (buf, size, "#%.1f", c.f);
3379 break;
3380 }
3381
3382 case AARCH64_OPND_SVE_PATTERN:
3383 if (optional_operand_p (opcode, idx)
3384 && opnd->imm.value == get_optional_operand_default_value (opcode))
3385 break;
3386 enum_value = opnd->imm.value;
3387 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3388 if (aarch64_sve_pattern_array[enum_value])
3389 snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3390 else
3391 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3392 break;
3393
3394 case AARCH64_OPND_SVE_PATTERN_SCALED:
3395 if (optional_operand_p (opcode, idx)
3396 && !opnd->shifter.operator_present
3397 && opnd->imm.value == get_optional_operand_default_value (opcode))
3398 break;
3399 enum_value = opnd->imm.value;
3400 assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3401 if (aarch64_sve_pattern_array[opnd->imm.value])
3402 snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3403 else
3404 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3405 if (opnd->shifter.operator_present)
3406 {
3407 size_t len = strlen (buf);
3408 snprintf (buf + len, size - len, ", %s #%" PRIi64,
3409 aarch64_operand_modifiers[opnd->shifter.kind].name,
3410 opnd->shifter.amount);
3411 }
3412 break;
3413
3414 case AARCH64_OPND_SVE_PRFOP:
3415 enum_value = opnd->imm.value;
3416 assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3417 if (aarch64_sve_prfop_array[enum_value])
3418 snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3419 else
3420 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3421 break;
3422
3423 case AARCH64_OPND_IMM_MOV:
3424 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3425 {
3426 case 4: /* e.g. MOV Wd, #<imm32>. */
3427 {
3428 int imm32 = opnd->imm.value;
3429 snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3430 }
3431 break;
3432 case 8: /* e.g. MOV Xd, #<imm64>. */
3433 snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3434 opnd->imm.value, opnd->imm.value);
3435 break;
3436 default: assert (0);
3437 }
3438 break;
3439
3440 case AARCH64_OPND_FPIMM0:
3441 snprintf (buf, size, "#0.0");
3442 break;
3443
3444 case AARCH64_OPND_LIMM:
3445 case AARCH64_OPND_AIMM:
3446 case AARCH64_OPND_HALF:
3447 case AARCH64_OPND_SVE_INV_LIMM:
3448 case AARCH64_OPND_SVE_LIMM:
3449 case AARCH64_OPND_SVE_LIMM_MOV:
3450 if (opnd->shifter.amount)
3451 snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3452 opnd->shifter.amount);
3453 else
3454 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3455 break;
3456
3457 case AARCH64_OPND_SIMD_IMM:
3458 case AARCH64_OPND_SIMD_IMM_SFT:
3459 if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3460 || opnd->shifter.kind == AARCH64_MOD_NONE)
3461 snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3462 else
3463 snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3464 aarch64_operand_modifiers[opnd->shifter.kind].name,
3465 opnd->shifter.amount);
3466 break;
3467
3468 case AARCH64_OPND_SVE_AIMM:
3469 case AARCH64_OPND_SVE_ASIMM:
3470 if (opnd->shifter.amount)
3471 snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3472 opnd->shifter.amount);
3473 else
3474 snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3475 break;
3476
3477 case AARCH64_OPND_FPIMM:
3478 case AARCH64_OPND_SIMD_FPIMM:
3479 case AARCH64_OPND_SVE_FPIMM8:
3480 switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3481 {
3482 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3483 {
3484 half_conv_t c;
3485 c.i = expand_fp_imm (2, opnd->imm.value);
3486 snprintf (buf, size, "#%.18e", c.f);
3487 }
3488 break;
3489 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3490 {
3491 single_conv_t c;
3492 c.i = expand_fp_imm (4, opnd->imm.value);
3493 snprintf (buf, size, "#%.18e", c.f);
3494 }
3495 break;
3496 case 8: /* e.g. FMOV <Dd>, #<imm>. */
3497 {
3498 double_conv_t c;
3499 c.i = expand_fp_imm (8, opnd->imm.value);
3500 snprintf (buf, size, "#%.18e", c.d);
3501 }
3502 break;
3503 default: assert (0);
3504 }
3505 break;
3506
3507 case AARCH64_OPND_CCMP_IMM:
3508 case AARCH64_OPND_NZCV:
3509 case AARCH64_OPND_EXCEPTION:
3510 case AARCH64_OPND_UIMM4:
3511 case AARCH64_OPND_UIMM4_ADDG:
3512 case AARCH64_OPND_UIMM7:
3513 case AARCH64_OPND_UIMM10:
3514 if (optional_operand_p (opcode, idx) == TRUE
3515 && (opnd->imm.value ==
3516 (int64_t) get_optional_operand_default_value (opcode)))
3517 /* Omit the operand, e.g. DCPS1. */
3518 break;
3519 snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3520 break;
3521
3522 case AARCH64_OPND_COND:
3523 case AARCH64_OPND_COND1:
3524 snprintf (buf, size, "%s", opnd->cond->names[0]);
3525 num_conds = ARRAY_SIZE (opnd->cond->names);
3526 for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3527 {
3528 size_t len = strlen (buf);
3529 if (i == 1)
3530 snprintf (buf + len, size - len, " // %s = %s",
3531 opnd->cond->names[0], opnd->cond->names[i]);
3532 else
3533 snprintf (buf + len, size - len, ", %s",
3534 opnd->cond->names[i]);
3535 }
3536 break;
3537
3538 case AARCH64_OPND_ADDR_ADRP:
3539 addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3540 + opnd->imm.value;
3541 if (pcrel_p)
3542 *pcrel_p = 1;
3543 if (address)
3544 *address = addr;
3545 /* This is not necessary during disassembly, as print_address_func
3546 in the disassemble_info will take care of the printing. But some
3547 other callers may still be interested in getting the string in *BUF,
3548 so here we do snprintf regardless. */
3549 snprintf (buf, size, "#0x%" PRIx64, addr);
3550 break;
3551
3552 case AARCH64_OPND_ADDR_PCREL14:
3553 case AARCH64_OPND_ADDR_PCREL19:
3554 case AARCH64_OPND_ADDR_PCREL21:
3555 case AARCH64_OPND_ADDR_PCREL26:
3556 addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3557 if (pcrel_p)
3558 *pcrel_p = 1;
3559 if (address)
3560 *address = addr;
3561 /* This is not necessary during disassembly, as print_address_func
3562 in the disassemble_info will take care of the printing. But some
3563 other callers may still be interested in getting the string in *BUF,
3564 so here we do snprintf regardless. */
3565 snprintf (buf, size, "#0x%" PRIx64, addr);
3566 break;
3567
3568 case AARCH64_OPND_ADDR_SIMPLE:
3569 case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3570 case AARCH64_OPND_SIMD_ADDR_POST:
3571 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3572 if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3573 {
3574 if (opnd->addr.offset.is_reg)
3575 snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3576 else
3577 snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3578 }
3579 else
3580 snprintf (buf, size, "[%s]", name);
3581 break;
3582
3583 case AARCH64_OPND_ADDR_REGOFF:
3584 case AARCH64_OPND_SVE_ADDR_R:
3585 case AARCH64_OPND_SVE_ADDR_RR:
3586 case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3587 case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3588 case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3589 case AARCH64_OPND_SVE_ADDR_RX:
3590 case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3591 case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3592 case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3593 print_register_offset_address
3594 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3595 get_offset_int_reg_name (opnd));
3596 break;
3597
3598 case AARCH64_OPND_SVE_ADDR_ZX:
3599 print_register_offset_address
3600 (buf, size, opnd,
3601 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3602 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
3603 break;
3604
3605 case AARCH64_OPND_SVE_ADDR_RZ:
3606 case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3607 case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3608 case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3609 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3610 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3611 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3612 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3613 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3614 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3615 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3616 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3617 print_register_offset_address
3618 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3619 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3620 break;
3621
3622 case AARCH64_OPND_ADDR_SIMM7:
3623 case AARCH64_OPND_ADDR_SIMM9:
3624 case AARCH64_OPND_ADDR_SIMM9_2:
3625 case AARCH64_OPND_ADDR_SIMM10:
3626 case AARCH64_OPND_ADDR_SIMM11:
3627 case AARCH64_OPND_ADDR_SIMM13:
3628 case AARCH64_OPND_ADDR_OFFSET:
3629 case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3630 case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3631 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3632 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3633 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3634 case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3635 case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3636 case AARCH64_OPND_SVE_ADDR_RI_U6:
3637 case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3638 case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3639 case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3640 print_immediate_offset_address
3641 (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3642 break;
3643
3644 case AARCH64_OPND_SVE_ADDR_ZI_U5:
3645 case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3646 case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3647 case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3648 print_immediate_offset_address
3649 (buf, size, opnd,
3650 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3651 break;
3652
3653 case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3654 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3655 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3656 print_register_offset_address
3657 (buf, size, opnd,
3658 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3659 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3660 break;
3661
3662 case AARCH64_OPND_ADDR_UIMM12:
3663 name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3664 if (opnd->addr.offset.imm)
3665 snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3666 else
3667 snprintf (buf, size, "[%s]", name);
3668 break;
3669
3670 case AARCH64_OPND_SYSREG:
3671 for (i = 0; aarch64_sys_regs[i].name; ++i)
3672 {
3673 bfd_boolean exact_match
3674 = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3675 == opnd->sysreg.flags;
3676
3677 	  /* Try to find an exact match, but if that fails, return the first
3678 	     partial match that was found.  */
3679 if (aarch64_sys_regs[i].value == opnd->sysreg.value
3680 && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3681 && (name == NULL || exact_match))
3682 {
3683 name = aarch64_sys_regs[i].name;
3684 if (exact_match)
3685 {
3686 if (notes)
3687 *notes = NULL;
3688 break;
3689 }
3690
3691 	      /* If we didn't match exactly, that means the presence of a flag
3692 		 indicates what we didn't want for this instruction.  E.g. if
3693 		 F_REG_READ is there, it means we were looking for a write
3694 		 register.  See aarch64_ext_sysreg.  */
3695 if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3696 *notes = _("reading from a write-only register");
3697 else if (aarch64_sys_regs[i].flags & F_REG_READ)
3698 *notes = _("writing to a read-only register");
3699 }
3700 }
3701
3702 if (name)
3703 snprintf (buf, size, "%s", name);
3704 else
3705 {
3706 /* Implementation defined system register. */
3707 unsigned int value = opnd->sysreg.value;
3708 snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3709 (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3710 value & 0x7);
3711 }
3712 break;
3713
3714 case AARCH64_OPND_PSTATEFIELD:
3715 for (i = 0; aarch64_pstatefields[i].name; ++i)
3716 if (aarch64_pstatefields[i].value == opnd->pstatefield)
3717 break;
3718 assert (aarch64_pstatefields[i].name);
3719 snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3720 break;
3721
3722 case AARCH64_OPND_SYSREG_AT:
3723 case AARCH64_OPND_SYSREG_DC:
3724 case AARCH64_OPND_SYSREG_IC:
3725 case AARCH64_OPND_SYSREG_TLBI:
3726 case AARCH64_OPND_SYSREG_SR:
3727 snprintf (buf, size, "%s", opnd->sysins_op->name);
3728 break;
3729
3730 case AARCH64_OPND_BARRIER:
3731 snprintf (buf, size, "%s", opnd->barrier->name);
3732 break;
3733
3734 case AARCH64_OPND_BARRIER_ISB:
3735 /* Operand can be omitted, e.g. in DCPS1. */
3736 if (! optional_operand_p (opcode, idx)
3737 || (opnd->barrier->value
3738 != get_optional_operand_default_value (opcode)))
3739 snprintf (buf, size, "#0x%x", opnd->barrier->value);
3740 break;
3741
3742 case AARCH64_OPND_PRFOP:
3743 if (opnd->prfop->name != NULL)
3744 snprintf (buf, size, "%s", opnd->prfop->name);
3745 else
3746 snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3747 break;
3748
3749 case AARCH64_OPND_BARRIER_PSB:
3750 case AARCH64_OPND_BTI_TARGET:
3751 if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3752 snprintf (buf, size, "%s", opnd->hint_option->name);
3753 break;
3754
3755 default:
3756 assert (0);
3757 }
3758 }
3759 \f
3760 #define CPENC(op0,op1,crn,crm,op2) \
3761 ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
3762 /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
3763 #define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
3764 /* for 3.9.10 System Instructions */
3765 #define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
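
/* Worked example (for illustration): after the final >> 5, CPENC packs its
   operands as op0[15:14]:op1[13:11]:CRn[10:7]:CRm[6:3]:op2[2:0], which is the
   same layout the AARCH64_OPND_SYSREG printer above unpacks with its >> 14,
   >> 11, >> 7 and >> 3 shifts.  For instance:

     CPENC (3, 3, C9, C12, 0)
       = ((3 << 19) | (3 << 16) | (9 << 12) | (12 << 8) | (0 << 5)) >> 5
       = 0x1b9c00 >> 5
       = 0xdce0

   which is the encoding used for "pmcr_el0" in the table below.  */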
3766
3767 #define C0 0
3768 #define C1 1
3769 #define C2 2
3770 #define C3 3
3771 #define C4 4
3772 #define C5 5
3773 #define C6 6
3774 #define C7 7
3775 #define C8 8
3776 #define C9 9
3777 #define C10 10
3778 #define C11 11
3779 #define C12 12
3780 #define C13 13
3781 #define C14 14
3782 #define C15 15
3783
3784 /* TODO: there is one more issue that needs to be resolved:
3785    1. handle cpu-implementation-defined system registers.  */
3786 const aarch64_sys_reg aarch64_sys_regs [] =
3787 {
3788 { "spsr_el1", CPEN_(0,C0,0), 0 }, /* = spsr_svc */
3789 { "spsr_el12", CPEN_ (5, C0, 0), F_ARCHEXT },
3790 { "elr_el1", CPEN_(0,C0,1), 0 },
3791 { "elr_el12", CPEN_ (5, C0, 1), F_ARCHEXT },
3792 { "sp_el0", CPEN_(0,C1,0), 0 },
3793 { "spsel", CPEN_(0,C2,0), 0 },
3794 { "daif", CPEN_(3,C2,1), 0 },
3795 { "currentel", CPEN_(0,C2,2), F_REG_READ }, /* RO */
3796 { "pan", CPEN_(0,C2,3), F_ARCHEXT },
3797 { "uao", CPEN_ (0, C2, 4), F_ARCHEXT },
3798 { "nzcv", CPEN_(3,C2,0), 0 },
3799 { "ssbs", CPEN_(3,C2,6), F_ARCHEXT },
3800 { "fpcr", CPEN_(3,C4,0), 0 },
3801 { "fpsr", CPEN_(3,C4,1), 0 },
3802 { "dspsr_el0", CPEN_(3,C5,0), 0 },
3803 { "dlr_el0", CPEN_(3,C5,1), 0 },
3804 { "spsr_el2", CPEN_(4,C0,0), 0 }, /* = spsr_hyp */
3805 { "elr_el2", CPEN_(4,C0,1), 0 },
3806 { "sp_el1", CPEN_(4,C1,0), 0 },
3807 { "spsr_irq", CPEN_(4,C3,0), 0 },
3808 { "spsr_abt", CPEN_(4,C3,1), 0 },
3809 { "spsr_und", CPEN_(4,C3,2), 0 },
3810 { "spsr_fiq", CPEN_(4,C3,3), 0 },
3811 { "spsr_el3", CPEN_(6,C0,0), 0 },
3812 { "elr_el3", CPEN_(6,C0,1), 0 },
3813 { "sp_el2", CPEN_(6,C1,0), 0 },
3814 { "spsr_svc", CPEN_(0,C0,0), F_DEPRECATED }, /* = spsr_el1 */
3815 { "spsr_hyp", CPEN_(4,C0,0), F_DEPRECATED }, /* = spsr_el2 */
3816 { "midr_el1", CPENC(3,0,C0,C0,0), F_REG_READ }, /* RO */
3817 { "ctr_el0", CPENC(3,3,C0,C0,1), F_REG_READ }, /* RO */
3818 { "mpidr_el1", CPENC(3,0,C0,C0,5), F_REG_READ }, /* RO */
3819 { "revidr_el1", CPENC(3,0,C0,C0,6), F_REG_READ }, /* RO */
3820 { "aidr_el1", CPENC(3,1,C0,C0,7), F_REG_READ }, /* RO */
3821 { "dczid_el0", CPENC(3,3,C0,C0,7), F_REG_READ }, /* RO */
3822 { "id_dfr0_el1", CPENC(3,0,C0,C1,2), F_REG_READ }, /* RO */
3823 { "id_pfr0_el1", CPENC(3,0,C0,C1,0), F_REG_READ }, /* RO */
3824 { "id_pfr1_el1", CPENC(3,0,C0,C1,1), F_REG_READ }, /* RO */
3825 { "id_pfr2_el1", CPENC(3,0,C0,C3,4), F_ARCHEXT | F_REG_READ}, /* RO */
3826 { "id_afr0_el1", CPENC(3,0,C0,C1,3), F_REG_READ }, /* RO */
3827 { "id_mmfr0_el1", CPENC(3,0,C0,C1,4), F_REG_READ }, /* RO */
3828 { "id_mmfr1_el1", CPENC(3,0,C0,C1,5), F_REG_READ }, /* RO */
3829 { "id_mmfr2_el1", CPENC(3,0,C0,C1,6), F_REG_READ }, /* RO */
3830 { "id_mmfr3_el1", CPENC(3,0,C0,C1,7), F_REG_READ }, /* RO */
3831 { "id_mmfr4_el1", CPENC(3,0,C0,C2,6), F_REG_READ }, /* RO */
3832 { "id_isar0_el1", CPENC(3,0,C0,C2,0), F_REG_READ }, /* RO */
3833 { "id_isar1_el1", CPENC(3,0,C0,C2,1), F_REG_READ }, /* RO */
3834 { "id_isar2_el1", CPENC(3,0,C0,C2,2), F_REG_READ }, /* RO */
3835 { "id_isar3_el1", CPENC(3,0,C0,C2,3), F_REG_READ }, /* RO */
3836 { "id_isar4_el1", CPENC(3,0,C0,C2,4), F_REG_READ }, /* RO */
3837 { "id_isar5_el1", CPENC(3,0,C0,C2,5), F_REG_READ }, /* RO */
3838 { "mvfr0_el1", CPENC(3,0,C0,C3,0), F_REG_READ }, /* RO */
3839 { "mvfr1_el1", CPENC(3,0,C0,C3,1), F_REG_READ }, /* RO */
3840 { "mvfr2_el1", CPENC(3,0,C0,C3,2), F_REG_READ }, /* RO */
3841 { "ccsidr_el1", CPENC(3,1,C0,C0,0), F_REG_READ }, /* RO */
3842 { "id_aa64pfr0_el1", CPENC(3,0,C0,C4,0), F_REG_READ }, /* RO */
3843 { "id_aa64pfr1_el1", CPENC(3,0,C0,C4,1), F_REG_READ }, /* RO */
3844 { "id_aa64dfr0_el1", CPENC(3,0,C0,C5,0), F_REG_READ }, /* RO */
3845 { "id_aa64dfr1_el1", CPENC(3,0,C0,C5,1), F_REG_READ }, /* RO */
3846 { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0), F_REG_READ }, /* RO */
3847 { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1), F_REG_READ }, /* RO */
3848 { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0), F_REG_READ }, /* RO */
3849 { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1), F_REG_READ }, /* RO */
3850 { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3851 { "id_aa64afr0_el1", CPENC(3,0,C0,C5,4), F_REG_READ }, /* RO */
3852 { "id_aa64afr1_el1", CPENC(3,0,C0,C5,5), F_REG_READ }, /* RO */
3853 { "id_aa64zfr0_el1", CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3854 { "clidr_el1", CPENC(3,1,C0,C0,1), F_REG_READ }, /* RO */
3855 { "csselr_el1", CPENC(3,2,C0,C0,0), 0 },
3856 { "vpidr_el2", CPENC(3,4,C0,C0,0), 0 },
3857 { "vmpidr_el2", CPENC(3,4,C0,C0,5), 0 },
3858 { "sctlr_el1", CPENC(3,0,C1,C0,0), 0 },
3859 { "sctlr_el2", CPENC(3,4,C1,C0,0), 0 },
3860 { "sctlr_el3", CPENC(3,6,C1,C0,0), 0 },
3861 { "sctlr_el12", CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3862 { "actlr_el1", CPENC(3,0,C1,C0,1), 0 },
3863 { "actlr_el2", CPENC(3,4,C1,C0,1), 0 },
3864 { "actlr_el3", CPENC(3,6,C1,C0,1), 0 },
3865 { "cpacr_el1", CPENC(3,0,C1,C0,2), 0 },
3866 { "cpacr_el12", CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3867 { "cptr_el2", CPENC(3,4,C1,C1,2), 0 },
3868 { "cptr_el3", CPENC(3,6,C1,C1,2), 0 },
3869 { "scr_el3", CPENC(3,6,C1,C1,0), 0 },
3870 { "hcr_el2", CPENC(3,4,C1,C1,0), 0 },
3871 { "mdcr_el2", CPENC(3,4,C1,C1,1), 0 },
3872 { "mdcr_el3", CPENC(3,6,C1,C3,1), 0 },
3873 { "hstr_el2", CPENC(3,4,C1,C1,3), 0 },
3874 { "hacr_el2", CPENC(3,4,C1,C1,7), 0 },
3875 { "zcr_el1", CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3876 { "zcr_el12", CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3877 { "zcr_el2", CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3878 { "zcr_el3", CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3879 { "zidr_el1", CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3880 { "ttbr0_el1", CPENC(3,0,C2,C0,0), 0 },
3881 { "ttbr1_el1", CPENC(3,0,C2,C0,1), 0 },
3882 { "ttbr0_el2", CPENC(3,4,C2,C0,0), 0 },
3883 { "ttbr1_el2", CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3884 { "ttbr0_el3", CPENC(3,6,C2,C0,0), 0 },
3885 { "ttbr0_el12", CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3886 { "ttbr1_el12", CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3887 { "vttbr_el2", CPENC(3,4,C2,C1,0), 0 },
3888 { "tcr_el1", CPENC(3,0,C2,C0,2), 0 },
3889 { "tcr_el2", CPENC(3,4,C2,C0,2), 0 },
3890 { "tcr_el3", CPENC(3,6,C2,C0,2), 0 },
3891 { "tcr_el12", CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3892 { "vtcr_el2", CPENC(3,4,C2,C1,2), 0 },
3893 { "apiakeylo_el1", CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3894 { "apiakeyhi_el1", CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3895 { "apibkeylo_el1", CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3896 { "apibkeyhi_el1", CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3897 { "apdakeylo_el1", CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3898 { "apdakeyhi_el1", CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3899 { "apdbkeylo_el1", CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3900 { "apdbkeyhi_el1", CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3901 { "apgakeylo_el1", CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3902 { "apgakeyhi_el1", CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3903 { "afsr0_el1", CPENC(3,0,C5,C1,0), 0 },
3904 { "afsr1_el1", CPENC(3,0,C5,C1,1), 0 },
3905 { "afsr0_el2", CPENC(3,4,C5,C1,0), 0 },
3906 { "afsr1_el2", CPENC(3,4,C5,C1,1), 0 },
3907 { "afsr0_el3", CPENC(3,6,C5,C1,0), 0 },
3908 { "afsr0_el12", CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3909 { "afsr1_el3", CPENC(3,6,C5,C1,1), 0 },
3910 { "afsr1_el12", CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3911 { "esr_el1", CPENC(3,0,C5,C2,0), 0 },
3912 { "esr_el2", CPENC(3,4,C5,C2,0), 0 },
3913 { "esr_el3", CPENC(3,6,C5,C2,0), 0 },
3914 { "esr_el12", CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3915 { "vsesr_el2", CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
3916 { "fpexc32_el2", CPENC(3,4,C5,C3,0), 0 },
3917 { "erridr_el1", CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3918 { "errselr_el1", CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3919 { "erxfr_el1", CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3920 { "erxctlr_el1", CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3921 { "erxstatus_el1", CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3922 { "erxaddr_el1", CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3923 { "erxmisc0_el1", CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3924 { "erxmisc1_el1", CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3925 { "far_el1", CPENC(3,0,C6,C0,0), 0 },
3926 { "far_el2", CPENC(3,4,C6,C0,0), 0 },
3927 { "far_el3", CPENC(3,6,C6,C0,0), 0 },
3928 { "far_el12", CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3929 { "hpfar_el2", CPENC(3,4,C6,C0,4), 0 },
3930 { "par_el1", CPENC(3,0,C7,C4,0), 0 },
3931 { "mair_el1", CPENC(3,0,C10,C2,0), 0 },
3932 { "mair_el2", CPENC(3,4,C10,C2,0), 0 },
3933 { "mair_el3", CPENC(3,6,C10,C2,0), 0 },
3934 { "mair_el12", CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3935 { "amair_el1", CPENC(3,0,C10,C3,0), 0 },
3936 { "amair_el2", CPENC(3,4,C10,C3,0), 0 },
3937 { "amair_el3", CPENC(3,6,C10,C3,0), 0 },
3938 { "amair_el12", CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3939 { "vbar_el1", CPENC(3,0,C12,C0,0), 0 },
3940 { "vbar_el2", CPENC(3,4,C12,C0,0), 0 },
3941 { "vbar_el3", CPENC(3,6,C12,C0,0), 0 },
3942 { "vbar_el12", CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3943 { "rvbar_el1", CPENC(3,0,C12,C0,1), F_REG_READ }, /* RO */
3944 { "rvbar_el2", CPENC(3,4,C12,C0,1), F_REG_READ }, /* RO */
3945 { "rvbar_el3", CPENC(3,6,C12,C0,1), F_REG_READ }, /* RO */
3946 { "rmr_el1", CPENC(3,0,C12,C0,2), 0 },
3947 { "rmr_el2", CPENC(3,4,C12,C0,2), 0 },
3948 { "rmr_el3", CPENC(3,6,C12,C0,2), 0 },
3949 { "isr_el1", CPENC(3,0,C12,C1,0), F_REG_READ }, /* RO */
3950 { "disr_el1", CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3951 { "vdisr_el2", CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3952 { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
3953 { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3954 { "contextidr_el12", CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3955 { "rndr", CPENC(3,3,C2,C4,0), F_ARCHEXT | F_REG_READ }, /* RO */
3956 { "rndrrs", CPENC(3,3,C2,C4,1), F_ARCHEXT | F_REG_READ }, /* RO */
3957 { "tco", CPENC(3,3,C4,C2,7), F_ARCHEXT },
3958 { "tfsre0_el1", CPENC(3,0,C6,C6,1), F_ARCHEXT },
3959 { "tfsr_el1", CPENC(3,0,C6,C5,0), F_ARCHEXT },
3960 { "tfsr_el2", CPENC(3,4,C6,C5,0), F_ARCHEXT },
3961 { "tfsr_el3", CPENC(3,6,C6,C6,0), F_ARCHEXT },
3962 { "tfsr_el12", CPENC(3,5,C6,C6,0), F_ARCHEXT },
3963 { "rgsr_el1", CPENC(3,0,C1,C0,5), F_ARCHEXT },
3964 { "gcr_el1", CPENC(3,0,C1,C0,6), F_ARCHEXT },
3965 { "tpidr_el0", CPENC(3,3,C13,C0,2), 0 },
3966 { "tpidrro_el0", CPENC(3,3,C13,C0,3), 0 }, /* RW */
3967 { "tpidr_el1", CPENC(3,0,C13,C0,4), 0 },
3968 { "tpidr_el2", CPENC(3,4,C13,C0,2), 0 },
3969 { "tpidr_el3", CPENC(3,6,C13,C0,2), 0 },
3970 { "scxtnum_el0", CPENC(3,3,C13,C0,7), F_ARCHEXT },
3971 { "scxtnum_el1", CPENC(3,0,C13,C0,7), F_ARCHEXT },
3972 { "scxtnum_el2", CPENC(3,4,C13,C0,7), F_ARCHEXT },
3973 { "scxtnum_el12", CPENC(3,5,C13,C0,7), F_ARCHEXT },
3974 { "scxtnum_el3", CPENC(3,6,C13,C0,7), F_ARCHEXT },
3975 { "teecr32_el1", CPENC(2,2,C0, C0,0), 0 }, /* See section 3.9.7.1 */
3976 { "cntfrq_el0", CPENC(3,3,C14,C0,0), 0 }, /* RW */
3977 { "cntpct_el0", CPENC(3,3,C14,C0,1), F_REG_READ }, /* RO */
3978 { "cntvct_el0", CPENC(3,3,C14,C0,2), F_REG_READ }, /* RO */
3979 { "cntvoff_el2", CPENC(3,4,C14,C0,3), 0 },
3980 { "cntkctl_el1", CPENC(3,0,C14,C1,0), 0 },
3981 { "cntkctl_el12", CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3982 { "cnthctl_el2", CPENC(3,4,C14,C1,0), 0 },
3983 { "cntp_tval_el0", CPENC(3,3,C14,C2,0), 0 },
3984 { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3985 { "cntp_ctl_el0", CPENC(3,3,C14,C2,1), 0 },
3986 { "cntp_ctl_el02", CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3987 { "cntp_cval_el0", CPENC(3,3,C14,C2,2), 0 },
3988 { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3989 { "cntv_tval_el0", CPENC(3,3,C14,C3,0), 0 },
3990 { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3991 { "cntv_ctl_el0", CPENC(3,3,C14,C3,1), 0 },
3992 { "cntv_ctl_el02", CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3993 { "cntv_cval_el0", CPENC(3,3,C14,C3,2), 0 },
3994 { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3995 { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
3996 { "cnthp_ctl_el2", CPENC(3,4,C14,C2,1), 0 },
3997 { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
3998 { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
3999 { "cntps_ctl_el1", CPENC(3,7,C14,C2,1), 0 },
4000 { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
4001 { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
4002 { "cnthv_ctl_el2", CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
4003 { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
4004 { "dacr32_el2", CPENC(3,4,C3,C0,0), 0 },
4005 { "ifsr32_el2", CPENC(3,4,C5,C0,1), 0 },
4006 { "teehbr32_el1", CPENC(2,2,C1,C0,0), 0 },
4007 { "sder32_el3", CPENC(3,6,C1,C1,1), 0 },
4008 { "mdscr_el1", CPENC(2,0,C0, C2, 2), 0 },
4009 { "mdccsr_el0", CPENC(2,3,C0, C1, 0), F_REG_READ }, /* r */
4010 { "mdccint_el1", CPENC(2,0,C0, C2, 0), 0 },
4011 { "dbgdtr_el0", CPENC(2,3,C0, C4, 0), 0 },
4012 { "dbgdtrrx_el0", CPENC(2,3,C0, C5, 0), F_REG_READ }, /* r */
4013 { "dbgdtrtx_el0", CPENC(2,3,C0, C5, 0), F_REG_WRITE }, /* w */
4014 { "osdtrrx_el1", CPENC(2,0,C0, C0, 2), 0 },
4015 { "osdtrtx_el1", CPENC(2,0,C0, C3, 2), 0 },
4016 { "oseccr_el1", CPENC(2,0,C0, C6, 2), 0 },
4017 { "dbgvcr32_el2", CPENC(2,4,C0, C7, 0), 0 },
4018 { "dbgbvr0_el1", CPENC(2,0,C0, C0, 4), 0 },
4019 { "dbgbvr1_el1", CPENC(2,0,C0, C1, 4), 0 },
4020 { "dbgbvr2_el1", CPENC(2,0,C0, C2, 4), 0 },
4021 { "dbgbvr3_el1", CPENC(2,0,C0, C3, 4), 0 },
4022 { "dbgbvr4_el1", CPENC(2,0,C0, C4, 4), 0 },
4023 { "dbgbvr5_el1", CPENC(2,0,C0, C5, 4), 0 },
4024 { "dbgbvr6_el1", CPENC(2,0,C0, C6, 4), 0 },
4025 { "dbgbvr7_el1", CPENC(2,0,C0, C7, 4), 0 },
4026 { "dbgbvr8_el1", CPENC(2,0,C0, C8, 4), 0 },
4027 { "dbgbvr9_el1", CPENC(2,0,C0, C9, 4), 0 },
4028 { "dbgbvr10_el1", CPENC(2,0,C0, C10,4), 0 },
4029 { "dbgbvr11_el1", CPENC(2,0,C0, C11,4), 0 },
4030 { "dbgbvr12_el1", CPENC(2,0,C0, C12,4), 0 },
4031 { "dbgbvr13_el1", CPENC(2,0,C0, C13,4), 0 },
4032 { "dbgbvr14_el1", CPENC(2,0,C0, C14,4), 0 },
4033 { "dbgbvr15_el1", CPENC(2,0,C0, C15,4), 0 },
4034 { "dbgbcr0_el1", CPENC(2,0,C0, C0, 5), 0 },
4035 { "dbgbcr1_el1", CPENC(2,0,C0, C1, 5), 0 },
4036 { "dbgbcr2_el1", CPENC(2,0,C0, C2, 5), 0 },
4037 { "dbgbcr3_el1", CPENC(2,0,C0, C3, 5), 0 },
4038 { "dbgbcr4_el1", CPENC(2,0,C0, C4, 5), 0 },
4039 { "dbgbcr5_el1", CPENC(2,0,C0, C5, 5), 0 },
4040 { "dbgbcr6_el1", CPENC(2,0,C0, C6, 5), 0 },
4041 { "dbgbcr7_el1", CPENC(2,0,C0, C7, 5), 0 },
4042 { "dbgbcr8_el1", CPENC(2,0,C0, C8, 5), 0 },
4043 { "dbgbcr9_el1", CPENC(2,0,C0, C9, 5), 0 },
4044 { "dbgbcr10_el1", CPENC(2,0,C0, C10,5), 0 },
4045 { "dbgbcr11_el1", CPENC(2,0,C0, C11,5), 0 },
4046 { "dbgbcr12_el1", CPENC(2,0,C0, C12,5), 0 },
4047 { "dbgbcr13_el1", CPENC(2,0,C0, C13,5), 0 },
4048 { "dbgbcr14_el1", CPENC(2,0,C0, C14,5), 0 },
4049 { "dbgbcr15_el1", CPENC(2,0,C0, C15,5), 0 },
4050 { "dbgwvr0_el1", CPENC(2,0,C0, C0, 6), 0 },
4051 { "dbgwvr1_el1", CPENC(2,0,C0, C1, 6), 0 },
4052 { "dbgwvr2_el1", CPENC(2,0,C0, C2, 6), 0 },
4053 { "dbgwvr3_el1", CPENC(2,0,C0, C3, 6), 0 },
4054 { "dbgwvr4_el1", CPENC(2,0,C0, C4, 6), 0 },
4055 { "dbgwvr5_el1", CPENC(2,0,C0, C5, 6), 0 },
4056 { "dbgwvr6_el1", CPENC(2,0,C0, C6, 6), 0 },
4057 { "dbgwvr7_el1", CPENC(2,0,C0, C7, 6), 0 },
4058 { "dbgwvr8_el1", CPENC(2,0,C0, C8, 6), 0 },
4059 { "dbgwvr9_el1", CPENC(2,0,C0, C9, 6), 0 },
4060 { "dbgwvr10_el1", CPENC(2,0,C0, C10,6), 0 },
4061 { "dbgwvr11_el1", CPENC(2,0,C0, C11,6), 0 },
4062 { "dbgwvr12_el1", CPENC(2,0,C0, C12,6), 0 },
4063 { "dbgwvr13_el1", CPENC(2,0,C0, C13,6), 0 },
4064 { "dbgwvr14_el1", CPENC(2,0,C0, C14,6), 0 },
4065 { "dbgwvr15_el1", CPENC(2,0,C0, C15,6), 0 },
4066 { "dbgwcr0_el1", CPENC(2,0,C0, C0, 7), 0 },
4067 { "dbgwcr1_el1", CPENC(2,0,C0, C1, 7), 0 },
4068 { "dbgwcr2_el1", CPENC(2,0,C0, C2, 7), 0 },
4069 { "dbgwcr3_el1", CPENC(2,0,C0, C3, 7), 0 },
4070 { "dbgwcr4_el1", CPENC(2,0,C0, C4, 7), 0 },
4071 { "dbgwcr5_el1", CPENC(2,0,C0, C5, 7), 0 },
4072 { "dbgwcr6_el1", CPENC(2,0,C0, C6, 7), 0 },
4073 { "dbgwcr7_el1", CPENC(2,0,C0, C7, 7), 0 },
4074 { "dbgwcr8_el1", CPENC(2,0,C0, C8, 7), 0 },
4075 { "dbgwcr9_el1", CPENC(2,0,C0, C9, 7), 0 },
4076 { "dbgwcr10_el1", CPENC(2,0,C0, C10,7), 0 },
4077 { "dbgwcr11_el1", CPENC(2,0,C0, C11,7), 0 },
4078 { "dbgwcr12_el1", CPENC(2,0,C0, C12,7), 0 },
4079 { "dbgwcr13_el1", CPENC(2,0,C0, C13,7), 0 },
4080 { "dbgwcr14_el1", CPENC(2,0,C0, C14,7), 0 },
4081 { "dbgwcr15_el1", CPENC(2,0,C0, C15,7), 0 },
4082 { "mdrar_el1", CPENC(2,0,C1, C0, 0), F_REG_READ }, /* r */
4083 { "oslar_el1", CPENC(2,0,C1, C0, 4), F_REG_WRITE }, /* w */
4084 { "oslsr_el1", CPENC(2,0,C1, C1, 4), F_REG_READ }, /* r */
4085 { "osdlr_el1", CPENC(2,0,C1, C3, 4), 0 },
4086 { "dbgprcr_el1", CPENC(2,0,C1, C4, 4), 0 },
4087 { "dbgclaimset_el1", CPENC(2,0,C7, C8, 6), 0 },
4088 { "dbgclaimclr_el1", CPENC(2,0,C7, C9, 6), 0 },
4089 { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6), F_REG_READ }, /* r */
4090 { "pmblimitr_el1", CPENC (3, 0, C9, C10, 0), F_ARCHEXT }, /* rw */
4091 { "pmbptr_el1", CPENC (3, 0, C9, C10, 1), F_ARCHEXT }, /* rw */
4092 { "pmbsr_el1", CPENC (3, 0, C9, C10, 3), F_ARCHEXT }, /* rw */
4093 { "pmbidr_el1", CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ }, /* ro */
4094 { "pmscr_el1", CPENC (3, 0, C9, C9, 0), F_ARCHEXT }, /* rw */
4095 { "pmsicr_el1", CPENC (3, 0, C9, C9, 2), F_ARCHEXT }, /* rw */
4096 { "pmsirr_el1", CPENC (3, 0, C9, C9, 3), F_ARCHEXT }, /* rw */
4097 { "pmsfcr_el1", CPENC (3, 0, C9, C9, 4), F_ARCHEXT }, /* rw */
4098 { "pmsevfr_el1", CPENC (3, 0, C9, C9, 5), F_ARCHEXT }, /* rw */
4099 { "pmslatfr_el1", CPENC (3, 0, C9, C9, 6), F_ARCHEXT }, /* rw */
4100 { "pmsidr_el1", CPENC (3, 0, C9, C9, 7), F_ARCHEXT }, /* rw */
4101 { "pmscr_el2", CPENC (3, 4, C9, C9, 0), F_ARCHEXT }, /* rw */
4102 { "pmscr_el12", CPENC (3, 5, C9, C9, 0), F_ARCHEXT }, /* rw */
4103 { "pmcr_el0", CPENC(3,3,C9,C12, 0), 0 },
4104 { "pmcntenset_el0", CPENC(3,3,C9,C12, 1), 0 },
4105 { "pmcntenclr_el0", CPENC(3,3,C9,C12, 2), 0 },
4106 { "pmovsclr_el0", CPENC(3,3,C9,C12, 3), 0 },
4107 { "pmswinc_el0", CPENC(3,3,C9,C12, 4), F_REG_WRITE }, /* w */
4108 { "pmselr_el0", CPENC(3,3,C9,C12, 5), 0 },
4109 { "pmceid0_el0", CPENC(3,3,C9,C12, 6), F_REG_READ }, /* r */
4110 { "pmceid1_el0", CPENC(3,3,C9,C12, 7), F_REG_READ }, /* r */
4111 { "pmccntr_el0", CPENC(3,3,C9,C13, 0), 0 },
4112 { "pmxevtyper_el0", CPENC(3,3,C9,C13, 1), 0 },
4113 { "pmxevcntr_el0", CPENC(3,3,C9,C13, 2), 0 },
4114 { "pmuserenr_el0", CPENC(3,3,C9,C14, 0), 0 },
4115 { "pmintenset_el1", CPENC(3,0,C9,C14, 1), 0 },
4116 { "pmintenclr_el1", CPENC(3,0,C9,C14, 2), 0 },
4117 { "pmovsset_el0", CPENC(3,3,C9,C14, 3), 0 },
4118 { "pmevcntr0_el0", CPENC(3,3,C14,C8, 0), 0 },
4119 { "pmevcntr1_el0", CPENC(3,3,C14,C8, 1), 0 },
4120 { "pmevcntr2_el0", CPENC(3,3,C14,C8, 2), 0 },
4121 { "pmevcntr3_el0", CPENC(3,3,C14,C8, 3), 0 },
4122 { "pmevcntr4_el0", CPENC(3,3,C14,C8, 4), 0 },
4123 { "pmevcntr5_el0", CPENC(3,3,C14,C8, 5), 0 },
4124 { "pmevcntr6_el0", CPENC(3,3,C14,C8, 6), 0 },
4125 { "pmevcntr7_el0", CPENC(3,3,C14,C8, 7), 0 },
4126 { "pmevcntr8_el0", CPENC(3,3,C14,C9, 0), 0 },
4127 { "pmevcntr9_el0", CPENC(3,3,C14,C9, 1), 0 },
4128 { "pmevcntr10_el0", CPENC(3,3,C14,C9, 2), 0 },
4129 { "pmevcntr11_el0", CPENC(3,3,C14,C9, 3), 0 },
4130 { "pmevcntr12_el0", CPENC(3,3,C14,C9, 4), 0 },
4131 { "pmevcntr13_el0", CPENC(3,3,C14,C9, 5), 0 },
4132 { "pmevcntr14_el0", CPENC(3,3,C14,C9, 6), 0 },
4133 { "pmevcntr15_el0", CPENC(3,3,C14,C9, 7), 0 },
4134 { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
4135 { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
4136 { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
4137 { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
4138 { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
4139 { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
4140 { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
4141 { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
4142 { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
4143 { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
4144 { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
4145 { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
4146 { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
4147 { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
4148 { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
4149 { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
4150 { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
4151 { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
4152 { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
4153 { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
4154 { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
4155 { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
4156 { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
4157 { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
4158 { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
4159 { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
4160 { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
4161 { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
4162 { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
4163 { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
4164 { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
4165 { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
4166 { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
4167 { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
4168 { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
4169 { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
4170 { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
4171 { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
4172 { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
4173 { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
4174 { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
4175 { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
4176 { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
4177 { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
4178 { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
4179 { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
4180 { "pmccfiltr_el0", CPENC(3,3,C14,C15,7), 0 },
4181
4182 { "dit", CPEN_ (3, C2, 5), F_ARCHEXT },
4183 { "vstcr_el2", CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4184 { "vsttbr_el2", CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4185 { "cnthvs_tval_el2", CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4186 { "cnthvs_cval_el2", CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4187 { "cnthvs_ctl_el2", CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4188 { "cnthps_tval_el2", CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4189 { "cnthps_cval_el2", CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4190 { "cnthps_ctl_el2", CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4191 { "sder32_el2", CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4192 { "vncr_el2", CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4193 { 0, CPENC(0,0,0,0,0), 0 },
4194 };
4195
4196 bfd_boolean
4197 aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4198 {
4199 return (reg->flags & F_DEPRECATED) != 0;
4200 }
4201
4202 bfd_boolean
4203 aarch64_sys_reg_supported_p (const aarch64_feature_set features,
4204 const aarch64_sys_reg *reg)
4205 {
4206 if (!(reg->flags & F_ARCHEXT))
4207 return TRUE;
4208
4209 /* PAN. Values are from aarch64_sys_regs. */
4210 if (reg->value == CPEN_(0,C2,3)
4211 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4212 return FALSE;
4213
4214 /* SCXTNUM_ELx registers. */
4215 if ((reg->value == CPENC (3, 3, C13, C0, 7)
4216 || reg->value == CPENC (3, 0, C13, C0, 7)
4217 || reg->value == CPENC (3, 4, C13, C0, 7)
4218 || reg->value == CPENC (3, 6, C13, C0, 7)
4219 || reg->value == CPENC (3, 5, C13, C0, 7))
4220 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SCXTNUM))
4221 return FALSE;
4222
4223 /* ID_PFR2_EL1 register. */
4224 if (reg->value == CPENC(3, 0, C0, C3, 4)
4225 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_ID_PFR2))
4226 return FALSE;
4227
4228 /* SSBS. Values are from aarch64_sys_regs. */
4229 if (reg->value == CPEN_(3,C2,6)
4230 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4231 return FALSE;
4232
4233 /* Virtualization host extensions: system registers. */
4234 if ((reg->value == CPENC (3, 4, C2, C0, 1)
4235 || reg->value == CPENC (3, 4, C13, C0, 1)
4236 || reg->value == CPENC (3, 4, C14, C3, 0)
4237 || reg->value == CPENC (3, 4, C14, C3, 1)
4238 || reg->value == CPENC (3, 4, C14, C3, 2))
4239 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4240 return FALSE;
4241
4242 /* Virtualization host extensions: *_el12 names of *_el1 registers. */
4243 if ((reg->value == CPEN_ (5, C0, 0)
4244 || reg->value == CPEN_ (5, C0, 1)
4245 || reg->value == CPENC (3, 5, C1, C0, 0)
4246 || reg->value == CPENC (3, 5, C1, C0, 2)
4247 || reg->value == CPENC (3, 5, C2, C0, 0)
4248 || reg->value == CPENC (3, 5, C2, C0, 1)
4249 || reg->value == CPENC (3, 5, C2, C0, 2)
4250 || reg->value == CPENC (3, 5, C5, C1, 0)
4251 || reg->value == CPENC (3, 5, C5, C1, 1)
4252 || reg->value == CPENC (3, 5, C5, C2, 0)
4253 || reg->value == CPENC (3, 5, C6, C0, 0)
4254 || reg->value == CPENC (3, 5, C10, C2, 0)
4255 || reg->value == CPENC (3, 5, C10, C3, 0)
4256 || reg->value == CPENC (3, 5, C12, C0, 0)
4257 || reg->value == CPENC (3, 5, C13, C0, 1)
4258 || reg->value == CPENC (3, 5, C14, C1, 0))
4259 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4260 return FALSE;
4261
4262 /* Virtualization host extensions: *_el02 names of *_el0 registers. */
4263 if ((reg->value == CPENC (3, 5, C14, C2, 0)
4264 || reg->value == CPENC (3, 5, C14, C2, 1)
4265 || reg->value == CPENC (3, 5, C14, C2, 2)
4266 || reg->value == CPENC (3, 5, C14, C3, 0)
4267 || reg->value == CPENC (3, 5, C14, C3, 1)
4268 || reg->value == CPENC (3, 5, C14, C3, 2))
4269 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4270 return FALSE;
4271
4272 /* ARMv8.2 features. */
4273
4274 /* ID_AA64MMFR2_EL1. */
4275 if (reg->value == CPENC (3, 0, C0, C7, 2)
4276 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4277 return FALSE;
4278
4279 /* PSTATE.UAO. */
4280 if (reg->value == CPEN_ (0, C2, 4)
4281 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4282 return FALSE;
4283
4284 /* RAS extension. */
4285
4286   /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
4287      ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
4288 if ((reg->value == CPENC (3, 0, C5, C3, 0)
4289 || reg->value == CPENC (3, 0, C5, C3, 1)
4290 || reg->value == CPENC (3, 0, C5, C3, 2)
4291 || reg->value == CPENC (3, 0, C5, C3, 3)
4292 || reg->value == CPENC (3, 0, C5, C4, 0)
4293 || reg->value == CPENC (3, 0, C5, C4, 1)
4294 || reg->value == CPENC (3, 0, C5, C4, 2)
4295 || reg->value == CPENC (3, 0, C5, C4, 3)
4296 || reg->value == CPENC (3, 0, C5, C5, 0)
4297 || reg->value == CPENC (3, 0, C5, C5, 1))
4298 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4299 return FALSE;
4300
4301 /* VSESR_EL2, DISR_EL1 and VDISR_EL2. */
4302 if ((reg->value == CPENC (3, 4, C5, C2, 3)
4303 || reg->value == CPENC (3, 0, C12, C1, 1)
4304 || reg->value == CPENC (3, 4, C12, C1, 1))
4305 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4306 return FALSE;
4307
4308 /* Statistical Profiling extension. */
4309 if ((reg->value == CPENC (3, 0, C9, C10, 0)
4310 || reg->value == CPENC (3, 0, C9, C10, 1)
4311 || reg->value == CPENC (3, 0, C9, C10, 3)
4312 || reg->value == CPENC (3, 0, C9, C10, 7)
4313 || reg->value == CPENC (3, 0, C9, C9, 0)
4314 || reg->value == CPENC (3, 0, C9, C9, 2)
4315 || reg->value == CPENC (3, 0, C9, C9, 3)
4316 || reg->value == CPENC (3, 0, C9, C9, 4)
4317 || reg->value == CPENC (3, 0, C9, C9, 5)
4318 || reg->value == CPENC (3, 0, C9, C9, 6)
4319 || reg->value == CPENC (3, 0, C9, C9, 7)
4320 || reg->value == CPENC (3, 4, C9, C9, 0)
4321 || reg->value == CPENC (3, 5, C9, C9, 0))
4322 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
4323 return FALSE;
4324
4325 /* ARMv8.3 Pointer authentication keys. */
4326 if ((reg->value == CPENC (3, 0, C2, C1, 0)
4327 || reg->value == CPENC (3, 0, C2, C1, 1)
4328 || reg->value == CPENC (3, 0, C2, C1, 2)
4329 || reg->value == CPENC (3, 0, C2, C1, 3)
4330 || reg->value == CPENC (3, 0, C2, C2, 0)
4331 || reg->value == CPENC (3, 0, C2, C2, 1)
4332 || reg->value == CPENC (3, 0, C2, C2, 2)
4333 || reg->value == CPENC (3, 0, C2, C2, 3)
4334 || reg->value == CPENC (3, 0, C2, C3, 0)
4335 || reg->value == CPENC (3, 0, C2, C3, 1))
4336 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
4337 return FALSE;
4338
4339 /* SVE. */
4340 if ((reg->value == CPENC (3, 0, C0, C4, 4)
4341 || reg->value == CPENC (3, 0, C1, C2, 0)
4342 || reg->value == CPENC (3, 4, C1, C2, 0)
4343 || reg->value == CPENC (3, 6, C1, C2, 0)
4344 || reg->value == CPENC (3, 5, C1, C2, 0)
4345 || reg->value == CPENC (3, 0, C0, C0, 7))
4346 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
4347 return FALSE;
4348
4349 /* ARMv8.4 features. */
4350
4351 /* PSTATE.DIT. */
4352 if (reg->value == CPEN_ (3, C2, 5)
4353 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4354 return FALSE;
4355
4356 /* Virtualization extensions. */
4357 if ((reg->value == CPENC(3, 4, C2, C6, 2)
4358 || reg->value == CPENC(3, 4, C2, C6, 0)
4359 || reg->value == CPENC(3, 4, C14, C4, 0)
4360 || reg->value == CPENC(3, 4, C14, C4, 2)
4361 || reg->value == CPENC(3, 4, C14, C4, 1)
4362 || reg->value == CPENC(3, 4, C14, C5, 0)
4363 || reg->value == CPENC(3, 4, C14, C5, 2)
4364 || reg->value == CPENC(3, 4, C14, C5, 1)
4365 || reg->value == CPENC(3, 4, C1, C3, 1)
4366 || reg->value == CPENC(3, 4, C2, C2, 0))
4367 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4368 return FALSE;
4369
4370 /* ARMv8.4 TLB instructions. */
4371 if ((reg->value == CPENS (0, C8, C1, 0)
4372 || reg->value == CPENS (0, C8, C1, 1)
4373 || reg->value == CPENS (0, C8, C1, 2)
4374 || reg->value == CPENS (0, C8, C1, 3)
4375 || reg->value == CPENS (0, C8, C1, 5)
4376 || reg->value == CPENS (0, C8, C1, 7)
4377 || reg->value == CPENS (4, C8, C4, 0)
4378 || reg->value == CPENS (4, C8, C4, 4)
4379 || reg->value == CPENS (4, C8, C1, 1)
4380 || reg->value == CPENS (4, C8, C1, 5)
4381 || reg->value == CPENS (4, C8, C1, 6)
4382 || reg->value == CPENS (6, C8, C1, 1)
4383 || reg->value == CPENS (6, C8, C1, 5)
4384 || reg->value == CPENS (4, C8, C1, 0)
4385 || reg->value == CPENS (4, C8, C1, 4)
4386 || reg->value == CPENS (6, C8, C1, 0)
4387 || reg->value == CPENS (0, C8, C6, 1)
4388 || reg->value == CPENS (0, C8, C6, 3)
4389 || reg->value == CPENS (0, C8, C6, 5)
4390 || reg->value == CPENS (0, C8, C6, 7)
4391 || reg->value == CPENS (0, C8, C2, 1)
4392 || reg->value == CPENS (0, C8, C2, 3)
4393 || reg->value == CPENS (0, C8, C2, 5)
4394 || reg->value == CPENS (0, C8, C2, 7)
4395 || reg->value == CPENS (0, C8, C5, 1)
4396 || reg->value == CPENS (0, C8, C5, 3)
4397 || reg->value == CPENS (0, C8, C5, 5)
4398 || reg->value == CPENS (0, C8, C5, 7)
4399 || reg->value == CPENS (4, C8, C0, 2)
4400 || reg->value == CPENS (4, C8, C0, 6)
4401 || reg->value == CPENS (4, C8, C4, 2)
4402 || reg->value == CPENS (4, C8, C4, 6)
4403 || reg->value == CPENS (4, C8, C4, 3)
4404 || reg->value == CPENS (4, C8, C4, 7)
4405 || reg->value == CPENS (4, C8, C6, 1)
4406 || reg->value == CPENS (4, C8, C6, 5)
4407 || reg->value == CPENS (4, C8, C2, 1)
4408 || reg->value == CPENS (4, C8, C2, 5)
4409 || reg->value == CPENS (4, C8, C5, 1)
4410 || reg->value == CPENS (4, C8, C5, 5)
4411 || reg->value == CPENS (6, C8, C6, 1)
4412 || reg->value == CPENS (6, C8, C6, 5)
4413 || reg->value == CPENS (6, C8, C2, 1)
4414 || reg->value == CPENS (6, C8, C2, 5)
4415 || reg->value == CPENS (6, C8, C5, 1)
4416 || reg->value == CPENS (6, C8, C5, 5))
4417 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4418 return FALSE;
4419
4420 /* Random Number Instructions. For now they are available
4421 (and optional) only with ARMv8.5-A. */
4422 if ((reg->value == CPENC (3, 3, C2, C4, 0)
4423 || reg->value == CPENC (3, 3, C2, C4, 1))
4424 && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RNG)
4425 && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_5)))
4426 return FALSE;
4427
4428 /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG. */
4429 if ((reg->value == CPENC (3, 3, C4, C2, 7)
4430 || reg->value == CPENC (3, 0, C6, C6, 1)
4431 || reg->value == CPENC (3, 0, C6, C5, 0)
4432 || reg->value == CPENC (3, 4, C6, C5, 0)
4433 || reg->value == CPENC (3, 6, C6, C6, 0)
4434 || reg->value == CPENC (3, 5, C6, C6, 0)
4435 || reg->value == CPENC (3, 0, C1, C0, 5)
4436 || reg->value == CPENC (3, 0, C1, C0, 6))
4437 && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG)))
4438 return FALSE;
4439
4440 return TRUE;
4441 }
4442
4443 /* The use of CPENC below is fairly misleading: the values here are not in
4444    CPENC form.  They are in op2op1 form.  The fields are encoded by
4445    ins_pstatefield, which just shifts the value by the width of the fields
4446    in a loop, so if you CPENC them only the first value will be set and the
4447    rest are masked out to 0.  As an example, with op2 = 3 and op1 = 2, CPENC
4448    would produce a value of 0b110000000001000000 (0x30040), while what you
4449    want is 0b011010 (0x1a).  */
4450 const aarch64_sys_reg aarch64_pstatefields [] =
4451 {
4452 { "spsel", 0x05, 0 },
4453 { "daifset", 0x1e, 0 },
4454 { "daifclr", 0x1f, 0 },
4455 { "pan", 0x04, F_ARCHEXT },
4456 { "uao", 0x03, F_ARCHEXT },
4457 { "ssbs", 0x19, F_ARCHEXT },
4458 { "dit", 0x1a, F_ARCHEXT },
4459 { "tco", 0x1c, F_ARCHEXT },
4460 { 0, CPENC(0,0,0,0,0), 0 },
4461 };
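
/* For illustration: "daifset" above is stored as 0x1e == 0b011110, i.e. the
   two 3-bit operand fields concatenated (as ins_pstatefield inserts them),
   rather than a CPENC-packed value.  */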
4462
4463 bfd_boolean
4464 aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4465 const aarch64_sys_reg *reg)
4466 {
4467 if (!(reg->flags & F_ARCHEXT))
4468 return TRUE;
4469
4470 /* PAN. Values are from aarch64_pstatefields. */
4471 if (reg->value == 0x04
4472 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4473 return FALSE;
4474
4475 /* UAO. Values are from aarch64_pstatefields. */
4476 if (reg->value == 0x03
4477 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4478 return FALSE;
4479
4480 /* SSBS. Values are from aarch64_pstatefields. */
4481 if (reg->value == 0x19
4482 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4483 return FALSE;
4484
4485 /* DIT. Values are from aarch64_pstatefields. */
4486 if (reg->value == 0x1a
4487 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4488 return FALSE;
4489
4490 /* TCO. Values are from aarch64_pstatefields. */
4491 if (reg->value == 0x1c
4492 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4493 return FALSE;
4494
4495 return TRUE;
4496 }
4497
4498 const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4499 {
4500 { "ialluis", CPENS(0,C7,C1,0), 0 },
4501 { "iallu", CPENS(0,C7,C5,0), 0 },
4502 { "ivau", CPENS (3, C7, C5, 1), F_HASXT },
4503 { 0, CPENS(0,0,0,0), 0 }
4504 };
4505
4506 const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4507 {
4508 { "zva", CPENS (3, C7, C4, 1), F_HASXT },
4509 { "gva", CPENS (3, C7, C4, 3), F_HASXT | F_ARCHEXT },
4510 { "gzva", CPENS (3, C7, C4, 4), F_HASXT | F_ARCHEXT },
4511 { "ivac", CPENS (0, C7, C6, 1), F_HASXT },
4512 { "igvac", CPENS (0, C7, C6, 3), F_HASXT | F_ARCHEXT },
4513 { "igsw", CPENS (0, C7, C6, 4), F_HASXT | F_ARCHEXT },
4514 { "isw", CPENS (0, C7, C6, 2), F_HASXT },
4515 { "igdvac", CPENS (0, C7, C6, 5), F_HASXT | F_ARCHEXT },
4516 { "igdsw", CPENS (0, C7, C6, 6), F_HASXT | F_ARCHEXT },
4517 { "cvac", CPENS (3, C7, C10, 1), F_HASXT },
4518 { "cgvac", CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
4519 { "cgdvac", CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
4520 { "csw", CPENS (0, C7, C10, 2), F_HASXT },
4521 { "cgsw", CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
4522 { "cgdsw", CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
4523 { "cvau", CPENS (3, C7, C11, 1), F_HASXT },
4524 { "cvap", CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4525 { "cgvap", CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
4526 { "cgdvap", CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
4527 { "cvadp", CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
4528 { "cgvadp", CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
4529 { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
4530 { "civac", CPENS (3, C7, C14, 1), F_HASXT },
4531 { "cigvac", CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
4532 { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
4533 { "cisw", CPENS (0, C7, C14, 2), F_HASXT },
4534 { "cigsw", CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
4535 { "cigdsw", CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
4536 { 0, CPENS(0,0,0,0), 0 }
4537 };
4538
4539 const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4540 {
4541 { "s1e1r", CPENS (0, C7, C8, 0), F_HASXT },
4542 { "s1e1w", CPENS (0, C7, C8, 1), F_HASXT },
4543 { "s1e0r", CPENS (0, C7, C8, 2), F_HASXT },
4544 { "s1e0w", CPENS (0, C7, C8, 3), F_HASXT },
4545 { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
4546 { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
4547 { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
4548 { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
4549 { "s1e2r", CPENS (4, C7, C8, 0), F_HASXT },
4550 { "s1e2w", CPENS (4, C7, C8, 1), F_HASXT },
4551 { "s1e3r", CPENS (6, C7, C8, 0), F_HASXT },
4552 { "s1e3w", CPENS (6, C7, C8, 1), F_HASXT },
4553 { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4554 { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4555 { 0, CPENS(0,0,0,0), 0 }
4556 };
4557
4558 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4559 {
4560 { "vmalle1", CPENS(0,C8,C7,0), 0 },
4561 { "vae1", CPENS (0, C8, C7, 1), F_HASXT },
4562 { "aside1", CPENS (0, C8, C7, 2), F_HASXT },
4563 { "vaae1", CPENS (0, C8, C7, 3), F_HASXT },
4564 { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4565 { "vae1is", CPENS (0, C8, C3, 1), F_HASXT },
4566 { "aside1is", CPENS (0, C8, C3, 2), F_HASXT },
4567 { "vaae1is", CPENS (0, C8, C3, 3), F_HASXT },
4568 { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4569 { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4570 { "ipas2e1", CPENS (4, C8, C4, 1), F_HASXT },
4571 { "ipas2le1", CPENS (4, C8, C4, 5), F_HASXT },
4572 { "vae2", CPENS (4, C8, C7, 1), F_HASXT },
4573 { "vae2is", CPENS (4, C8, C3, 1), F_HASXT },
4574 { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4575 { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4576 { "vae3", CPENS (6, C8, C7, 1), F_HASXT },
4577 { "vae3is", CPENS (6, C8, C3, 1), F_HASXT },
4578 { "alle2", CPENS(4,C8,C7,0), 0 },
4579 { "alle2is", CPENS(4,C8,C3,0), 0 },
4580 { "alle1", CPENS(4,C8,C7,4), 0 },
4581 { "alle1is", CPENS(4,C8,C3,4), 0 },
4582 { "alle3", CPENS(6,C8,C7,0), 0 },
4583 { "alle3is", CPENS(6,C8,C3,0), 0 },
4584 { "vale1is", CPENS (0, C8, C3, 5), F_HASXT },
4585 { "vale2is", CPENS (4, C8, C3, 5), F_HASXT },
4586 { "vale3is", CPENS (6, C8, C3, 5), F_HASXT },
4587 { "vaale1is", CPENS (0, C8, C3, 7), F_HASXT },
4588 { "vale1", CPENS (0, C8, C7, 5), F_HASXT },
4589 { "vale2", CPENS (4, C8, C7, 5), F_HASXT },
4590 { "vale3", CPENS (6, C8, C7, 5), F_HASXT },
4591 { "vaale1", CPENS (0, C8, C7, 7), F_HASXT },
4592
4593 { "vmalle1os", CPENS (0, C8, C1, 0), F_ARCHEXT },
4594 { "vae1os", CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
4595 { "aside1os", CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
4596 { "vaae1os", CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
4597 { "vale1os", CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
4598 { "vaale1os", CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
4599 { "ipas2e1os", CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
4600 { "ipas2le1os", CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
4601 { "vae2os", CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
4602 { "vale2os", CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
4603 { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
4604 { "vae3os", CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
4605 { "vale3os", CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
4606 { "alle2os", CPENS (4, C8, C1, 0), F_ARCHEXT },
4607 { "alle1os", CPENS (4, C8, C1, 4), F_ARCHEXT },
4608 { "alle3os", CPENS (6, C8, C1, 0), F_ARCHEXT },
4609
4610 { "rvae1", CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
4611 { "rvaae1", CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
4612 { "rvale1", CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
4613 { "rvaale1", CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
4614 { "rvae1is", CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
4615 { "rvaae1is", CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
4616 { "rvale1is", CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
4617 { "rvaale1is", CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
4618 { "rvae1os", CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
4619 { "rvaae1os", CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
4620 { "rvale1os", CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
4621 { "rvaale1os", CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
4622 { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
4623 { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
4624 { "ripas2e1", CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
4625 { "ripas2le1", CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
4626 { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
4627 { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
4628 { "rvae2", CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
4629 { "rvale2", CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
4630 { "rvae2is", CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
4631 { "rvale2is", CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
4632 { "rvae2os", CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
4633 { "rvale2os", CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
4634 { "rvae3", CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
4635 { "rvale3", CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
4636 { "rvae3is", CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
4637 { "rvale3is", CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
4638 { "rvae3os", CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
4639 { "rvale3os", CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
4640
4641 { 0, CPENS(0,0,0,0), 0 }
4642 };
4643
4644 const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
4645 {
4646   /* RCTX is somewhat unique in that it has a different value (op2)
4647      depending on the instruction in which it is used (cfp/dvp/cpp).
4648      Thus op2 is masked out here and instead encoded directly in the
4649      aarch64_opcode_table entries for the respective instructions.  */
4650 { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */
4651
4652 { 0, CPENS(0,0,0,0), 0 }
4653 };
4654
4655 bfd_boolean
4656 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4657 {
4658 return (sys_ins_reg->flags & F_HASXT) != 0;
4659 }
4660
4661 extern bfd_boolean
4662 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4663 const aarch64_sys_ins_reg *reg)
4664 {
4665 if (!(reg->flags & F_ARCHEXT))
4666 return TRUE;
4667
4668 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4669 if (reg->value == CPENS (3, C7, C12, 1)
4670 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4671 return FALSE;
4672
4673 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
4674 if (reg->value == CPENS (3, C7, C13, 1)
4675 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
4676 return FALSE;
4677
4678 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
4679 if ((reg->value == CPENS (0, C7, C6, 3)
4680 || reg->value == CPENS (0, C7, C6, 4)
4681 || reg->value == CPENS (0, C7, C10, 4)
4682 || reg->value == CPENS (0, C7, C14, 4)
4683 || reg->value == CPENS (3, C7, C10, 3)
4684 || reg->value == CPENS (3, C7, C12, 3)
4685 || reg->value == CPENS (3, C7, C13, 3)
4686 || reg->value == CPENS (3, C7, C14, 3)
4687 || reg->value == CPENS (3, C7, C4, 3)
4688 || reg->value == CPENS (0, C7, C6, 5)
4689 || reg->value == CPENS (0, C7, C6, 6)
4690 || reg->value == CPENS (0, C7, C10, 6)
4691 || reg->value == CPENS (0, C7, C14, 6)
4692 || reg->value == CPENS (3, C7, C10, 5)
4693 || reg->value == CPENS (3, C7, C12, 5)
4694 || reg->value == CPENS (3, C7, C13, 5)
4695 || reg->value == CPENS (3, C7, C14, 5)
4696 || reg->value == CPENS (3, C7, C4, 4))
4697 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4698 return FALSE;
4699
4700 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4701 if ((reg->value == CPENS (0, C7, C9, 0)
4702 || reg->value == CPENS (0, C7, C9, 1))
4703 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4704 return FALSE;
4705
4706 /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
4707 if (reg->value == CPENS (3, C7, C3, 0)
4708 && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
4709 return FALSE;
4710
4711 return TRUE;
4712 }
4713
4714 #undef C0
4715 #undef C1
4716 #undef C2
4717 #undef C3
4718 #undef C4
4719 #undef C5
4720 #undef C6
4721 #undef C7
4722 #undef C8
4723 #undef C9
4724 #undef C10
4725 #undef C11
4726 #undef C12
4727 #undef C13
4728 #undef C14
4729 #undef C15
4730
4731 #define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
4732 #define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4733
4734 static enum err_type
4735 verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
4736 const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
4737 bfd_boolean encoding ATTRIBUTE_UNUSED,
4738 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4739 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4740 {
4741 int t = BITS (insn, 4, 0);
4742 int n = BITS (insn, 9, 5);
4743 int t2 = BITS (insn, 14, 10);
4744
4745 if (BIT (insn, 23))
4746 {
4747 /* Write back enabled. */
4748 if ((t == n || t2 == n) && n != 31)
4749 return ERR_UND;
4750 }
4751
4752 if (BIT (insn, 22))
4753 {
4754 /* Load */
4755 if (t == t2)
4756 return ERR_UND;
4757 }
4758
4759 return ERR_OK;
4760 }
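
/* For example, the checks above flag as ERR_UND an LDPSW with write-back
   whose base register Rn (bits 9:5) equals either destination Rt or Rt2
   while n != 31, and a load form whose two destinations Rt and Rt2 are the
   same register.  */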
4761
4762 /* Verifier for vector-by-element instructions with 3 operands, where the
4763    condition `if sz:L == 11 then UNDEFINED` holds.  */
4764
4765 static enum err_type
4766 verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
4767 bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
4768 aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
4769 aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
4770 {
4771 const aarch64_insn undef_pattern = 0x3;
4772 aarch64_insn value;
4773
4774 assert (inst->opcode);
4775 assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
4776 value = encoding ? inst->value : insn;
4777 assert (value);
4778
4779 if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
4780 return ERR_UND;
4781
4782 return ERR_OK;
4783 }
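
/* Note (informally): for the D form (sz == 1) the element index is carried
   by H alone and L must be zero, which is why sz:L == 0b11 is the UNDEFINED
   pattern rejected above.  */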
4784
4785 /* Initialize an instruction sequence insn_sequence with the instruction INST.
4786 If INST is NULL the given insn_sequence is cleared and the sequence is left
4787 uninitialized. */
4788
4789 void
4790 init_insn_sequence (const struct aarch64_inst *inst,
4791 aarch64_instr_sequence *insn_sequence)
4792 {
4793 int num_req_entries = 0;
4794 insn_sequence->next_insn = 0;
4795 insn_sequence->num_insns = num_req_entries;
4796 if (insn_sequence->instr)
4797 XDELETE (insn_sequence->instr);
4798 insn_sequence->instr = NULL;
4799
4800 if (inst)
4801 {
4802 insn_sequence->instr = XNEW (aarch64_inst);
4803 memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
4804 }
4805
4806 /* Handle all the cases here. May need to think of something smarter than
4807 a giant if/else chain if this grows. At that time, a lookup table may be
4808 best. */
4809 if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
4810 num_req_entries = 1;
4811
4812 if (insn_sequence->current_insns)
4813 XDELETEVEC (insn_sequence->current_insns);
4814 insn_sequence->current_insns = NULL;
4815
4816 if (num_req_entries != 0)
4817 {
4818 size_t size = num_req_entries * sizeof (aarch64_inst);
4819 insn_sequence->current_insns
4820 = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
4821 memset (insn_sequence->current_insns, 0, size);
4822 }
4823 }
4824
4825
4826 /* This function verifies that the instruction INST adheres to its specified
4827 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
4828 returned and MISMATCH_DETAIL contains the reason why verification failed.
4829
4830 The function is called both during assembly and disassembly. If assembling
4831    then ENCODING will be TRUE, else FALSE.  If disassembling, PC will be set
4832    and will contain the PC of the current instruction w.r.t. the section.
4833 
4834    If ENCODING and PC=0 then you are at the start of a section.  The constraints
4835 are verified against the given state insn_sequence which is updated as it
4836 transitions through the verification. */
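
/* An illustrative sketch (not taken from this file) of the MOVPRFX
   constraint checked below: the sequence

     movprfx z0.d, p0/m, z1.d
     fadd    z0.d, p0/m, z0.d, z2.d

   satisfies the checks, whereas following the movprfx with a non-SVE
   instruction, a different destination register, a different governing
   predicate, or an extra use of z0 as a source is reported as a non-fatal
   constraint violation.  */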
4837
4838 enum err_type
4839 verify_constraints (const struct aarch64_inst *inst,
4840 const aarch64_insn insn ATTRIBUTE_UNUSED,
4841 bfd_vma pc,
4842 bfd_boolean encoding,
4843 aarch64_operand_error *mismatch_detail,
4844 aarch64_instr_sequence *insn_sequence)
4845 {
4846 assert (inst);
4847 assert (inst->opcode);
4848
4849 const struct aarch64_opcode *opcode = inst->opcode;
4850 if (!opcode->constraints && !insn_sequence->instr)
4851 return ERR_OK;
4852
4853 assert (insn_sequence);
4854
4855 enum err_type res = ERR_OK;
4856
4857 /* This instruction puts a constraint on the insn_sequence. */
4858 if (opcode->flags & F_SCAN)
4859 {
4860 if (insn_sequence->instr)
4861 {
4862 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4863 mismatch_detail->error = _("instruction opens new dependency "
4864 "sequence without ending previous one");
4865 mismatch_detail->index = -1;
4866 mismatch_detail->non_fatal = TRUE;
4867 res = ERR_VFI;
4868 }
4869
4870 init_insn_sequence (inst, insn_sequence);
4871 return res;
4872 }
4873
4874 /* Verify constraints on an existing sequence. */
4875 if (insn_sequence->instr)
4876 {
4877 const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
4878 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
4879 closed a previous one that we should have. */
4880 if (!encoding && pc == 0)
4881 {
4882 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4883 mismatch_detail->error = _("previous `movprfx' sequence not closed");
4884 mismatch_detail->index = -1;
4885 mismatch_detail->non_fatal = TRUE;
4886 res = ERR_VFI;
4887 /* Reset the sequence. */
4888 init_insn_sequence (NULL, insn_sequence);
4889 return res;
4890 }
4891
4892 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
4893 if (inst_opcode->constraints & C_SCAN_MOVPRFX)
4894 {
4895 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4896 instruction for better error messages. */
4897 if (!opcode->avariant
4898 || !(*opcode->avariant &
4899 (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
4900 {
4901 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4902 mismatch_detail->error = _("SVE instruction expected after "
4903 "`movprfx'");
4904 mismatch_detail->index = -1;
4905 mismatch_detail->non_fatal = TRUE;
4906 res = ERR_VFI;
4907 goto done;
4908 }
4909
4910 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
4911 instruction that is allowed to be used with a MOVPRFX. */
4912 if (!(opcode->constraints & C_SCAN_MOVPRFX))
4913 {
4914 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
4915 mismatch_detail->error = _("SVE `movprfx' compatible instruction "
4916 "expected");
4917 mismatch_detail->index = -1;
4918 mismatch_detail->non_fatal = TRUE;
4919 res = ERR_VFI;
4920 goto done;
4921 }
4922
4923 /* Next check for usage of the predicate register. */
4924 aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
4925 aarch64_opnd_info blk_pred, inst_pred;
4926 memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
4927 memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
4928 bfd_boolean predicated = FALSE;
4929 assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
4930
4931 /* Determine if the movprfx instruction used is predicated or not. */
4932 if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
4933 {
4934 predicated = TRUE;
4935 blk_pred = insn_sequence->instr->operands[1];
4936 }
4937
4938 unsigned char max_elem_size = 0;
4939 unsigned char current_elem_size;
4940 int num_op_used = 0, last_op_usage = 0;
4941 int i, inst_pred_idx = -1;
4942 int num_ops = aarch64_num_of_operands (opcode);
4943 for (i = 0; i < num_ops; i++)
4944 {
4945 aarch64_opnd_info inst_op = inst->operands[i];
4946 switch (inst_op.type)
4947 {
4948 case AARCH64_OPND_SVE_Zd:
4949 case AARCH64_OPND_SVE_Zm_5:
4950 case AARCH64_OPND_SVE_Zm_16:
4951 case AARCH64_OPND_SVE_Zn:
4952 case AARCH64_OPND_SVE_Zt:
4953 case AARCH64_OPND_SVE_Vm:
4954 case AARCH64_OPND_SVE_Vn:
4955 case AARCH64_OPND_Va:
4956 case AARCH64_OPND_Vn:
4957 case AARCH64_OPND_Vm:
4958 case AARCH64_OPND_Sn:
4959 case AARCH64_OPND_Sm:
4960 case AARCH64_OPND_Rn:
4961 case AARCH64_OPND_Rm:
4962 case AARCH64_OPND_Rn_SP:
4963 case AARCH64_OPND_Rt_SP:
4964 case AARCH64_OPND_Rm_SP:
4965 if (inst_op.reg.regno == blk_dest.reg.regno)
4966 {
4967 num_op_used++;
4968 last_op_usage = i;
4969 }
4970 current_elem_size
4971 = aarch64_get_qualifier_esize (inst_op.qualifier);
4972 if (current_elem_size > max_elem_size)
4973 max_elem_size = current_elem_size;
4974 break;
4975 case AARCH64_OPND_SVE_Pd:
4976 case AARCH64_OPND_SVE_Pg3:
4977 case AARCH64_OPND_SVE_Pg4_5:
4978 case AARCH64_OPND_SVE_Pg4_10:
4979 case AARCH64_OPND_SVE_Pg4_16:
4980 case AARCH64_OPND_SVE_Pm:
4981 case AARCH64_OPND_SVE_Pn:
4982 case AARCH64_OPND_SVE_Pt:
4983 inst_pred = inst_op;
4984 inst_pred_idx = i;
4985 break;
4986 default:
4987 break;
4988 }
4989 }
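  /* At this point NUM_OP_USED counts how many of the register operands
     above refer to the movprfx destination register, LAST_OP_USAGE is the
     index of the last such operand, MAX_ELEM_SIZE is the widest element
     size seen among those register operands, and INST_PRED/INST_PRED_IDX
     record the instruction's governing predicate, if any.  */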
4990
4991 assert (max_elem_size != 0);
4992 aarch64_opnd_info inst_dest = inst->operands[0];
4993 /* Determine the size that should be used to compare against the
4994 movprfx size. */
4995 current_elem_size
4996 = opcode->constraints & C_MAX_ELEM
4997 ? max_elem_size
4998 : aarch64_get_qualifier_esize (inst_dest.qualifier);
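  /* In other words, opcodes flagged with C_MAX_ELEM are checked against the
     widest element size used anywhere in the instruction rather than against
     the size of their destination operand (a reading of the flag inferred
     from this code, not from separate documentation).  */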
4999
5000 /* If the movprfx is predicated, do some extra checks. */
5001 if (predicated)
5002 {
5003 /* The instruction must be predicated. */
5004 if (inst_pred_idx < 0)
5005 {
5006 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5007 mismatch_detail->error = _("predicated instruction expected "
5008 "after `movprfx'");
5009 mismatch_detail->index = -1;
5010 mismatch_detail->non_fatal = TRUE;
5011 res = ERR_VFI;
5012 goto done;
5013 }
5014
5015 /* The instruction must have a merging predicate. */
5016 if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
5017 {
5018 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5019 mismatch_detail->error = _("merging predicate expected due "
5020 "to preceding `movprfx'");
5021 mismatch_detail->index = inst_pred_idx;
5022 mismatch_detail->non_fatal = TRUE;
5023 res = ERR_VFI;
5024 goto done;
5025 }
5026
5027 /* The same predicate register must be used in the current instruction. */
5028 if (blk_pred.reg.regno != inst_pred.reg.regno)
5029 {
5030 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5031 mismatch_detail->error = _("predicate register differs "
5032 "from that in preceding "
5033 "`movprfx'");
5034 mismatch_detail->index = inst_pred_idx;
5035 mismatch_detail->non_fatal = TRUE;
5036 res = ERR_VFI;
5037 goto done;
5038 }
5039 }
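  /* An illustrative predicated sequence (a sketch, not taken from the
     test suite):

       movprfx z0.d, p0/m, z1.d
       add     z0.d, p0/m, z0.d, z2.d

     The prefixed instruction must be predicated, must use merging
     predication and must name the same predicate register as the movprfx,
     which is what the three checks above enforce.  */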
5040
5041 /* Destructive operations by definition read the destination register as
5042 a source as well, so they are allowed one extra use of it. */
5043 int allowed_usage
5044 = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
5045
5046 /* The movprfx destination register is not used at all. */
5047 if (num_op_used == 0)
5048 {
5049 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5050 mismatch_detail->error = _("output register of preceding "
5051 "`movprfx' not used in current "
5052 "instruction");
5053 mismatch_detail->index = 0;
5054 mismatch_detail->non_fatal = TRUE;
5055 res = ERR_VFI;
5056 goto done;
5057 }
5058
5059 /* We now know it's used; next determine exactly where it's used. */
5060 if (blk_dest.reg.regno != inst_dest.reg.regno)
5061 {
5062 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5063 mismatch_detail->error = _("output register of preceding "
5064 "`movprfx' expected as output");
5065 mismatch_detail->index = 0;
5066 mismatch_detail->non_fatal = TRUE;
5067 res = ERR_VFI;
5068 goto done;
5069 }
5070
5071 /* The register is used more often than this opcode type allows. */
5072 if (num_op_used > allowed_usage)
5073 {
5074 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5075 mismatch_detail->error = _("output register of preceding "
5076 "`movprfx' used as input");
5077 mismatch_detail->index = last_op_usage;
5078 mismatch_detail->non_fatal = TRUE;
5079 res = ERR_VFI;
5080 goto done;
5081 }
5082
5083 /* The only thing left now is the qualifier checks: the register must
5084 have the same maximum element size as the preceding `movprfx'. */
5085 if (inst_dest.qualifier
5086 && blk_dest.qualifier
5087 && current_elem_size
5088 != aarch64_get_qualifier_esize (blk_dest.qualifier))
5089 {
5090 mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5091 mismatch_detail->error = _("register size not compatible with "
5092 "previous `movprfx'");
5093 mismatch_detail->index = 0;
5094 mismatch_detail->non_fatal = TRUE;
5095 res = ERR_VFI;
5096 goto done;
5097 }
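  /* For example (illustrative only), `movprfx z0.s, p0/m, z1.s' followed by
     `add z0.d, p0/m, z0.d, z2.d' would be rejected here because the element
     sizes of the two destinations differ.  */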
5098 }
5099
5100 done:
5101 /* Add the new instruction to the sequence. */
5102 memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
5103 inst, sizeof (aarch64_inst));
5104
5105 /* Check if sequence is now full. */
5106 if (insn_sequence->next_insn >= insn_sequence->num_insns)
5107 {
5108 /* Sequence is full, but we don't have anything special to do for now,
5109 so clear and reset it. */
5110 init_insn_sequence (NULL, insn_sequence);
5111 }
5112 }
5113
5114 return res;
5115 }
5116
5117
5118 /* Return true if VALUE cannot be moved into an SVE register using DUP
5119 (with any element size, not just ESIZE) and if using DUPM would
5120 therefore be OK. ESIZE is the number of bytes in the immediate. */
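/* For instance (an illustrative sketch, not an exhaustive description of the
   encoding rules): with ESIZE == 1, the value 0x0f can be produced by DUP
   with a byte element, so the function returns FALSE; the repeating pattern
   0x00ff00ff00ff00ff with ESIZE == 8 cannot be produced by DUP at any
   element size, since DUP's 8-bit immediate (optionally shifted left by 8)
   cannot yield 0x00ff per 16-bit element, so the function returns TRUE.  */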
5121
5122 bfd_boolean
5123 aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
5124 {
5125 int64_t svalue = uvalue;
5126 uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
5127
5128 if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
5129 return FALSE;
5130 if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
5131 {
5132 svalue = (int32_t) uvalue;
5133 if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
5134 {
5135 svalue = (int16_t) uvalue;
5136 if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
5137 return FALSE;
5138 }
5139 }
5140 if ((svalue & 0xff) == 0)
5141 svalue /= 256;
5142 return svalue < -128 || svalue >= 128;
5143 }
5144
5145 /* Include the opcode description table as well as the operand description
5146 table. */
5147 #define VERIFIER(x) verify_##x
5148 #include "aarch64-tbl.h"