1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "disassemble.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
47 /* Other options */
48 static int no_aliases = 0;	/* If set, disassemble as the most general instruction.  */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
101       /* We know that *options is neither NUL nor a comma.  */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
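
/* Illustrative sketch only (not part of the original sources): how the
   comma-separated option string reaches the parser above.  The disassembler
   driver passes whatever follows "objdump -M"; empty fields are skipped and
   "no-aliases"/"aliases" are the flags handled here.  The wrapper function
   below is hypothetical.  */
#if 0
static void
example_parse_options (void)
{
  /* Equivalent to "objdump -d -M no-aliases" with a stray empty field.  */
  parse_aarch64_dis_options ("no-aliases,");
}
#endif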
113 \f
114 /* Functions doing the instruction disassembling. */
115
116 /* The unnamed arguments consist of the number of fields and information about
117 these fields where the VALUE will be extracted from CODE and returned.
118 MASK can be zero or the base mask of the opcode.
119
120    N.B. the fields are required to be in such an order that the most significant
121    field for VALUE comes first, e.g. the <index> in
122     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
123    is encoded in H:L:M; in that case the fields should be passed in
124    the order H, L, M.  */
125
126 aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
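
/* Illustrative sketch only: a typical call to extract_fields, mirroring the
   H:L:M lane-index case described above.  The wrapper function is
   hypothetical; the FLD_* enumerators are the real field kinds.  */
#if 0
static aarch64_insn
example_extract_hlm (aarch64_insn code)
{
  /* The most significant field (H) is passed first, so the result is the
     3-bit value H:L:M.  */
  return extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
}
#endif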
147
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
150
151 static aarch64_insn
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
153 {
154 aarch64_insn value;
155 unsigned int i;
156 enum aarch64_field_kind kind;
157
158 value = 0;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
160 {
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
164 }
165 return value;
166 }
167
168 /* Sign-extend bit I of VALUE. */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
171 {
172 uint32_t ret = value;
173
174 assert (i < 32);
175 if ((value >> i) & 0x1)
176 {
177 uint32_t val = (uint32_t)(-1) << i;
178 ret = ret | val;
179 }
180 return (int32_t) ret;
181 }
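
/* Worked values (illustrative only; the helper below is hypothetical):
   sign_extend with bit 8 as the sign bit, as used for simm9 offsets in the
   address decoders further down.  */
#if 0
static void
example_sign_extend (void)
{
  assert (sign_extend (0x0ff, 8) == 255);  /* 0_1111_1111 stays positive.  */
  assert (sign_extend (0x1f0, 8) == -16);  /* 1_1111_0000 is 496 - 512.  */
}
#endif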
182
183 /* N.B. the following inline helper functions create a dependency on the
184 order of operand qualifier enumerators. */
185
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
189 {
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
191 assert (value <= 0x1
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
193 return qualifier;
194 }
195
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
198
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
203
204 /* Instructions using vector type 2H should not call this function. Skip over
205 the 2H qualifier. */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
207 qualifier += 1;
208
209 assert (value <= 0x8
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
211 return qualifier;
212 }
213
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
217 {
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
219
220 assert (value <= 0x4
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
225 /* The instruction in *INST is probably half way through the decoding and
226    our caller wants to know the expected qualifier for operand I.  Return
227    such a qualifier if we can establish it; otherwise return
228    AARCH64_OPND_QLF_NIL.  */
229
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
232 {
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
237 i, qualifiers))
238 return qualifiers[i];
239 else
240 return AARCH64_OPND_QLF_NIL;
241 }
242
243 /* Operand extractors. */
244
245 int
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
249 {
250 info->reg.regno = extract_field (self->fields[0], code, 0);
251 return 1;
252 }
253
254 int
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
258 {
259 assert (info->idx == 1
260 	  || info->idx == 3);
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
262 return 1;
263 }
264
265 /* e.g. IC <ic_op>{, <Xt>}. */
266 int
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
270 {
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or
277 not. */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
279
280 return 1;
281 }
282
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
284 int
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
288 {
289 /* regno */
290 info->reglane.regno = extract_field (self->fields[0], code,
291 inst->opcode->mask);
292
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
296 {
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
299 {
300 unsigned shift;
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
308 }
309 else
310 {
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
312 imm5<3:0> <V>
313 0000 RESERVED
314 xxx1 B
315 xx10 H
316 x100 S
317 1000 D */
318 int pos = -1;
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
321 value >>= 1;
322 if (pos > 3)
323 return 0;
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
326 }
327 }
328 else if (inst->opcode->iclass == dotproduct)
329 {
330 /* Need information in other operand(s) to help decoding. */
331 info->qualifier = get_expected_qualifier (inst, info->idx);
332 switch (info->qualifier)
333 {
334 case AARCH64_OPND_QLF_S_4B:
335 	  /* H:L */
336 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
337 info->reglane.regno &= 0x1f;
338 break;
339 default:
340 return 0;
341 }
342 }
343 else if (inst->opcode->iclass == cryptosm3)
344 {
345       /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>.S[<imm2>].  */
346 info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
347 }
348 else
349 {
350       /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
352
353 /* Need information in other operand(s) to help decoding. */
354 info->qualifier = get_expected_qualifier (inst, info->idx);
355 switch (info->qualifier)
356 {
357 case AARCH64_OPND_QLF_S_H:
358 /* h:l:m */
359 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
360 FLD_M);
361 info->reglane.regno &= 0xf;
362 break;
363 case AARCH64_OPND_QLF_S_S:
364 /* h:l */
365 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
366 break;
367 case AARCH64_OPND_QLF_S_D:
368 /* H */
369 info->reglane.index = extract_field (FLD_H, code, 0);
370 break;
371 default:
372 return 0;
373 }
374
375 if (inst->opcode->op == OP_FCMLA_ELEM)
376 {
377 /* Complex operand takes two elements. */
378 if (info->reglane.index & 1)
379 return 0;
380 info->reglane.index /= 2;
381 }
382 }
383
384 return 1;
385 }
386
387 int
388 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
389 const aarch64_insn code,
390 const aarch64_inst *inst ATTRIBUTE_UNUSED)
391 {
392 /* R */
393 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
394 /* len */
395 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
396 return 1;
397 }
398
399 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
400 int
401 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
402 aarch64_opnd_info *info, const aarch64_insn code,
403 const aarch64_inst *inst)
404 {
405 aarch64_insn value;
406 /* Number of elements in each structure to be loaded/stored. */
407 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
408
409 struct
410 {
411 unsigned is_reserved;
412 unsigned num_regs;
413 unsigned num_elements;
414 } data [] =
415 { {0, 4, 4},
416 {1, 4, 4},
417 {0, 4, 1},
418 {0, 4, 2},
419 {0, 3, 3},
420 {1, 3, 3},
421 {0, 3, 1},
422 {0, 1, 1},
423 {0, 2, 2},
424 {1, 2, 2},
425 {0, 2, 1},
426 };
427
428 /* Rt */
429 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
430 /* opcode */
431 value = extract_field (FLD_opcode, code, 0);
432 /* PR 21595: Check for a bogus value. */
433 if (value >= ARRAY_SIZE (data))
434 return 0;
435 if (expected_num != data[value].num_elements || data[value].is_reserved)
436 return 0;
437 info->reglist.num_regs = data[value].num_regs;
438
439 return 1;
440 }
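
/* Worked example (illustrative): for LD1 with a four-register list the
   "opcode" field is 0b0010, which indexes entry {0, 4, 1} in the table
   above: not reserved, four registers, one element per structure, matching
   the opcode-dependent value of 1 for LD1.  */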
441
442 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
443 lanes instructions. */
444 int
445 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
446 aarch64_opnd_info *info, const aarch64_insn code,
447 const aarch64_inst *inst)
448 {
449 aarch64_insn value;
450
451 /* Rt */
452 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
453 /* S */
454 value = extract_field (FLD_S, code, 0);
455
456 /* Number of registers is equal to the number of elements in
457 each structure to be loaded/stored. */
458 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
459 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
460
461 /* Except when it is LD1R. */
462 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
463 info->reglist.num_regs = 2;
464
465 return 1;
466 }
467
468 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
469 load/store single element instructions. */
470 int
471 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
472 aarch64_opnd_info *info, const aarch64_insn code,
473 const aarch64_inst *inst ATTRIBUTE_UNUSED)
474 {
475 aarch64_field field = {0, 0};
476 aarch64_insn QSsize; /* fields Q:S:size. */
477 aarch64_insn opcodeh2; /* opcode<2:1> */
478
479 /* Rt */
480 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
481
482 /* Decode the index, opcode<2:1> and size. */
483 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
484 opcodeh2 = extract_field_2 (&field, code, 0);
485 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
486 switch (opcodeh2)
487 {
488 case 0x0:
489 info->qualifier = AARCH64_OPND_QLF_S_B;
490 /* Index encoded in "Q:S:size". */
491 info->reglist.index = QSsize;
492 break;
493 case 0x1:
494 if (QSsize & 0x1)
495 /* UND. */
496 return 0;
497 info->qualifier = AARCH64_OPND_QLF_S_H;
498 /* Index encoded in "Q:S:size<1>". */
499 info->reglist.index = QSsize >> 1;
500 break;
501 case 0x2:
502 if ((QSsize >> 1) & 0x1)
503 /* UND. */
504 return 0;
505 if ((QSsize & 0x1) == 0)
506 {
507 info->qualifier = AARCH64_OPND_QLF_S_S;
508 /* Index encoded in "Q:S". */
509 info->reglist.index = QSsize >> 2;
510 }
511 else
512 {
513 if (extract_field (FLD_S, code, 0))
514 /* UND */
515 return 0;
516 info->qualifier = AARCH64_OPND_QLF_S_D;
517 /* Index encoded in "Q". */
518 info->reglist.index = QSsize >> 3;
519 }
520 break;
521 default:
522 return 0;
523 }
524
525 info->reglist.has_index = 1;
526 info->reglist.num_regs = 0;
527 /* Number of registers is equal to the number of elements in
528 each structure to be loaded/stored. */
529 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
530 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
531
532 return 1;
533 }
534
535 /* Decode fields immh:immb and/or Q for e.g.
536 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
537 or SSHR <V><d>, <V><n>, #<shift>. */
538
539 int
540 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
541 aarch64_opnd_info *info, const aarch64_insn code,
542 const aarch64_inst *inst)
543 {
544 int pos;
545 aarch64_insn Q, imm, immh;
546 enum aarch64_insn_class iclass = inst->opcode->iclass;
547
548 immh = extract_field (FLD_immh, code, 0);
549 if (immh == 0)
550 return 0;
551 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
552 pos = 4;
553 /* Get highest set bit in immh. */
554 while (--pos >= 0 && (immh & 0x8) == 0)
555 immh <<= 1;
556
557 assert ((iclass == asimdshf || iclass == asisdshf)
558 && (info->type == AARCH64_OPND_IMM_VLSR
559 || info->type == AARCH64_OPND_IMM_VLSL));
560
561 if (iclass == asimdshf)
562 {
563 Q = extract_field (FLD_Q, code, 0);
564 /* immh Q <T>
565 0000 x SEE AdvSIMD modified immediate
566 0001 0 8B
567 0001 1 16B
568 001x 0 4H
569 001x 1 8H
570 01xx 0 2S
571 01xx 1 4S
572 1xxx 0 RESERVED
573 1xxx 1 2D */
574 info->qualifier =
575 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
576 }
577 else
578 info->qualifier = get_sreg_qualifier_from_value (pos);
579
580 if (info->type == AARCH64_OPND_IMM_VLSR)
581 /* immh <shift>
582 0000 SEE AdvSIMD modified immediate
583 0001 (16-UInt(immh:immb))
584 001x (32-UInt(immh:immb))
585 01xx (64-UInt(immh:immb))
586 1xxx (128-UInt(immh:immb)) */
587 info->imm.value = (16 << pos) - imm;
588 else
589 /* immh:immb
590 immh <shift>
591 0000 SEE AdvSIMD modified immediate
592 0001 (UInt(immh:immb)-8)
593 001x (UInt(immh:immb)-16)
594 01xx (UInt(immh:immb)-32)
595 1xxx (UInt(immh:immb)-64) */
596 info->imm.value = imm - (8 << pos);
597
598 return 1;
599 }
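
/* Worked example (illustrative): SSHR <Vd>.8B, <Vn>.8B, #1 is encoded with
   immh:immb = 0001:111, so imm = 15 and the loop above leaves pos = 0 (the
   only set bit of immh is bit 0).  As an IMM_VLSR operand the shift is
   therefore (16 << 0) - 15 = 1, matching the architectural
   16 - UInt(immh:immb) rule for byte elements.  */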
600
601 /* Decode the shift immediate, encoded in the size field, for e.g.
    SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>.  */
602 int
603 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
604 aarch64_opnd_info *info, const aarch64_insn code,
605 const aarch64_inst *inst ATTRIBUTE_UNUSED)
606 {
607 int64_t imm;
608 aarch64_insn val;
609 val = extract_field (FLD_size, code, 0);
610 switch (val)
611 {
612 case 0: imm = 8; break;
613 case 1: imm = 16; break;
614 case 2: imm = 32; break;
615 default: return 0;
616 }
617 info->imm.value = imm;
618 return 1;
619 }
620
621 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
622    The value in the field(s) will be extracted as an unsigned immediate value.  */
623 int
624 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
625 const aarch64_insn code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED)
627 {
628 int64_t imm;
629
630 imm = extract_all_fields (self, code);
631
632 if (operand_need_sign_extension (self))
633 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
634
635 if (operand_need_shift_by_two (self))
636 imm <<= 2;
637
638 if (info->type == AARCH64_OPND_ADDR_ADRP)
639 imm <<= 12;
640
641 info->imm.value = imm;
642 return 1;
643 }
644
645 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
646 int
647 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
648 const aarch64_insn code,
649 const aarch64_inst *inst ATTRIBUTE_UNUSED)
650 {
651 aarch64_ext_imm (self, info, code, inst);
652 info->shifter.kind = AARCH64_MOD_LSL;
653 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
654 return 1;
655 }
656
657 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
658 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
659 int
660 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
661 aarch64_opnd_info *info,
662 const aarch64_insn code,
663 const aarch64_inst *inst ATTRIBUTE_UNUSED)
664 {
665 uint64_t imm;
666 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
667 aarch64_field field = {0, 0};
668
669 assert (info->idx == 1);
670
671 if (info->type == AARCH64_OPND_SIMD_FPIMM)
672 info->imm.is_fp = 1;
673
674 /* a:b:c:d:e:f:g:h */
675 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
676 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
677 {
678 /* Either MOVI <Dd>, #<imm>
679 or MOVI <Vd>.2D, #<imm>.
680 <imm> is a 64-bit immediate
681 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
682 encoded in "a:b:c:d:e:f:g:h". */
683 int i;
684 unsigned abcdefgh = imm;
685 for (imm = 0ull, i = 0; i < 8; i++)
686 if (((abcdefgh >> i) & 0x1) != 0)
687 imm |= 0xffull << (8 * i);
688 }
689 info->imm.value = imm;
690
691 /* cmode */
692 info->qualifier = get_expected_qualifier (inst, info->idx);
693 switch (info->qualifier)
694 {
695 case AARCH64_OPND_QLF_NIL:
696 /* no shift */
697 info->shifter.kind = AARCH64_MOD_NONE;
698 return 1;
699 case AARCH64_OPND_QLF_LSL:
700 /* shift zeros */
701 info->shifter.kind = AARCH64_MOD_LSL;
702 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
703 {
704 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
705 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
706 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
707 default: assert (0); return 0;
708 }
709 /* 00: 0; 01: 8; 10:16; 11:24. */
710 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
711 break;
712 case AARCH64_OPND_QLF_MSL:
713 /* shift ones */
714 info->shifter.kind = AARCH64_MOD_MSL;
715 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
716 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
717 break;
718 default:
719 assert (0);
720 return 0;
721 }
722
723 return 1;
724 }
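
/* Worked example (illustrative): for MOVI <Vd>.2D, #<imm> an a:b:c:d:e:f:g:h
   value of 0b10000001 is expanded bit-by-bit into whole bytes, giving the
   64-bit immediate 0xff000000000000ff.  */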
725
726 /* Decode an 8-bit floating-point immediate. */
727 int
728 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
729 const aarch64_insn code,
730 const aarch64_inst *inst ATTRIBUTE_UNUSED)
731 {
732 info->imm.value = extract_all_fields (self, code);
733 info->imm.is_fp = 1;
734 return 1;
735 }
736
737 /* Decode a 1-bit rotate immediate (#90 or #270). */
738 int
739 aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
740 const aarch64_insn code,
741 const aarch64_inst *inst ATTRIBUTE_UNUSED)
742 {
743 uint64_t rot = extract_field (self->fields[0], code, 0);
744 assert (rot < 2U);
745 info->imm.value = rot * 180 + 90;
746 return 1;
747 }
748
749 /* Decode a 2-bit rotate immediate (#0, #90, #180 or #270). */
750 int
751 aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
752 const aarch64_insn code,
753 const aarch64_inst *inst ATTRIBUTE_UNUSED)
754 {
755 uint64_t rot = extract_field (self->fields[0], code, 0);
756 assert (rot < 4U);
757 info->imm.value = rot * 90;
758 return 1;
759 }
760
761 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
762 int
763 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
764 aarch64_opnd_info *info, const aarch64_insn code,
765 const aarch64_inst *inst ATTRIBUTE_UNUSED)
766 {
767   info->imm.value = 64 - extract_field (FLD_scale, code, 0);
768 return 1;
769 }
770
771 /* Decode arithmetic immediate for e.g.
772 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
773 int
774 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
775 aarch64_opnd_info *info, const aarch64_insn code,
776 const aarch64_inst *inst ATTRIBUTE_UNUSED)
777 {
778 aarch64_insn value;
779
780 info->shifter.kind = AARCH64_MOD_LSL;
781 /* shift */
782 value = extract_field (FLD_shift, code, 0);
783 if (value >= 2)
784 return 0;
785 info->shifter.amount = value ? 12 : 0;
786 /* imm12 (unsigned) */
787 info->imm.value = extract_field (FLD_imm12, code, 0);
788
789 return 1;
790 }
791
792 /* Return true if VALUE is a valid logical immediate encoding, storing the
793 decoded value in *RESULT if so. ESIZE is the number of bytes in the
794 decoded immediate. */
795 static int
796 decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
797 {
798 uint64_t imm, mask;
799 uint32_t N, R, S;
800 unsigned simd_size;
801
802 /* value is N:immr:imms. */
803 S = value & 0x3f;
804 R = (value >> 6) & 0x3f;
805 N = (value >> 12) & 0x1;
806
807   /* The immediate value is S+1 bits set to 1, rotated left by SIMDsize - R
808      (in other words, rotated right by R), then replicated.  */
809 if (N != 0)
810 {
811 simd_size = 64;
812 mask = 0xffffffffffffffffull;
813 }
814 else
815 {
816 switch (S)
817 {
818 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
819 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
820 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
821 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
822 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
823 default: return 0;
824 }
825 mask = (1ull << simd_size) - 1;
826 /* Top bits are IGNORED. */
827 R &= simd_size - 1;
828 }
829
830 if (simd_size > esize * 8)
831 return 0;
832
833 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
834 if (S == simd_size - 1)
835 return 0;
836 /* S+1 consecutive bits to 1. */
837 /* NOTE: S can't be 63 due to detection above. */
838 imm = (1ull << (S + 1)) - 1;
839 /* Rotate to the left by simd_size - R. */
840 if (R != 0)
841 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
842 /* Replicate the value according to SIMD size. */
843 switch (simd_size)
844 {
845 case 2: imm = (imm << 2) | imm;
846 /* Fall through. */
847 case 4: imm = (imm << 4) | imm;
848 /* Fall through. */
849 case 8: imm = (imm << 8) | imm;
850 /* Fall through. */
851 case 16: imm = (imm << 16) | imm;
852 /* Fall through. */
853 case 32: imm = (imm << 32) | imm;
854 /* Fall through. */
855 case 64: break;
856 default: assert (0); return 0;
857 }
858
859 *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));
860
861 return 1;
862 }
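
/* Illustrative check only (hypothetical helper, not built): the encoding
   N:immr:imms = 0:000000:111100 selects the 2-bit element "01" replicated
   across the register, so a 32-bit (W) logical immediate decodes to
   0x55555555.  */
#if 0
static void
example_decode_limm (void)
{
  int64_t result;
  assert (decode_limm (4 /* esize in bytes */, 0x03c, &result));
  assert (result == 0x55555555);
}
#endif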
863
864 /* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
865 int
866 aarch64_ext_limm (const aarch64_operand *self,
867 aarch64_opnd_info *info, const aarch64_insn code,
868 const aarch64_inst *inst)
869 {
870 uint32_t esize;
871 aarch64_insn value;
872
873 value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
874 self->fields[2]);
875 esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
876 return decode_limm (esize, value, &info->imm.value);
877 }
878
879 /* Decode a logical immediate for the BIC alias of AND (etc.). */
880 int
881 aarch64_ext_inv_limm (const aarch64_operand *self,
882 aarch64_opnd_info *info, const aarch64_insn code,
883 const aarch64_inst *inst)
884 {
885 if (!aarch64_ext_limm (self, info, code, inst))
886 return 0;
887 info->imm.value = ~info->imm.value;
888 return 1;
889 }
890
891 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
892 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
893 int
894 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
895 aarch64_opnd_info *info,
896 const aarch64_insn code, const aarch64_inst *inst)
897 {
898 aarch64_insn value;
899
900 /* Rt */
901 info->reg.regno = extract_field (FLD_Rt, code, 0);
902
903 /* size */
904 value = extract_field (FLD_ldst_size, code, 0);
905 if (inst->opcode->iclass == ldstpair_indexed
906 || inst->opcode->iclass == ldstnapair_offs
907 || inst->opcode->iclass == ldstpair_off
908 || inst->opcode->iclass == loadlit)
909 {
910 enum aarch64_opnd_qualifier qualifier;
911 switch (value)
912 {
913 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
914 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
915 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
916 default: return 0;
917 }
918 info->qualifier = qualifier;
919 }
920 else
921 {
922 /* opc1:size */
923 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
924 if (value > 0x4)
925 return 0;
926 info->qualifier = get_sreg_qualifier_from_value (value);
927 }
928
929 return 1;
930 }
931
932 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
933 int
934 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
935 aarch64_opnd_info *info,
936 aarch64_insn code,
937 const aarch64_inst *inst ATTRIBUTE_UNUSED)
938 {
939 /* Rn */
940 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
941 return 1;
942 }
943
944 /* Decode the address operand for e.g.
945 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
946 int
947 aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
948 aarch64_opnd_info *info,
949 aarch64_insn code, const aarch64_inst *inst)
950 {
951 info->qualifier = get_expected_qualifier (inst, info->idx);
952
953 /* Rn */
954 info->addr.base_regno = extract_field (self->fields[0], code, 0);
955
956 /* simm9 */
957 aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
958 info->addr.offset.imm = sign_extend (imm, 8);
959 if (extract_field (self->fields[2], code, 0) == 1) {
960 info->addr.writeback = 1;
961 info->addr.preind = 1;
962 }
963 return 1;
964 }
965
966 /* Decode the address operand for e.g.
967 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
968 int
969 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
970 aarch64_opnd_info *info,
971 aarch64_insn code, const aarch64_inst *inst)
972 {
973 aarch64_insn S, value;
974
975 /* Rn */
976 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
977 /* Rm */
978 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
979 /* option */
980 value = extract_field (FLD_option, code, 0);
981 info->shifter.kind =
982 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
983 /* Fix-up the shifter kind; although the table-driven approach is
984 efficient, it is slightly inflexible, thus needing this fix-up. */
985 if (info->shifter.kind == AARCH64_MOD_UXTX)
986 info->shifter.kind = AARCH64_MOD_LSL;
987 /* S */
988 S = extract_field (FLD_S, code, 0);
989 if (S == 0)
990 {
991 info->shifter.amount = 0;
992 info->shifter.amount_present = 0;
993 }
994 else
995 {
996 int size;
997 /* Need information in other operand(s) to help achieve the decoding
998 from 'S' field. */
999 info->qualifier = get_expected_qualifier (inst, info->idx);
1000 /* Get the size of the data element that is accessed, which may be
1001 different from that of the source register size, e.g. in strb/ldrb. */
1002 size = aarch64_get_qualifier_esize (info->qualifier);
1003 info->shifter.amount = get_logsz (size);
1004 info->shifter.amount_present = 1;
1005 }
1006
1007 return 1;
1008 }
1009
1010 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
1011 int
1012 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
1013 aarch64_insn code, const aarch64_inst *inst)
1014 {
1015 aarch64_insn imm;
1016 info->qualifier = get_expected_qualifier (inst, info->idx);
1017
1018 /* Rn */
1019 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1020 /* simm (imm9 or imm7) */
1021 imm = extract_field (self->fields[0], code, 0);
1022 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
1023 if (self->fields[0] == FLD_imm7)
1024 /* scaled immediate in ld/st pair instructions. */
1025 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
1026 /* qualifier */
1027 if (inst->opcode->iclass == ldst_unscaled
1028 || inst->opcode->iclass == ldstnapair_offs
1029 || inst->opcode->iclass == ldstpair_off
1030 || inst->opcode->iclass == ldst_unpriv)
1031 info->addr.writeback = 0;
1032 else
1033 {
1034 /* pre/post- index */
1035 info->addr.writeback = 1;
1036 if (extract_field (self->fields[1], code, 0) == 1)
1037 info->addr.preind = 1;
1038 else
1039 info->addr.postind = 1;
1040 }
1041
1042 return 1;
1043 }
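
/* Worked example (illustrative): for LDP <Xt1>, <Xt2>, [<Xn|SP>, #16] the
   imm7 field holds 2; sign extension leaves it unchanged and the FLD_imm7
   scaling above multiplies by the 8-byte element size to recover #16.  */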
1044
1045 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
1046 int
1047 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
1048 aarch64_insn code,
1049 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1050 {
1051 int shift;
1052 info->qualifier = get_expected_qualifier (inst, info->idx);
1053 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
1054 /* Rn */
1055 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1056 /* uimm12 */
1057 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
1058 return 1;
1059 }
1060
1061 /* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
1062 int
1063 aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
1064 aarch64_insn code,
1065 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1066 {
1067 aarch64_insn imm;
1068
1069 info->qualifier = get_expected_qualifier (inst, info->idx);
1070 /* Rn */
1071 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1072 /* simm10 */
1073 imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
1074 info->addr.offset.imm = sign_extend (imm, 9) << 3;
1075 if (extract_field (self->fields[3], code, 0) == 1) {
1076 info->addr.writeback = 1;
1077 info->addr.preind = 1;
1078 }
1079 return 1;
1080 }
1081
1082 /* Decode the address operand for e.g.
1083 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
1084 int
1085 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
1086 aarch64_opnd_info *info,
1087 aarch64_insn code, const aarch64_inst *inst)
1088 {
1089 /* The opcode dependent area stores the number of elements in
1090 each structure to be loaded/stored. */
1091 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
1092
1093 /* Rn */
1094 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
1095 /* Rm | #<amount> */
1096 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
1097 if (info->addr.offset.regno == 31)
1098 {
1099 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
1100 	/* Special handling of loading a single structure to all lanes.  */
1101 info->addr.offset.imm = (is_ld1r ? 1
1102 : inst->operands[0].reglist.num_regs)
1103 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1104 else
1105 info->addr.offset.imm = inst->operands[0].reglist.num_regs
1106 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
1107 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
1108 }
1109 else
1110 info->addr.offset.is_reg = 1;
1111 info->addr.writeback = 1;
1112
1113 return 1;
1114 }
1115
1116 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
1117 int
1118 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
1119 aarch64_opnd_info *info,
1120 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1121 {
1122 aarch64_insn value;
1123 /* cond */
1124 value = extract_field (FLD_cond, code, 0);
1125 info->cond = get_cond_from_value (value);
1126 return 1;
1127 }
1128
1129 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1130 int
1131 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1132 aarch64_opnd_info *info,
1133 aarch64_insn code,
1134 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1135 {
1136 /* op0:op1:CRn:CRm:op2 */
1137 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1138 FLD_CRm, FLD_op2);
1139 return 1;
1140 }
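
/* Worked example (illustrative): for MRS <Xt>, NZCV the five fields decode
   as op0=3, op1=3, CRn=4, CRm=2, op2=0, giving the packed value 0xda10
   (op2 in the least significant bits), which the operand printer later
   matches by value against the system register table.  */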
1141
1142 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1143 int
1144 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1145 aarch64_opnd_info *info, aarch64_insn code,
1146 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1147 {
1148 int i;
1149 /* op1:op2 */
1150 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1151 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1152 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1153 return 1;
1154 /* Reserved value in <pstatefield>. */
1155 return 0;
1156 }
1157
1158 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1159 int
1160 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1161 aarch64_opnd_info *info,
1162 aarch64_insn code,
1163 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1164 {
1165 int i;
1166 aarch64_insn value;
1167 const aarch64_sys_ins_reg *sysins_ops;
1168 /* op0:op1:CRn:CRm:op2 */
1169 value = extract_fields (code, 0, 5,
1170 FLD_op0, FLD_op1, FLD_CRn,
1171 FLD_CRm, FLD_op2);
1172
1173 switch (info->type)
1174 {
1175 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1176 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1177 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1178 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1179 default: assert (0); return 0;
1180 }
1181
1182 for (i = 0; sysins_ops[i].name != NULL; ++i)
1183 if (sysins_ops[i].value == value)
1184 {
1185 info->sysins_op = sysins_ops + i;
1186 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1187 info->sysins_op->name,
1188 (unsigned)info->sysins_op->value,
1189 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1190 return 1;
1191 }
1192
1193 return 0;
1194 }
1195
1196 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1197
1198 int
1199 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1200 aarch64_opnd_info *info,
1201 aarch64_insn code,
1202 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1203 {
1204 /* CRm */
1205 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1206 return 1;
1207 }
1208
1209 /* Decode the prefetch operation option operand for e.g.
1210 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1211
1212 int
1213 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1214 aarch64_opnd_info *info,
1215 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1216 {
1217 /* prfop in Rt */
1218 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1219 return 1;
1220 }
1221
1222 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1223 to the matching name/value pair in aarch64_hint_options. */
1224
1225 int
1226 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1227 aarch64_opnd_info *info,
1228 aarch64_insn code,
1229 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1230 {
1231 /* CRm:op2. */
1232 unsigned hint_number;
1233 int i;
1234
1235 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1236
1237 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1238 {
1239 if (hint_number == aarch64_hint_options[i].value)
1240 {
1241 info->hint_option = &(aarch64_hint_options[i]);
1242 return 1;
1243 }
1244 }
1245
1246 return 0;
1247 }
1248
1249 /* Decode the extended register operand for e.g.
1250 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1251 int
1252 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1253 aarch64_opnd_info *info,
1254 aarch64_insn code,
1255 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1256 {
1257 aarch64_insn value;
1258
1259 /* Rm */
1260 info->reg.regno = extract_field (FLD_Rm, code, 0);
1261 /* option */
1262 value = extract_field (FLD_option, code, 0);
1263 info->shifter.kind =
1264 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1265 /* imm3 */
1266 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1267
1268 /* This makes the constraint checking happy. */
1269 info->shifter.operator_present = 1;
1270
1271 /* Assume inst->operands[0].qualifier has been resolved. */
1272 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1273 info->qualifier = AARCH64_OPND_QLF_W;
1274 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1275 && (info->shifter.kind == AARCH64_MOD_UXTX
1276 || info->shifter.kind == AARCH64_MOD_SXTX))
1277 info->qualifier = AARCH64_OPND_QLF_X;
1278
1279 return 1;
1280 }
1281
1282 /* Decode the shifted register operand for e.g.
1283 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1284 int
1285 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1286 aarch64_opnd_info *info,
1287 aarch64_insn code,
1288 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1289 {
1290 aarch64_insn value;
1291
1292 /* Rm */
1293 info->reg.regno = extract_field (FLD_Rm, code, 0);
1294 /* shift */
1295 value = extract_field (FLD_shift, code, 0);
1296 info->shifter.kind =
1297 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1298 if (info->shifter.kind == AARCH64_MOD_ROR
1299 && inst->opcode->iclass != log_shift)
1300 /* ROR is not available for the shifted register operand in arithmetic
1301 instructions. */
1302 return 0;
1303 /* imm6 */
1304 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1305
1306 /* This makes the constraint checking happy. */
1307 info->shifter.operator_present = 1;
1308
1309 return 1;
1310 }
1311
1312 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1313 where <offset> is given by the OFFSET parameter and where <factor> is
1314 1 plus SELF's operand-dependent value. fields[0] specifies the field
1315 that holds <base>. */
1316 static int
1317 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1318 aarch64_opnd_info *info, aarch64_insn code,
1319 int64_t offset)
1320 {
1321 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1322 info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1323 info->addr.offset.is_reg = FALSE;
1324 info->addr.writeback = FALSE;
1325 info->addr.preind = TRUE;
1326 if (offset != 0)
1327 info->shifter.kind = AARCH64_MOD_MUL_VL;
1328 info->shifter.amount = 1;
1329 info->shifter.operator_present = (info->addr.offset.imm != 0);
1330 info->shifter.amount_present = FALSE;
1331 return 1;
1332 }
1333
1334 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1335 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1336 SELF's operand-dependent value. fields[0] specifies the field that
1337 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1338 int
1339 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1340 aarch64_opnd_info *info, aarch64_insn code,
1341 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1342 {
1343 int offset;
1344
1345 offset = extract_field (FLD_SVE_imm4, code, 0);
1346 offset = ((offset + 8) & 15) - 8;
1347 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1348 }
1349
1350 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1351 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1352 SELF's operand-dependent value. fields[0] specifies the field that
1353 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1354 int
1355 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1356 aarch64_opnd_info *info, aarch64_insn code,
1357 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1358 {
1359 int offset;
1360
1361 offset = extract_field (FLD_SVE_imm6, code, 0);
1362 offset = (((offset + 32) & 63) - 32);
1363 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1364 }
1365
1366 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1367 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1368 SELF's operand-dependent value. fields[0] specifies the field that
1369 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1370 and imm3 fields, with imm3 being the less-significant part. */
1371 int
1372 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1373 aarch64_opnd_info *info,
1374 aarch64_insn code,
1375 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1376 {
1377 int offset;
1378
1379 offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1380 offset = (((offset + 256) & 511) - 256);
1381 return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1382 }
1383
1384 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1385 is given by the OFFSET parameter and where <shift> is SELF's operand-
1386 dependent value. fields[0] specifies the base register field <base>. */
1387 static int
1388 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1389 aarch64_opnd_info *info, aarch64_insn code,
1390 int64_t offset)
1391 {
1392 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1393 info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1394 info->addr.offset.is_reg = FALSE;
1395 info->addr.writeback = FALSE;
1396 info->addr.preind = TRUE;
1397 info->shifter.operator_present = FALSE;
1398 info->shifter.amount_present = FALSE;
1399 return 1;
1400 }
1401
1402 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1403 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1404 value. fields[0] specifies the base register field. */
1405 int
1406 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1407 aarch64_opnd_info *info, aarch64_insn code,
1408 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1409 {
1410 int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1411 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1412 }
1413
1414 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1415 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1416 value. fields[0] specifies the base register field. */
1417 int
1418 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1419 aarch64_opnd_info *info, aarch64_insn code,
1420 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1421 {
1422 int offset = extract_field (FLD_SVE_imm6, code, 0);
1423 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1424 }
1425
1426 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1427 is SELF's operand-dependent value. fields[0] specifies the base
1428 register field and fields[1] specifies the offset register field. */
1429 int
1430 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1431 aarch64_opnd_info *info, aarch64_insn code,
1432 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1433 {
1434 int index_regno;
1435
1436 index_regno = extract_field (self->fields[1], code, 0);
1437 if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1438 return 0;
1439
1440 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1441 info->addr.offset.regno = index_regno;
1442 info->addr.offset.is_reg = TRUE;
1443 info->addr.writeback = FALSE;
1444 info->addr.preind = TRUE;
1445 info->shifter.kind = AARCH64_MOD_LSL;
1446 info->shifter.amount = get_operand_specific_data (self);
1447 info->shifter.operator_present = (info->shifter.amount != 0);
1448 info->shifter.amount_present = (info->shifter.amount != 0);
1449 return 1;
1450 }
1451
1452 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1453 <shift> is SELF's operand-dependent value. fields[0] specifies the
1454 base register field, fields[1] specifies the offset register field and
1455 fields[2] is a single-bit field that selects SXTW over UXTW. */
1456 int
1457 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1458 aarch64_opnd_info *info, aarch64_insn code,
1459 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1460 {
1461 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1462 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1463 info->addr.offset.is_reg = TRUE;
1464 info->addr.writeback = FALSE;
1465 info->addr.preind = TRUE;
1466 if (extract_field (self->fields[2], code, 0))
1467 info->shifter.kind = AARCH64_MOD_SXTW;
1468 else
1469 info->shifter.kind = AARCH64_MOD_UXTW;
1470 info->shifter.amount = get_operand_specific_data (self);
1471 info->shifter.operator_present = TRUE;
1472 info->shifter.amount_present = (info->shifter.amount != 0);
1473 return 1;
1474 }
1475
1476 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1477 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1478 fields[0] specifies the base register field. */
1479 int
1480 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1481 aarch64_opnd_info *info, aarch64_insn code,
1482 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1483 {
1484 int offset = extract_field (FLD_imm5, code, 0);
1485 return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1486 }
1487
1488 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1489 where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1490 number. fields[0] specifies the base register field and fields[1]
1491 specifies the offset register field. */
1492 static int
1493 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1494 aarch64_insn code, enum aarch64_modifier_kind kind)
1495 {
1496 info->addr.base_regno = extract_field (self->fields[0], code, 0);
1497 info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1498 info->addr.offset.is_reg = TRUE;
1499 info->addr.writeback = FALSE;
1500 info->addr.preind = TRUE;
1501 info->shifter.kind = kind;
1502 info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1503 info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1504 || info->shifter.amount != 0);
1505 info->shifter.amount_present = (info->shifter.amount != 0);
1506 return 1;
1507 }
1508
1509 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1510 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1511 field and fields[1] specifies the offset register field. */
1512 int
1513 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1514 aarch64_opnd_info *info, aarch64_insn code,
1515 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1516 {
1517 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1518 }
1519
1520 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1521 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1522 field and fields[1] specifies the offset register field. */
1523 int
1524 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1525 aarch64_opnd_info *info, aarch64_insn code,
1526 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1527 {
1528 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1529 }
1530
1531 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1532 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1533 field and fields[1] specifies the offset register field. */
1534 int
1535 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1536 aarch64_opnd_info *info, aarch64_insn code,
1537 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1538 {
1539 return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1540 }
1541
1542 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1543 has the raw field value and that the low 8 bits decode to VALUE. */
1544 static int
1545 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1546 {
1547 info->shifter.kind = AARCH64_MOD_LSL;
1548 info->shifter.amount = 0;
1549 if (info->imm.value & 0x100)
1550 {
1551 if (value == 0)
1552 /* Decode 0x100 as #0, LSL #8. */
1553 info->shifter.amount = 8;
1554 else
1555 value *= 256;
1556 }
1557 info->shifter.operator_present = (info->shifter.amount != 0);
1558 info->shifter.amount_present = (info->shifter.amount != 0);
1559 info->imm.value = value;
1560 return 1;
1561 }
1562
1563 /* Decode an SVE ADD/SUB immediate. */
1564 int
1565 aarch64_ext_sve_aimm (const aarch64_operand *self,
1566 aarch64_opnd_info *info, const aarch64_insn code,
1567 const aarch64_inst *inst)
1568 {
1569 return (aarch64_ext_imm (self, info, code, inst)
1570 && decode_sve_aimm (info, (uint8_t) info->imm.value));
1571 }
1572
1573 /* Decode an SVE CPY/DUP immediate. */
1574 int
1575 aarch64_ext_sve_asimm (const aarch64_operand *self,
1576 aarch64_opnd_info *info, const aarch64_insn code,
1577 const aarch64_inst *inst)
1578 {
1579 return (aarch64_ext_imm (self, info, code, inst)
1580 && decode_sve_aimm (info, (int8_t) info->imm.value));
1581 }
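
/* Worked example (illustrative): a raw 9-bit field of 0x1ff has the shift
   bit set, so aarch64_ext_sve_aimm treats the low byte 0xff as unsigned and
   yields #65280 (0xff * 256 with the LSL folded in), while
   aarch64_ext_sve_asimm treats it as signed and yields #-256.  The special
   raw value 0x100 is decoded as #0, LSL #8 instead.  */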
1582
1583 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1584 The fields array specifies which field to use. */
1585 int
1586 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1587 aarch64_opnd_info *info, aarch64_insn code,
1588 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1589 {
1590 if (extract_field (self->fields[0], code, 0))
1591 info->imm.value = 0x3f800000;
1592 else
1593 info->imm.value = 0x3f000000;
1594 info->imm.is_fp = TRUE;
1595 return 1;
1596 }
1597
1598 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1599 The fields array specifies which field to use. */
1600 int
1601 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1602 aarch64_opnd_info *info, aarch64_insn code,
1603 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1604 {
1605 if (extract_field (self->fields[0], code, 0))
1606 info->imm.value = 0x40000000;
1607 else
1608 info->imm.value = 0x3f000000;
1609 info->imm.is_fp = TRUE;
1610 return 1;
1611 }
1612
1613 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1614 The fields array specifies which field to use. */
1615 int
1616 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1617 aarch64_opnd_info *info, aarch64_insn code,
1618 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1619 {
1620 if (extract_field (self->fields[0], code, 0))
1621 info->imm.value = 0x3f800000;
1622 else
1623 info->imm.value = 0x0;
1624 info->imm.is_fp = TRUE;
1625 return 1;
1626 }
1627
1628 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1629 array specifies which field to use for Zn. MM is encoded in the
1630 concatenation of imm5 and SVE_tszh, with imm5 being the less
1631 significant part. */
1632 int
1633 aarch64_ext_sve_index (const aarch64_operand *self,
1634 aarch64_opnd_info *info, aarch64_insn code,
1635 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1636 {
1637 int val;
1638
1639 info->reglane.regno = extract_field (self->fields[0], code, 0);
1640 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1641 if ((val & 31) == 0)
1642 return 0;
1643 while ((val & 1) == 0)
1644 val /= 2;
1645 info->reglane.index = val / 2;
1646 return 1;
1647 }
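
/* Worked example (illustrative): for a .H lane the low bits of the
   triangular encoding are "10", so index 2 is encoded as
   tszh:imm5 = 0b0001010 (= 10); the loop above strips the trailing zero,
   leaving 5, and the final index is 5 / 2 = 2.  */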
1648
1649 /* Decode a logical immediate for the MOV alias of SVE DUPM. */
1650 int
1651 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1652 aarch64_opnd_info *info, const aarch64_insn code,
1653 const aarch64_inst *inst)
1654 {
1655 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1656 return (aarch64_ext_limm (self, info, code, inst)
1657 && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1658 }
1659
1660 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1661 and where MM occupies the most-significant part. The operand-dependent
1662 value specifies the number of bits in Zn. */
1663 int
1664 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1665 aarch64_opnd_info *info, aarch64_insn code,
1666 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1667 {
1668 unsigned int reg_bits = get_operand_specific_data (self);
1669 unsigned int val = extract_all_fields (self, code);
1670 info->reglane.regno = val & ((1 << reg_bits) - 1);
1671 info->reglane.index = val >> reg_bits;
1672 return 1;
1673 }
1674
1675 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1676 to use for Zn. The opcode-dependent value specifies the number
1677 of registers in the list. */
1678 int
1679 aarch64_ext_sve_reglist (const aarch64_operand *self,
1680 aarch64_opnd_info *info, aarch64_insn code,
1681 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1682 {
1683 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1684 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1685 return 1;
1686 }
1687
1688 /* Decode <pattern>{, MUL #<amount>}. The fields array specifies which
1689 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1690 field. */
1691 int
1692 aarch64_ext_sve_scale (const aarch64_operand *self,
1693 aarch64_opnd_info *info, aarch64_insn code,
1694 const aarch64_inst *inst)
1695 {
1696 int val;
1697
1698 if (!aarch64_ext_imm (self, info, code, inst))
1699 return 0;
1700 val = extract_field (FLD_SVE_imm4, code, 0);
1701 info->shifter.kind = AARCH64_MOD_MUL;
1702 info->shifter.amount = val + 1;
1703 info->shifter.operator_present = (val != 0);
1704 info->shifter.amount_present = (val != 0);
1705 return 1;
1706 }
1707
1708 /* Return the top set bit in VALUE, which is expected to be relatively
1709 small. */
1710 static uint64_t
1711 get_top_bit (uint64_t value)
1712 {
1713 while ((value & -value) != value)
1714 value -= value & -value;
1715 return value;
1716 }
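
/* Worked example (illustrative): for a raw tsz:imm value of 11 (binary
   01011) the top set bit is 8, so aarch64_ext_sve_shlimm below decodes a
   left shift of 11 - 8 = 3 and aarch64_ext_sve_shrimm decodes a right shift
   of 2 * 8 - 11 = 5; the position of that top bit also selects the element
   size (.B here).  */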
1717
1718 /* Decode an SVE shift-left immediate. */
1719 int
1720 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1721 aarch64_opnd_info *info, const aarch64_insn code,
1722 const aarch64_inst *inst)
1723 {
1724 if (!aarch64_ext_imm (self, info, code, inst)
1725 || info->imm.value == 0)
1726 return 0;
1727
1728 info->imm.value -= get_top_bit (info->imm.value);
1729 return 1;
1730 }
1731
1732 /* Decode an SVE shift-right immediate. */
1733 int
1734 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1735 aarch64_opnd_info *info, const aarch64_insn code,
1736 const aarch64_inst *inst)
1737 {
1738 if (!aarch64_ext_imm (self, info, code, inst)
1739 || info->imm.value == 0)
1740 return 0;
1741
1742 info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1743 return 1;
1744 }
1745 \f
1746 /* Bitfields that are commonly used to encode certain operands' information
1747 may be partially used as part of the base opcode in some instructions.
1748 For example, the bit 1 of the field 'size' in
1749 FCVTXN <Vb><d>, <Va><n>
1750 is actually part of the base opcode, while only size<0> is available
1751 for encoding the register type. Another example is the AdvSIMD
1752 instruction ORR (register), in which the field 'size' is also used for
1753 the base opcode, leaving only the field 'Q' available to encode the
1754 vector register arrangement specifier '8B' or '16B'.
1755
1756 This function tries to deduce the qualifier from the value of partially
1757 constrained field(s). Given the VALUE of such a field or fields, the
1758 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1759 operand encoding), the function returns the matching qualifier or
1760 AARCH64_OPND_QLF_NIL if nothing matches.
1761
1762 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1763 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1764 may end with AARCH64_OPND_QLF_NIL. */
1765
1766 static enum aarch64_opnd_qualifier
1767 get_qualifier_from_partial_encoding (aarch64_insn value,
1768 const enum aarch64_opnd_qualifier* \
1769 candidates,
1770 aarch64_insn mask)
1771 {
1772 int i;
1773 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1774 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1775 {
1776 aarch64_insn standard_value;
1777 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1778 break;
1779 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1780 if ((standard_value & mask) == (value & mask))
1781 return candidates[i];
1782 }
1783 return AARCH64_OPND_QLF_NIL;
1784 }
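
/* Hypothetical illustration of the masked comparison above: with
   VALUE = 0b10, MASK = 0b10 and candidate standard values
   { 0b00, 0b11, 0b01 }, the second candidate is the first one whose
   standard value agrees with VALUE on the bits selected by MASK and is
   therefore returned.  */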
1785
1786 /* Given a list of qualifier sequences, return all possible valid qualifiers
1787 for operand IDX in QUALIFIERS.
1788 Assume QUALIFIERS is an array whose length is large enough. */
1789
1790 static void
1791 get_operand_possible_qualifiers (int idx,
1792 const aarch64_opnd_qualifier_seq_t *list,
1793 enum aarch64_opnd_qualifier *qualifiers)
1794 {
1795 int i;
1796 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1797 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1798 break;
1799 }
1800
1801 /* Decode the size Q field for e.g. SHADD.
1802 We tag one operand with the qualifier according to the code;
1803 whether the qualifier is valid for this opcode or not is the
1804 duty of the semantic checking. */
1805
1806 static int
1807 decode_sizeq (aarch64_inst *inst)
1808 {
1809 int idx;
1810 enum aarch64_opnd_qualifier qualifier;
1811 aarch64_insn code;
1812 aarch64_insn value, mask;
1813 enum aarch64_field_kind fld_sz;
1814 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1815
1816 if (inst->opcode->iclass == asisdlse
1817 || inst->opcode->iclass == asisdlsep
1818 || inst->opcode->iclass == asisdlso
1819 || inst->opcode->iclass == asisdlsop)
1820 fld_sz = FLD_vldst_size;
1821 else
1822 fld_sz = FLD_size;
1823
1824 code = inst->value;
1825 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1826 /* Obtain the info about which bits of the fields Q and size are actually
1827 available for operand encoding. Opcodes like FMAXNM and FMLA have
1828 size[1] unavailable. */
1829 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1830
1831 /* The index of the operand that we are going to tag with a qualifier, and the
1832 qualifier itself, are deduced from the value of the size and Q fields and the
1833 possible valid qualifier lists. */
1834 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1835 DEBUG_TRACE ("key idx: %d", idx);
1836
1837 /* For most related instructions, size:Q is fully available for operand
1838 encoding. */
1839 if (mask == 0x7)
1840 {
1841 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1842 return 1;
1843 }
1844
1845 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1846 candidates);
1847 #ifdef DEBUG_AARCH64
1848 if (debug_dump)
1849 {
1850 int i;
1851 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1852 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1853 DEBUG_TRACE ("qualifier %d: %s", i,
1854 aarch64_get_qualifier_name(candidates[i]));
1855 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1856 }
1857 #endif /* DEBUG_AARCH64 */
1858
1859 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1860
1861 if (qualifier == AARCH64_OPND_QLF_NIL)
1862 return 0;
1863
1864 inst->operands[idx].qualifier = qualifier;
1865 return 1;
1866 }
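
/* Example of the fully-available case above: when all of size:Q is free
   for operand encoding (mask == 0x7), a decoded size:Q of 0b01:1 (value 3)
   selects the 8H vector arrangement, while 0b10:0 (value 4) selects 2S.  */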
1867
1868 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1869 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1870
1871 static int
1872 decode_asimd_fcvt (aarch64_inst *inst)
1873 {
1874 aarch64_field field = {0, 0};
1875 aarch64_insn value;
1876 enum aarch64_opnd_qualifier qualifier;
1877
1878 gen_sub_field (FLD_size, 0, 1, &field);
1879 value = extract_field_2 (&field, inst->value, 0);
1880 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1881 : AARCH64_OPND_QLF_V_2D;
1882 switch (inst->opcode->op)
1883 {
1884 case OP_FCVTN:
1885 case OP_FCVTN2:
1886 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1887 inst->operands[1].qualifier = qualifier;
1888 break;
1889 case OP_FCVTL:
1890 case OP_FCVTL2:
1891 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1892 inst->operands[0].qualifier = qualifier;
1893 break;
1894 default:
1895 assert (0);
1896 return 0;
1897 }
1898
1899 return 1;
1900 }
1901
1902 /* Decode size[0], i.e. bit 22, for
1903 e.g. FCVTXN <Vb><d>, <Va><n>. */
1904
1905 static int
1906 decode_asisd_fcvtxn (aarch64_inst *inst)
1907 {
1908 aarch64_field field = {0, 0};
1909 gen_sub_field (FLD_size, 0, 1, &field);
1910 if (!extract_field_2 (&field, inst->value, 0))
1911 return 0;
1912 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1913 return 1;
1914 }
1915
1916 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1917 static int
1918 decode_fcvt (aarch64_inst *inst)
1919 {
1920 enum aarch64_opnd_qualifier qualifier;
1921 aarch64_insn value;
1922 const aarch64_field field = {15, 2};
1923
1924 /* opc dstsize */
1925 value = extract_field_2 (&field, inst->value, 0);
1926 switch (value)
1927 {
1928 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1929 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1930 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1931 default: return 0;
1932 }
1933 inst->operands[0].qualifier = qualifier;
1934
1935 return 1;
1936 }
1937
1938 /* Do miscellaneous decodings that are not common enough to be driven by
1939 flags. */
1940
1941 static int
1942 do_misc_decoding (aarch64_inst *inst)
1943 {
1944 unsigned int value;
1945 switch (inst->opcode->op)
1946 {
1947 case OP_FCVT:
1948 return decode_fcvt (inst);
1949
1950 case OP_FCVTN:
1951 case OP_FCVTN2:
1952 case OP_FCVTL:
1953 case OP_FCVTL2:
1954 return decode_asimd_fcvt (inst);
1955
1956 case OP_FCVTXN_S:
1957 return decode_asisd_fcvtxn (inst);
1958
1959 case OP_MOV_P_P:
1960 case OP_MOVS_P_P:
1961 value = extract_field (FLD_SVE_Pn, inst->value, 0);
1962 return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
1963 && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1964
1965 case OP_MOV_Z_P_Z:
1966 return (extract_field (FLD_SVE_Zd, inst->value, 0)
1967 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1968
1969 case OP_MOV_Z_V:
1970 /* Index must be zero. */
1971 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1972 return value > 0 && value <= 16 && value == (value & -value);
1973
1974 case OP_MOV_Z_Z:
1975 return (extract_field (FLD_SVE_Zn, inst->value, 0)
1976 == extract_field (FLD_SVE_Zm_16, inst->value, 0));
1977
1978 case OP_MOV_Z_Zi:
1979 /* Index must be nonzero. */
1980 value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
1981 return value > 0 && value != (value & -value);
1982
1983 case OP_MOVM_P_P_P:
1984 return (extract_field (FLD_SVE_Pd, inst->value, 0)
1985 == extract_field (FLD_SVE_Pm, inst->value, 0));
1986
1987 case OP_MOVZS_P_P_P:
1988 case OP_MOVZ_P_P_P:
1989 return (extract_field (FLD_SVE_Pn, inst->value, 0)
1990 == extract_field (FLD_SVE_Pm, inst->value, 0));
1991
1992 case OP_NOTS_P_P_P_Z:
1993 case OP_NOT_P_P_P_Z:
1994 return (extract_field (FLD_SVE_Pm, inst->value, 0)
1995 == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
1996
1997 default:
1998 return 0;
1999 }
2000 }
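
/* Note on the OP_MOV_Z_V / OP_MOV_Z_Zi cases above: "value & -value"
   isolates the lowest set bit, so "value == (value & -value)" holds exactly
   when a single bit is set in the combined tszh:imm5 field.  Together with
   the in-line comments ("Index must be zero/nonzero"), a single set bit
   (e.g. 0b00100) corresponds to index zero, while additional higher bits
   (e.g. 0b01100) encode a nonzero index.  */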
2001
2002 /* Opcodes that have fields shared by multiple operands are usually flagged
2003 with flags. In this function, we detect such flags, decode the related
2004 field(s) and store the information in one of the related operands. The
2005 'one' operand is not an arbitrary operand, but one of the operands that can
2006 accommodate all the information that has been decoded. */
2007
2008 static int
2009 do_special_decoding (aarch64_inst *inst)
2010 {
2011 int idx;
2012 aarch64_insn value;
2013 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
2014 if (inst->opcode->flags & F_COND)
2015 {
2016 value = extract_field (FLD_cond2, inst->value, 0);
2017 inst->cond = get_cond_from_value (value);
2018 }
2019 /* 'sf' field. */
2020 if (inst->opcode->flags & F_SF)
2021 {
2022 idx = select_operand_for_sf_field_coding (inst->opcode);
2023 value = extract_field (FLD_sf, inst->value, 0);
2024 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2025 if ((inst->opcode->flags & F_N)
2026 && extract_field (FLD_N, inst->value, 0) != value)
2027 return 0;
2028 }
2029 /* 'lse_sz' field. */
2030 if (inst->opcode->flags & F_LSE_SZ)
2031 {
2032 idx = select_operand_for_sf_field_coding (inst->opcode);
2033 value = extract_field (FLD_lse_sz, inst->value, 0);
2034 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2035 }
2036 /* size:Q fields. */
2037 if (inst->opcode->flags & F_SIZEQ)
2038 return decode_sizeq (inst);
2039
2040 if (inst->opcode->flags & F_FPTYPE)
2041 {
2042 idx = select_operand_for_fptype_field_coding (inst->opcode);
2043 value = extract_field (FLD_type, inst->value, 0);
2044 switch (value)
2045 {
2046 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2047 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2048 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2049 default: return 0;
2050 }
2051 }
2052
2053 if (inst->opcode->flags & F_SSIZE)
2054 {
2055 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
2056 of the base opcode. */
2057 aarch64_insn mask;
2058 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2059 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2060 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2061 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2062 /* For most related instructions, the 'size' field is fully available for
2063 operand encoding. */
2064 if (mask == 0x3)
2065 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2066 else
2067 {
2068 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2069 candidates);
2070 inst->operands[idx].qualifier
2071 = get_qualifier_from_partial_encoding (value, candidates, mask);
2072 }
2073 }
2074
2075 if (inst->opcode->flags & F_T)
2076 {
2077 /* Number of trailing '0' bits in imm5<3:0>. */
2078 int num = 0;
2079 unsigned val, Q;
2080 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2081 == AARCH64_OPND_CLASS_SIMD_REG);
2082 /* imm5<3:0> q <t>
2083 0000 x reserved
2084 xxx1 0 8b
2085 xxx1 1 16b
2086 xx10 0 4h
2087 xx10 1 8h
2088 x100 0 2s
2089 x100 1 4s
2090 1000 0 reserved
2091 1000 1 2d */
2092 val = extract_field (FLD_imm5, inst->value, 0);
2093 while ((val & 0x1) == 0 && ++num <= 3)
2094 val >>= 1;
2095 if (num > 3)
2096 return 0;
2097 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2098 inst->operands[0].qualifier =
2099 get_vreg_qualifier_from_value ((num << 1) | Q);
2100 }
2101
2102 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2103 {
2104 /* Use Rt to encode in the case of e.g.
2105 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2106 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2107 if (idx == -1)
2108 {
2109 /* Otherwise use the result operand, which has to be an integer
2110 register. */
2111 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2112 == AARCH64_OPND_CLASS_INT_REG);
2113 idx = 0;
2114 }
2115 assert (idx == 0 || idx == 1);
2116 value = extract_field (FLD_Q, inst->value, 0);
2117 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2118 }
2119
2120 if (inst->opcode->flags & F_LDS_SIZE)
2121 {
2122 aarch64_field field = {0, 0};
2123 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2124 == AARCH64_OPND_CLASS_INT_REG);
2125 gen_sub_field (FLD_opc, 0, 1, &field);
2126 value = extract_field_2 (&field, inst->value, 0);
2127 inst->operands[0].qualifier
2128 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2129 }
2130
2131 /* Miscellaneous decoding; done as the last step. */
2132 if (inst->opcode->flags & F_MISC)
2133 return do_misc_decoding (inst);
2134
2135 return 1;
2136 }
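
/* Worked example for the F_T decoding above: imm5 = 0b0100 has two trailing
   zero bits, so num = 2; with Q = 1 this gives (num << 1) | Q = 5, i.e. the
   4S arrangement (the "x100, Q = 1" row of the table in the comment).  */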
2137
2138 /* Converters that convert a real opcode instruction to its alias form. */
2139
2140 /* ROR <Wd>, <Ws>, #<shift>
2141 is equivalent to:
2142 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2143 static int
2144 convert_extr_to_ror (aarch64_inst *inst)
2145 {
2146 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2147 {
2148 copy_operand_info (inst, 2, 3);
2149 inst->operands[3].type = AARCH64_OPND_NIL;
2150 return 1;
2151 }
2152 return 0;
2153 }
2154
2155 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2156 is equivalent to:
2157 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2158 static int
2159 convert_shll_to_xtl (aarch64_inst *inst)
2160 {
2161 if (inst->operands[2].imm.value == 0)
2162 {
2163 inst->operands[2].type = AARCH64_OPND_NIL;
2164 return 1;
2165 }
2166 return 0;
2167 }
2168
2169 /* Convert
2170 UBFM <Xd>, <Xn>, #<shift>, #63.
2171 to
2172 LSR <Xd>, <Xn>, #<shift>. */
2173 static int
2174 convert_bfm_to_sr (aarch64_inst *inst)
2175 {
2176 int64_t imms, val;
2177
2178 imms = inst->operands[3].imm.value;
2179 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2180 if (imms == val)
2181 {
2182 inst->operands[3].type = AARCH64_OPND_NIL;
2183 return 1;
2184 }
2185
2186 return 0;
2187 }
2188
2189 /* Convert MOV to ORR. */
2190 static int
2191 convert_orr_to_mov (aarch64_inst *inst)
2192 {
2193 /* MOV <Vd>.<T>, <Vn>.<T>
2194 is equivalent to:
2195 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2196 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2197 {
2198 inst->operands[2].type = AARCH64_OPND_NIL;
2199 return 1;
2200 }
2201 return 0;
2202 }
2203
2204 /* When <imms> >= <immr>, the instruction written:
2205 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2206 is equivalent to:
2207 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2208
2209 static int
2210 convert_bfm_to_bfx (aarch64_inst *inst)
2211 {
2212 int64_t immr, imms;
2213
2214 immr = inst->operands[2].imm.value;
2215 imms = inst->operands[3].imm.value;
2216 if (imms >= immr)
2217 {
2218 int64_t lsb = immr;
2219 inst->operands[2].imm.value = lsb;
2220 inst->operands[3].imm.value = imms + 1 - lsb;
2221 /* The two opcodes have different qualifiers for
2222 the immediate operands; reset to help the checking. */
2223 reset_operand_qualifier (inst, 2);
2224 reset_operand_qualifier (inst, 3);
2225 return 1;
2226 }
2227
2228 return 0;
2229 }
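
/* Worked example (hypothetical operands): UBFM X0, X1, #8, #15 has
   immr = 8 <= imms = 15, so the conversion above gives lsb = 8 and
   width = 15 + 1 - 8 = 8, i.e. the alias UBFX X0, X1, #8, #8.  */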
2230
2231 /* When <imms> < <immr>, the instruction written:
2232 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2233 is equivalent to:
2234 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2235
2236 static int
2237 convert_bfm_to_bfi (aarch64_inst *inst)
2238 {
2239 int64_t immr, imms, val;
2240
2241 immr = inst->operands[2].imm.value;
2242 imms = inst->operands[3].imm.value;
2243 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2244 if (imms < immr)
2245 {
2246 inst->operands[2].imm.value = (val - immr) & (val - 1);
2247 inst->operands[3].imm.value = imms + 1;
2248 /* The two opcodes have different qualifiers for
2249 the immediate operands; reset to help the checking. */
2250 reset_operand_qualifier (inst, 2);
2251 reset_operand_qualifier (inst, 3);
2252 return 1;
2253 }
2254
2255 return 0;
2256 }
2257
2258 /* The instruction written:
2259 BFC <Xd>, #<lsb>, #<width>
2260 is equivalent to:
2261 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2262
2263 static int
2264 convert_bfm_to_bfc (aarch64_inst *inst)
2265 {
2266 int64_t immr, imms, val;
2267
2268 /* Should have been assured by the base opcode value. */
2269 assert (inst->operands[1].reg.regno == 0x1f);
2270
2271 immr = inst->operands[2].imm.value;
2272 imms = inst->operands[3].imm.value;
2273 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2274 if (imms < immr)
2275 {
2276 /* Drop XZR from the second operand. */
2277 copy_operand_info (inst, 1, 2);
2278 copy_operand_info (inst, 2, 3);
2279 inst->operands[3].type = AARCH64_OPND_NIL;
2280
2281 /* Recalculate the immediates. */
2282 inst->operands[1].imm.value = (val - immr) & (val - 1);
2283 inst->operands[2].imm.value = imms + 1;
2284
2285 /* The two opcodes have different qualifiers for the operands; reset to
2286 help the checking. */
2287 reset_operand_qualifier (inst, 1);
2288 reset_operand_qualifier (inst, 2);
2289 reset_operand_qualifier (inst, 3);
2290
2291 return 1;
2292 }
2293
2294 return 0;
2295 }
2296
2297 /* The instruction written:
2298 LSL <Xd>, <Xn>, #<shift>
2299 is equivalent to:
2300 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2301
2302 static int
2303 convert_ubfm_to_lsl (aarch64_inst *inst)
2304 {
2305 int64_t immr = inst->operands[2].imm.value;
2306 int64_t imms = inst->operands[3].imm.value;
2307 int64_t val
2308 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2309
2310 if ((immr == 0 && imms == val) || immr == imms + 1)
2311 {
2312 inst->operands[3].type = AARCH64_OPND_NIL;
2313 inst->operands[2].imm.value = val - imms;
2314 return 1;
2315 }
2316
2317 return 0;
2318 }
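
/* Worked example (hypothetical operands): UBFM X0, X1, #56, #55 satisfies
   immr == imms + 1, so the conversion above yields a shift of
   63 - 55 = 8, i.e. the alias LSL X0, X1, #8.  */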
2319
2320 /* CINC <Wd>, <Wn>, <cond>
2321 is equivalent to:
2322 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2323 where <cond> is not AL or NV. */
2324
2325 static int
2326 convert_from_csel (aarch64_inst *inst)
2327 {
2328 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2329 && (inst->operands[3].cond->value & 0xe) != 0xe)
2330 {
2331 copy_operand_info (inst, 2, 3);
2332 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2333 inst->operands[3].type = AARCH64_OPND_NIL;
2334 return 1;
2335 }
2336 return 0;
2337 }
2338
2339 /* CSET <Wd>, <cond>
2340 is equivalent to:
2341 CSINC <Wd>, WZR, WZR, invert(<cond>)
2342 where <cond> is not AL or NV. */
2343
2344 static int
2345 convert_csinc_to_cset (aarch64_inst *inst)
2346 {
2347 if (inst->operands[1].reg.regno == 0x1f
2348 && inst->operands[2].reg.regno == 0x1f
2349 && (inst->operands[3].cond->value & 0xe) != 0xe)
2350 {
2351 copy_operand_info (inst, 1, 3);
2352 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2353 inst->operands[3].type = AARCH64_OPND_NIL;
2354 inst->operands[2].type = AARCH64_OPND_NIL;
2355 return 1;
2356 }
2357 return 0;
2358 }
2359
2360 /* MOV <Wd>, #<imm>
2361 is equivalent to:
2362 MOVZ <Wd>, #<imm16>, LSL #<shift>.
2363
2364 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2365 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2366 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2367 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2368 machine-instruction mnemonic must be used. */
2369
2370 static int
2371 convert_movewide_to_mov (aarch64_inst *inst)
2372 {
2373 uint64_t value = inst->operands[1].imm.value;
2374 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
2375 if (value == 0 && inst->operands[1].shifter.amount != 0)
2376 return 0;
2377 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2378 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2379 value <<= inst->operands[1].shifter.amount;
2380 /* As this is an alias converter, bear in mind that INST->OPCODE
2381 is the opcode of the real instruction. */
2382 if (inst->opcode->op == OP_MOVN)
2383 {
2384 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2385 value = ~value;
2386 /* A MOVN has an immediate that could be encoded by MOVZ. */
2387 if (aarch64_wide_constant_p (value, is32, NULL))
2388 return 0;
2389 }
2390 inst->operands[1].imm.value = value;
2391 inst->operands[1].shifter.amount = 0;
2392 return 1;
2393 }
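
/* Worked example (hypothetical operands): MOVZ X0, #0x1234, LSL #16 is
   converted to MOV X0, #0x12340000, whereas MOVZ/MOVN of #0 with a nonzero
   shift amount is rejected by the early return above and therefore kept in
   its machine-instruction form.  */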
2394
2395 /* MOV <Wd>, #<imm>
2396 is equivalent to:
2397 ORR <Wd>, WZR, #<imm>.
2398
2399 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2400 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2401 or where a MOVN has an immediate that could be encoded by MOVZ, or where
2402 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2403 machine-instruction mnemonic must be used. */
2404
2405 static int
2406 convert_movebitmask_to_mov (aarch64_inst *inst)
2407 {
2408 int is32;
2409 uint64_t value;
2410
2411 /* Should have been assured by the base opcode value. */
2412 assert (inst->operands[1].reg.regno == 0x1f);
2413 copy_operand_info (inst, 1, 2);
2414 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2415 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2416 value = inst->operands[1].imm.value;
2417 /* ORR has an immediate that could be generated by a MOVZ or MOVN
2418 instruction. */
2419 if (inst->operands[0].reg.regno != 0x1f
2420 && (aarch64_wide_constant_p (value, is32, NULL)
2421 || aarch64_wide_constant_p (~value, is32, NULL)))
2422 return 0;
2423
2424 inst->operands[2].type = AARCH64_OPND_NIL;
2425 return 1;
2426 }
2427
2428 /* Some alias opcodes are disassembled by being converted from their real form.
2429 N.B. INST->OPCODE is the real opcode rather than the alias. */
2430
2431 static int
2432 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2433 {
2434 switch (alias->op)
2435 {
2436 case OP_ASR_IMM:
2437 case OP_LSR_IMM:
2438 return convert_bfm_to_sr (inst);
2439 case OP_LSL_IMM:
2440 return convert_ubfm_to_lsl (inst);
2441 case OP_CINC:
2442 case OP_CINV:
2443 case OP_CNEG:
2444 return convert_from_csel (inst);
2445 case OP_CSET:
2446 case OP_CSETM:
2447 return convert_csinc_to_cset (inst);
2448 case OP_UBFX:
2449 case OP_BFXIL:
2450 case OP_SBFX:
2451 return convert_bfm_to_bfx (inst);
2452 case OP_SBFIZ:
2453 case OP_BFI:
2454 case OP_UBFIZ:
2455 return convert_bfm_to_bfi (inst);
2456 case OP_BFC:
2457 return convert_bfm_to_bfc (inst);
2458 case OP_MOV_V:
2459 return convert_orr_to_mov (inst);
2460 case OP_MOV_IMM_WIDE:
2461 case OP_MOV_IMM_WIDEN:
2462 return convert_movewide_to_mov (inst);
2463 case OP_MOV_IMM_LOG:
2464 return convert_movebitmask_to_mov (inst);
2465 case OP_ROR_IMM:
2466 return convert_extr_to_ror (inst);
2467 case OP_SXTL:
2468 case OP_SXTL2:
2469 case OP_UXTL:
2470 case OP_UXTL2:
2471 return convert_shll_to_xtl (inst);
2472 default:
2473 return 0;
2474 }
2475 }
2476
2477 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2478 aarch64_inst *, int);
2479
2480 /* Given the instruction information in *INST, check if the instruction has
2481 any alias form that can be used to represent *INST. If the answer is yes,
2482 update *INST to be in the form of the determined alias. */
2483
2484 /* In the opcode description table, the following flags are used in opcode
2485 entries to help establish the relations between the real and alias opcodes:
2486
2487 F_ALIAS: opcode is an alias
2488 F_HAS_ALIAS: opcode has alias(es)
2489 F_P1
2490 F_P2
2491 F_P3: Disassembly preference priority 1-3 (the larger the number,
2492 the higher the priority). If nothing is specified, the priority
2493 is 0 by default, i.e. the lowest priority.
2494
2495 Although the relation between the machine and the alias instructions is not
2496 explicitly described, it can be easily determined from the base opcode
2497 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2498 description entries:
2499
2500 The mask of an alias opcode must be equal to or a super-set (i.e. more
2501 constrained) of that of the aliased opcode; so is the base opcode value.
2502
2503 if (opcode_has_alias (real) && alias_opcode_p (opcode)
2504 && (opcode->mask & real->mask) == real->mask
2505 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2506 then OPCODE is an alias of, and only of, the REAL instruction
2507
2508 The alias relationship is forced to be flat-structured to keep the related algorithm
2509 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2510
2511 During disassembly, the decoding decision tree (in
2512 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2513 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2514 not specified), the disassembler will check whether any alias
2515 instruction exists for this real instruction. If there is, the disassembler
2516 will try to disassemble the 32-bit binary again using the alias's rule, or
2517 try to convert the IR to the form of the alias. In the case of multiple
2518 aliases, the aliases are tried one by one from the highest priority
2519 (currently the flag F_P3) to the lowest priority (no priority flag), and the
2520 first one that succeeds is adopted.
2521
2522 You may ask why there is a need for the conversion of IR from one form to
2523 another in handling certain aliases. This is because, on the one hand, it avoids
2524 adding more operand code to handle unusual encoding/decoding; on the other
2525 hand, during disassembly, the conversion is an effective approach to
2526 check the condition of an alias (as an alias may be adopted only if certain
2527 conditions are met).
2528
2529 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2530 aarch64_opcode_table and generated aarch64_find_alias_opcode and
2531 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
2532
2533 static void
2534 determine_disassembling_preference (struct aarch64_inst *inst)
2535 {
2536 const aarch64_opcode *opcode;
2537 const aarch64_opcode *alias;
2538
2539 opcode = inst->opcode;
2540
2541 /* If this opcode does not have an alias, use the opcode itself. */
2542 if (!opcode_has_alias (opcode))
2543 return;
2544
2545 alias = aarch64_find_alias_opcode (opcode);
2546 assert (alias);
2547
2548 #ifdef DEBUG_AARCH64
2549 if (debug_dump)
2550 {
2551 const aarch64_opcode *tmp = alias;
2552 printf ("#### LIST ordered: ");
2553 while (tmp)
2554 {
2555 printf ("%s, ", tmp->name);
2556 tmp = aarch64_find_next_alias_opcode (tmp);
2557 }
2558 printf ("\n");
2559 }
2560 #endif /* DEBUG_AARCH64 */
2561
2562 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2563 {
2564 DEBUG_TRACE ("try %s", alias->name);
2565 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2566
2567 /* An alias can be a pseudo opcode which will never be used in the
2568 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2569 aliasing AND. */
2570 if (pseudo_opcode_p (alias))
2571 {
2572 DEBUG_TRACE ("skip pseudo %s", alias->name);
2573 continue;
2574 }
2575
2576 if ((inst->value & alias->mask) != alias->opcode)
2577 {
2578 DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
2579 continue;
2580 }
2581 /* No need to do any complicated transformation on the operands if the alias
2582 opcode does not have any operands. */
2583 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2584 {
2585 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2586 aarch64_replace_opcode (inst, alias);
2587 return;
2588 }
2589 if (alias->flags & F_CONV)
2590 {
2591 aarch64_inst copy;
2592 memcpy (&copy, inst, sizeof (aarch64_inst));
2593 /* ALIAS is preferred as long as the instruction can be
2594 successfully converted to the form of ALIAS. */
2595 if (convert_to_alias (&copy, alias) == 1)
2596 {
2597 aarch64_replace_opcode (&copy, alias);
2598 assert (aarch64_match_operands_constraint (&copy, NULL));
2599 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2600 memcpy (inst, &copy, sizeof (aarch64_inst));
2601 return;
2602 }
2603 }
2604 else
2605 {
2606 /* Directly decode the alias opcode. */
2607 aarch64_inst temp;
2608 memset (&temp, '\0', sizeof (aarch64_inst));
2609 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2610 {
2611 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2612 memcpy (inst, &temp, sizeof (aarch64_inst));
2613 return;
2614 }
2615 }
2616 }
2617 }
2618
2619 /* Some instructions (including all SVE ones) use the instruction class
2620 to describe how a qualifiers_list index is represented in the instruction
2621 encoding. If INST is such an instruction, decode the appropriate fields
2622 and fill in the operand qualifiers accordingly. Return true if no
2623 problems are found. */
2624
2625 static bfd_boolean
2626 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2627 {
2628 int i, variant;
2629
2630 variant = 0;
2631 switch (inst->opcode->iclass)
2632 {
2633 case sve_cpy:
2634 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2635 break;
2636
2637 case sve_index:
2638 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2639 if ((i & 31) == 0)
2640 return FALSE;
2641 while ((i & 1) == 0)
2642 {
2643 i >>= 1;
2644 variant += 1;
2645 }
2646 break;
2647
2648 case sve_limm:
2649 /* Pick the smallest applicable element size. */
2650 if ((inst->value & 0x20600) == 0x600)
2651 variant = 0;
2652 else if ((inst->value & 0x20400) == 0x400)
2653 variant = 1;
2654 else if ((inst->value & 0x20000) == 0)
2655 variant = 2;
2656 else
2657 variant = 3;
2658 break;
2659
2660 case sve_misc:
2661 /* sve_misc instructions have only a single variant. */
2662 break;
2663
2664 case sve_movprfx:
2665 variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2666 break;
2667
2668 case sve_pred_zm:
2669 variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2670 break;
2671
2672 case sve_shift_pred:
2673 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2674 sve_shift:
2675 if (i == 0)
2676 return FALSE;
2677 while (i != 1)
2678 {
2679 i >>= 1;
2680 variant += 1;
2681 }
2682 break;
2683
2684 case sve_shift_unpred:
2685 i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2686 goto sve_shift;
2687
2688 case sve_size_bhs:
2689 variant = extract_field (FLD_size, inst->value, 0);
2690 if (variant >= 3)
2691 return FALSE;
2692 break;
2693
2694 case sve_size_bhsd:
2695 variant = extract_field (FLD_size, inst->value, 0);
2696 break;
2697
2698 case sve_size_hsd:
2699 i = extract_field (FLD_size, inst->value, 0);
2700 if (i < 1)
2701 return FALSE;
2702 variant = i - 1;
2703 break;
2704
2705 case sve_size_sd:
2706 variant = extract_field (FLD_SVE_sz, inst->value, 0);
2707 break;
2708
2709 default:
2710 /* No mapping between instruction class and qualifiers. */
2711 return TRUE;
2712 }
2713
2714 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2715 inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2716 return TRUE;
2717 }
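
/* Example of the sve_shift_pred / sve_shift_unpred handling above: a
   combined tsz value of 0b0100 is shifted right until it reaches 1, giving
   variant = 2, i.e. the third entry of the opcode's qualifiers_list.  */
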
2718 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2719 fails, which means that CODE is not an instruction of OPCODE; otherwise
2720 return 1.
2721
2722 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2723 determined and used to disassemble CODE; this is done just before the
2724 return. */
2725
2726 static int
2727 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2728 aarch64_inst *inst, int noaliases_p)
2729 {
2730 int i;
2731
2732 DEBUG_TRACE ("enter with %s", opcode->name);
2733
2734 assert (opcode && inst);
2735
2736 /* Check the base opcode. */
2737 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2738 {
2739 DEBUG_TRACE ("base opcode match FAIL");
2740 goto decode_fail;
2741 }
2742
2743 /* Clear inst. */
2744 memset (inst, '\0', sizeof (aarch64_inst));
2745
2746 inst->opcode = opcode;
2747 inst->value = code;
2748
2749 /* Assign operand codes and indexes. */
2750 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2751 {
2752 if (opcode->operands[i] == AARCH64_OPND_NIL)
2753 break;
2754 inst->operands[i].type = opcode->operands[i];
2755 inst->operands[i].idx = i;
2756 }
2757
2758 /* Call the opcode decoder indicated by flags. */
2759 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2760 {
2761 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2762 goto decode_fail;
2763 }
2764
2765 /* Possibly use the instruction class to determine the correct
2766 qualifier. */
2767 if (!aarch64_decode_variant_using_iclass (inst))
2768 {
2769 DEBUG_TRACE ("iclass-based decoder FAIL");
2770 goto decode_fail;
2771 }
2772
2773 /* Call operand decoders. */
2774 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2775 {
2776 const aarch64_operand *opnd;
2777 enum aarch64_opnd type;
2778
2779 type = opcode->operands[i];
2780 if (type == AARCH64_OPND_NIL)
2781 break;
2782 opnd = &aarch64_operands[type];
2783 if (operand_has_extractor (opnd)
2784 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2785 {
2786 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2787 goto decode_fail;
2788 }
2789 }
2790
2791 /* If the opcode has a verifier, then check it now. */
2792 if (opcode->verifier && ! opcode->verifier (opcode, code))
2793 {
2794 DEBUG_TRACE ("operand verifier FAIL");
2795 goto decode_fail;
2796 }
2797
2798 /* Match the qualifiers. */
2799 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2800 {
2801 /* Arriving here, the CODE has been determined as a valid instruction
2802 of OPCODE and *INST has been filled with information of this OPCODE
2803 instruction. Before the return, check if the instruction has any
2804 alias and should be disassembled in the form of its alias instead.
2805 If the answer is yes, *INST will be updated. */
2806 if (!noaliases_p)
2807 determine_disassembling_preference (inst);
2808 DEBUG_TRACE ("SUCCESS");
2809 return 1;
2810 }
2811 else
2812 {
2813 DEBUG_TRACE ("constraint matching FAIL");
2814 }
2815
2816 decode_fail:
2817 return 0;
2818 }
2819 \f
2820 /* This does some user-friendly fix-up to *INST. It currently focuses on
2821 the adjustment of qualifiers to help the printed instruction be
2822 recognized/understood more easily. */
2823
2824 static void
2825 user_friendly_fixup (aarch64_inst *inst)
2826 {
2827 switch (inst->opcode->iclass)
2828 {
2829 case testbranch:
2830 /* TBNZ Xn|Wn, #uimm6, label
2831 Test and Branch Not Zero: conditionally jumps to label if bit number
2832 uimm6 in register Xn is not zero. The bit number implies the width of
2833 the register, which may be written and should be disassembled as Wn if
2834 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
2835 */
2836 if (inst->operands[1].imm.value < 32)
2837 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2838 break;
2839 default: break;
2840 }
2841 }
2842
2843 /* Decode INSN and fill *INST with the instruction information. An alias
2844 opcode may be filled in *INST if NOALIASES_P is FALSE. Return zero on
2845 success. */
2846
2847 int
2848 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2849 bfd_boolean noaliases_p)
2850 {
2851 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2852
2853 #ifdef DEBUG_AARCH64
2854 if (debug_dump)
2855 {
2856 const aarch64_opcode *tmp = opcode;
2857 printf ("\n");
2858 DEBUG_TRACE ("opcode lookup:");
2859 while (tmp != NULL)
2860 {
2861 aarch64_verbose (" %s", tmp->name);
2862 tmp = aarch64_find_next_opcode (tmp);
2863 }
2864 }
2865 #endif /* DEBUG_AARCH64 */
2866
2867 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2868 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2869 opcode field and value, apart from the difference that one of them has an
2870 extra field as part of the opcode, but such a field is used for operand
2871 encoding in other opcode(s) ('immh' in the case of the example). */
2872 while (opcode != NULL)
2873 {
2874 /* But only one opcode can be decoded successfully, as the
2875 decoding routine will check the constraints carefully. */
2876 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2877 return ERR_OK;
2878 opcode = aarch64_find_next_opcode (opcode);
2879 }
2880
2881 return ERR_UND;
2882 }
2883
2884 /* Print operands. */
2885
2886 static void
2887 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2888 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2889 {
2890 int i, pcrel_p, num_printed;
2891 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2892 {
2893 char str[128];
2894 /* We rely mainly on the opcode operand info; however, we also look into
2895 the inst->operands to support the disassembling of the optional
2896 operand.
2897 The two operand codes should be the same in all cases, apart from
2898 when the operand can be optional. */
2899 if (opcode->operands[i] == AARCH64_OPND_NIL
2900 || opnds[i].type == AARCH64_OPND_NIL)
2901 break;
2902
2903 /* Generate the operand string in STR. */
2904 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2905 &info->target);
2906
2907 /* Print the delimiter (taking account of omitted operand(s)). */
2908 if (str[0] != '\0')
2909 (*info->fprintf_func) (info->stream, "%s",
2910 num_printed++ == 0 ? "\t" : ", ");
2911
2912 /* Print the operand. */
2913 if (pcrel_p)
2914 (*info->print_address_func) (info->target, info);
2915 else
2916 (*info->fprintf_func) (info->stream, "%s", str);
2917 }
2918 }
2919
2920 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed. */
2921
2922 static void
2923 remove_dot_suffix (char *name, const aarch64_inst *inst)
2924 {
2925 char *ptr;
2926 size_t len;
2927
2928 ptr = strchr (inst->opcode->name, '.');
2929 assert (ptr && inst->cond);
2930 len = ptr - inst->opcode->name;
2931 assert (len < 8);
2932 strncpy (name, inst->opcode->name, len);
2933 name[len] = '\0';
2934 }
2935
2936 /* Print the instruction mnemonic name. */
2937
2938 static void
2939 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2940 {
2941 if (inst->opcode->flags & F_COND)
2942 {
2943 /* For instructions that are truly conditionally executed, e.g. b.cond,
2944 prepare the full mnemonic name with the corresponding condition
2945 suffix. */
2946 char name[8];
2947
2948 remove_dot_suffix (name, inst);
2949 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2950 }
2951 else
2952 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2953 }
2954
2955 /* Decide whether we need to print a comment after the operands of
2956 instruction INST. */
2957
2958 static void
2959 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
2960 {
2961 if (inst->opcode->flags & F_COND)
2962 {
2963 char name[8];
2964 unsigned int i, num_conds;
2965
2966 remove_dot_suffix (name, inst);
2967 num_conds = ARRAY_SIZE (inst->cond->names);
2968 for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
2969 (*info->fprintf_func) (info->stream, "%s %s.%s",
2970 i == 1 ? " //" : ",",
2971 name, inst->cond->names[i]);
2972 }
2973 }
2974
2975 /* Print the instruction according to *INST. */
2976
2977 static void
2978 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2979 struct disassemble_info *info)
2980 {
2981 print_mnemonic_name (inst, info);
2982 print_operands (pc, inst->opcode, inst->operands, info);
2983 print_comment (inst, info);
2984 }
2985
2986 /* Entry-point of the instruction disassembler and printer. */
2987
2988 static void
2989 print_insn_aarch64_word (bfd_vma pc,
2990 uint32_t word,
2991 struct disassemble_info *info)
2992 {
2993 static const char *err_msg[6] =
2994 {
2995 [ERR_OK] = "_",
2996 [-ERR_UND] = "undefined",
2997 [-ERR_UNP] = "unpredictable",
2998 [-ERR_NYI] = "NYI"
2999 };
3000
3001 int ret;
3002 aarch64_inst inst;
3003
3004 info->insn_info_valid = 1;
3005 info->branch_delay_insns = 0;
3006 info->data_size = 0;
3007 info->target = 0;
3008 info->target2 = 0;
3009
3010 if (info->flags & INSN_HAS_RELOC)
3011 /* If the instruction has a reloc associated with it, then
3012 the offset field in the instruction will actually be the
3013 addend for the reloc. (If we are using REL type relocs).
3014 In such cases, we can ignore the pc when computing
3015 addresses, since the addend is not currently pc-relative. */
3016 pc = 0;
3017
3018 ret = aarch64_decode_insn (word, &inst, no_aliases);
3019
3020 if (((word >> 21) & 0x3ff) == 1)
3021 {
3022 /* RESERVED for ALES. */
3023 assert (ret != ERR_OK);
3024 ret = ERR_NYI;
3025 }
3026
3027 switch (ret)
3028 {
3029 case ERR_UND:
3030 case ERR_UNP:
3031 case ERR_NYI:
3032 /* Handle undefined instructions. */
3033 info->insn_type = dis_noninsn;
3034 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
3035 word, err_msg[-ret]);
3036 break;
3037 case ERR_OK:
3038 user_friendly_fixup (&inst);
3039 print_aarch64_insn (pc, &inst, info);
3040 break;
3041 default:
3042 abort ();
3043 }
3044 }
3045
3046 /* Prevent mapping symbols ($x, $d, etc.) from
3047 being displayed in symbol relative addresses. */
3048
3049 bfd_boolean
3050 aarch64_symbol_is_valid (asymbol * sym,
3051 struct disassemble_info * info ATTRIBUTE_UNUSED)
3052 {
3053 const char * name;
3054
3055 if (sym == NULL)
3056 return FALSE;
3057
3058 name = bfd_asymbol_name (sym);
3059
3060 return name
3061 && (name[0] != '$'
3062 || (name[1] != 'x' && name[1] != 'd')
3063 || (name[2] != '\0' && name[2] != '.'));
3064 }
3065
3066 /* Print data bytes on INFO->STREAM. */
3067
3068 static void
3069 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3070 uint32_t word,
3071 struct disassemble_info *info)
3072 {
3073 switch (info->bytes_per_chunk)
3074 {
3075 case 1:
3076 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3077 break;
3078 case 2:
3079 info->fprintf_func (info->stream, ".short\t0x%04x", word);
3080 break;
3081 case 4:
3082 info->fprintf_func (info->stream, ".word\t0x%08x", word);
3083 break;
3084 default:
3085 abort ();
3086 }
3087 }
3088
3089 /* Try to infer the code or data type from a symbol.
3090 Returns nonzero if *MAP_TYPE was set. */
3091
3092 static int
3093 get_sym_code_type (struct disassemble_info *info, int n,
3094 enum map_type *map_type)
3095 {
3096 elf_symbol_type *es;
3097 unsigned int type;
3098 const char *name;
3099
3100 /* If the symbol is in a different section, ignore it. */
3101 if (info->section != NULL && info->section != info->symtab[n]->section)
3102 return FALSE;
3103
3104 es = *(elf_symbol_type **)(info->symtab + n);
3105 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3106
3107 /* If the symbol has function type then use that. */
3108 if (type == STT_FUNC)
3109 {
3110 *map_type = MAP_INSN;
3111 return TRUE;
3112 }
3113
3114 /* Check for mapping symbols. */
3115 name = bfd_asymbol_name(info->symtab[n]);
3116 if (name[0] == '$'
3117 && (name[1] == 'x' || name[1] == 'd')
3118 && (name[2] == '\0' || name[2] == '.'))
3119 {
3120 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3121 return TRUE;
3122 }
3123
3124 return FALSE;
3125 }
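
/* For reference, the mapping symbols recognised above follow the AArch64
   ELF convention: "$x" (optionally "$x.<suffix>") marks the start of a
   region of A64 code, while "$d" marks the start of a region of data such
   as a literal pool.  */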
3126
3127 /* Entry-point of the AArch64 disassembler. */
3128
3129 int
3130 print_insn_aarch64 (bfd_vma pc,
3131 struct disassemble_info *info)
3132 {
3133 bfd_byte buffer[INSNLEN];
3134 int status;
3135 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
3136 bfd_boolean found = FALSE;
3137 unsigned int size = 4;
3138 unsigned long data;
3139
3140 if (info->disassembler_options)
3141 {
3142 set_default_aarch64_dis_options (info);
3143
3144 parse_aarch64_dis_options (info->disassembler_options);
3145
3146 /* To avoid repeated parsing of these options, we remove them here. */
3147 info->disassembler_options = NULL;
3148 }
3149
3150 /* AArch64 instructions are always little-endian. */
3151 info->endian_code = BFD_ENDIAN_LITTLE;
3152
3153 /* First check the full symtab for a mapping symbol, even if there
3154 are no usable non-mapping symbols for this address. */
3155 if (info->symtab_size != 0
3156 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3157 {
3158 enum map_type type = MAP_INSN;
3159 int last_sym = -1;
3160 bfd_vma addr;
3161 int n;
3162
3163 if (pc <= last_mapping_addr)
3164 last_mapping_sym = -1;
3165
3166 /* Start scanning at the start of the function, or wherever
3167 we finished last time. */
3168 n = info->symtab_pos + 1;
3169 if (n < last_mapping_sym)
3170 n = last_mapping_sym;
3171
3172 /* Scan up to the location being disassembled. */
3173 for (; n < info->symtab_size; n++)
3174 {
3175 addr = bfd_asymbol_value (info->symtab[n]);
3176 if (addr > pc)
3177 break;
3178 if (get_sym_code_type (info, n, &type))
3179 {
3180 last_sym = n;
3181 found = TRUE;
3182 }
3183 }
3184
3185 if (!found)
3186 {
3187 n = info->symtab_pos;
3188 if (n < last_mapping_sym)
3189 n = last_mapping_sym;
3190
3191 /* No mapping symbol found at this address. Look backwards
3192 for a preceding one. */
3193 for (; n >= 0; n--)
3194 {
3195 if (get_sym_code_type (info, n, &type))
3196 {
3197 last_sym = n;
3198 found = TRUE;
3199 break;
3200 }
3201 }
3202 }
3203
3204 last_mapping_sym = last_sym;
3205 last_type = type;
3206
3207 /* Look a little bit ahead to see if we should print out
3208 less than four bytes of data. If there's a symbol,
3209 mapping or otherwise, after two bytes then don't
3210 print more. */
3211 if (last_type == MAP_DATA)
3212 {
3213 size = 4 - (pc & 3);
3214 for (n = last_sym + 1; n < info->symtab_size; n++)
3215 {
3216 addr = bfd_asymbol_value (info->symtab[n]);
3217 if (addr > pc)
3218 {
3219 if (addr - pc < size)
3220 size = addr - pc;
3221 break;
3222 }
3223 }
3224 /* If the next symbol is after three bytes, we need to
3225 print only part of the data, so that we can use either
3226 .byte or .short. */
3227 if (size == 3)
3228 size = (pc & 1) ? 1 : 2;
3229 }
3230 }
3231
3232 if (last_type == MAP_DATA)
3233 {
3234 /* size was set above. */
3235 info->bytes_per_chunk = size;
3236 info->display_endian = info->endian;
3237 printer = print_insn_data;
3238 }
3239 else
3240 {
3241 info->bytes_per_chunk = size = INSNLEN;
3242 info->display_endian = info->endian_code;
3243 printer = print_insn_aarch64_word;
3244 }
3245
3246 status = (*info->read_memory_func) (pc, buffer, size, info);
3247 if (status != 0)
3248 {
3249 (*info->memory_error_func) (status, pc, info);
3250 return -1;
3251 }
3252
3253 data = bfd_get_bits (buffer, size * 8,
3254 info->display_endian == BFD_ENDIAN_BIG);
3255
3256 (*printer) (pc, data, info);
3257
3258 return size;
3259 }
3260 \f
3261 void
3262 print_aarch64_disassembler_options (FILE *stream)
3263 {
3264 fprintf (stream, _("\n\
3265 The following AARCH64 specific disassembler options are supported for use\n\
3266 with the -M switch (multiple options should be separated by commas):\n"));
3267
3268 fprintf (stream, _("\n\
3269 no-aliases Don't print instruction aliases.\n"));
3270
3271 fprintf (stream, _("\n\
3272 aliases Do print instruction aliases.\n"));
3273
3274 #ifdef DEBUG_AARCH64
3275 fprintf (stream, _("\n\
3276 debug_dump Temp switch for debug trace.\n"));
3277 #endif /* DEBUG_AARCH64 */
3278
3279 fprintf (stream, _("\n"));
3280 }