1 /* aarch64-dis.c -- AArch64 disassembler.
2 Copyright (C) 2009-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include "bfd_stdint.h"
23 #include "dis-asm.h"
24 #include "libiberty.h"
25 #include "opintl.h"
26 #include "aarch64-dis.h"
27 #include "elf-bfd.h"
28
29 #define ERR_OK 0
30 #define ERR_UND -1
31 #define ERR_UNP -3
32 #define ERR_NYI -5
33
34 #define INSNLEN 4
35
36 /* Cached mapping symbol state. */
37 enum map_type
38 {
39 MAP_INSN,
40 MAP_DATA
41 };
42
43 static enum map_type last_type;
44 static int last_mapping_sym = -1;
45 static bfd_vma last_mapping_addr = 0;
46
 47 /* Other options.  */
 48 static int no_aliases = 0;	/* If set, disassemble as the most general instruction.  */
49 \f
50
51 static void
52 set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
53 {
54 }
55
56 static void
57 parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
58 {
59 /* Try to match options that are simple flags */
60 if (CONST_STRNEQ (option, "no-aliases"))
61 {
62 no_aliases = 1;
63 return;
64 }
65
66 if (CONST_STRNEQ (option, "aliases"))
67 {
68 no_aliases = 0;
69 return;
70 }
71
72 #ifdef DEBUG_AARCH64
73 if (CONST_STRNEQ (option, "debug_dump"))
74 {
75 debug_dump = 1;
76 return;
77 }
78 #endif /* DEBUG_AARCH64 */
79
80 /* Invalid option. */
81 fprintf (stderr, _("Unrecognised disassembler option: %s\n"), option);
82 }
83
84 static void
85 parse_aarch64_dis_options (const char *options)
86 {
87 const char *option_end;
88
89 if (options == NULL)
90 return;
91
92 while (*options != '\0')
93 {
94 /* Skip empty options. */
95 if (*options == ',')
96 {
97 options++;
98 continue;
99 }
100
 101 /* We know that *options is neither NUL nor a comma.  */
102 option_end = options + 1;
103 while (*option_end != ',' && *option_end != '\0')
104 option_end++;
105
106 parse_aarch64_dis_option (options, option_end - options);
107
108 /* Go on to the next one. If option_end points to a comma, it
109 will be skipped above. */
110 options = option_end;
111 }
112 }
113 \f
114 /* Functions doing the instruction disassembling. */
115
 116 /* The unnamed arguments consist of the number of fields and information about
 117 these fields; the VALUE of these fields is extracted from CODE and returned.
 118 MASK can be zero or the base mask of the opcode.
 119
 120 N.B. the fields are required to be in such an order that the most significant
 121 field for VALUE comes first, e.g. the <index> in
 122 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
 123 is encoded in H:L:M in some cases; in that case the fields should be passed
 124 in the order H, L, M.  */
125
126 static inline aarch64_insn
127 extract_fields (aarch64_insn code, aarch64_insn mask, ...)
128 {
129 uint32_t num;
130 const aarch64_field *field;
131 enum aarch64_field_kind kind;
132 va_list va;
133
134 va_start (va, mask);
135 num = va_arg (va, uint32_t);
136 assert (num <= 5);
137 aarch64_insn value = 0x0;
138 while (num--)
139 {
140 kind = va_arg (va, enum aarch64_field_kind);
141 field = &fields[kind];
142 value <<= field->width;
143 value |= extract_field (kind, code, mask);
144 }
145 return value;
146 }
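/* For example, the <index> that is encoded in H:L:M for a by-element form is
   typically collected with a call such as
     extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
   each field is OR-ed in after the accumulated value has been shifted left by
   that field's width, so the first field named (here FLD_H) ends up in the
   most significant bits of the returned value.  */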
147
148 /* Extract the value of all fields in SELF->fields from instruction CODE.
149 The least significant bit comes from the final field. */
150
151 static aarch64_insn
152 extract_all_fields (const aarch64_operand *self, aarch64_insn code)
153 {
154 aarch64_insn value;
155 unsigned int i;
156 enum aarch64_field_kind kind;
157
158 value = 0;
159 for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
160 {
161 kind = self->fields[i];
162 value <<= fields[kind].width;
163 value |= extract_field (kind, code, 0);
164 }
165 return value;
166 }
167
 168 /* Sign-extend VALUE from bit I, i.e. treat bit I as the sign bit.  */
169 static inline int32_t
170 sign_extend (aarch64_insn value, unsigned i)
171 {
172 uint32_t ret = value;
173
174 assert (i < 32);
175 if ((value >> i) & 0x1)
176 {
177 uint32_t val = (uint32_t)(-1) << i;
178 ret = ret | val;
179 }
180 return (int32_t) ret;
181 }
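/* For instance, a 9-bit immediate 0x1f0 sign-extended from bit 8 yields
   0xfffffff0, i.e. -16; a value whose bit I is clear is returned unchanged.  */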
182
 183 /* N.B. the following inline helper functions create a dependency on the
 184 order of operand qualifier enumerators.  */
185
186 /* Given VALUE, return qualifier for a general purpose register. */
187 static inline enum aarch64_opnd_qualifier
188 get_greg_qualifier_from_value (aarch64_insn value)
189 {
190 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
191 assert (value <= 0x1
192 && aarch64_get_qualifier_standard_value (qualifier) == value);
193 return qualifier;
194 }
195
196 /* Given VALUE, return qualifier for a vector register. This does not support
197 decoding instructions that accept the 2H vector type. */
198
199 static inline enum aarch64_opnd_qualifier
200 get_vreg_qualifier_from_value (aarch64_insn value)
201 {
202 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;
203
204 /* Instructions using vector type 2H should not call this function. Skip over
205 the 2H qualifier. */
206 if (qualifier >= AARCH64_OPND_QLF_V_2H)
207 qualifier += 1;
208
209 assert (value <= 0x8
210 && aarch64_get_qualifier_standard_value (qualifier) == value);
211 return qualifier;
212 }
213
214 /* Given VALUE, return qualifier for an FP or AdvSIMD scalar register. */
215 static inline enum aarch64_opnd_qualifier
216 get_sreg_qualifier_from_value (aarch64_insn value)
217 {
218 enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;
219
220 assert (value <= 0x4
221 && aarch64_get_qualifier_standard_value (qualifier) == value);
222 return qualifier;
223 }
224
 225 /* Given the instruction in *INST, which is probably half way through the
 226 decoding, our caller wants to know the expected qualifier for operand
 227 I. Return such a qualifier if we can establish it; otherwise return
 228 AARCH64_OPND_QLF_NIL.  */
229
230 static aarch64_opnd_qualifier_t
231 get_expected_qualifier (const aarch64_inst *inst, int i)
232 {
233 aarch64_opnd_qualifier_seq_t qualifiers;
234 /* Should not be called if the qualifier is known. */
235 assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
236 if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
237 i, qualifiers))
238 return qualifiers[i];
239 else
240 return AARCH64_OPND_QLF_NIL;
241 }
242
243 /* Operand extractors. */
244
245 int
246 aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
247 const aarch64_insn code,
248 const aarch64_inst *inst ATTRIBUTE_UNUSED)
249 {
250 info->reg.regno = extract_field (self->fields[0], code, 0);
251 return 1;
252 }
253
254 int
255 aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
256 const aarch64_insn code ATTRIBUTE_UNUSED,
257 const aarch64_inst *inst ATTRIBUTE_UNUSED)
258 {
259 assert (info->idx == 1
 260 || info->idx == 3);
261 info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
262 return 1;
263 }
264
265 /* e.g. IC <ic_op>{, <Xt>}. */
266 int
267 aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
268 const aarch64_insn code,
269 const aarch64_inst *inst ATTRIBUTE_UNUSED)
270 {
271 info->reg.regno = extract_field (self->fields[0], code, 0);
272 assert (info->idx == 1
273 && (aarch64_get_operand_class (inst->operands[0].type)
274 == AARCH64_OPND_CLASS_SYSTEM));
275 /* This will make the constraint checking happy and more importantly will
276 help the disassembler determine whether this operand is optional or
277 not. */
278 info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);
279
280 return 1;
281 }
282
283 /* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
284 int
285 aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
286 const aarch64_insn code,
287 const aarch64_inst *inst ATTRIBUTE_UNUSED)
288 {
289 /* regno */
290 info->reglane.regno = extract_field (self->fields[0], code,
291 inst->opcode->mask);
292
293 /* Index and/or type. */
294 if (inst->opcode->iclass == asisdone
295 || inst->opcode->iclass == asimdins)
296 {
297 if (info->type == AARCH64_OPND_En
298 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
299 {
300 unsigned shift;
301 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
302 assert (info->idx == 1); /* Vn */
303 aarch64_insn value = extract_field (FLD_imm4, code, 0);
304 /* Depend on AARCH64_OPND_Ed to determine the qualifier. */
305 info->qualifier = get_expected_qualifier (inst, info->idx);
306 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
307 info->reglane.index = value >> shift;
308 }
309 else
310 {
311 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
312 imm5<3:0> <V>
313 0000 RESERVED
314 xxx1 B
315 xx10 H
316 x100 S
317 1000 D */
318 int pos = -1;
319 aarch64_insn value = extract_field (FLD_imm5, code, 0);
320 while (++pos <= 3 && (value & 0x1) == 0)
321 value >>= 1;
322 if (pos > 3)
323 return 0;
324 info->qualifier = get_sreg_qualifier_from_value (pos);
325 info->reglane.index = (unsigned) (value >> 1);
326 }
327 }
328 else
329 {
330 /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
 331 or MUL <Vd>.<T>, <Vn>.<T>, <Vm>.<Ts>[<index>].  */
332
333 /* Need information in other operand(s) to help decoding. */
334 info->qualifier = get_expected_qualifier (inst, info->idx);
335 switch (info->qualifier)
336 {
337 case AARCH64_OPND_QLF_S_H:
338 /* h:l:m */
339 info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
340 FLD_M);
341 info->reglane.regno &= 0xf;
342 break;
343 case AARCH64_OPND_QLF_S_S:
344 /* h:l */
345 info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
346 break;
347 case AARCH64_OPND_QLF_S_D:
348 /* H */
349 info->reglane.index = extract_field (FLD_H, code, 0);
350 break;
351 default:
352 return 0;
353 }
354 }
355
356 return 1;
357 }
358
359 int
360 aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
361 const aarch64_insn code,
362 const aarch64_inst *inst ATTRIBUTE_UNUSED)
363 {
364 /* R */
365 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
366 /* len */
367 info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
368 return 1;
369 }
370
371 /* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions. */
372 int
373 aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
374 aarch64_opnd_info *info, const aarch64_insn code,
375 const aarch64_inst *inst)
376 {
377 aarch64_insn value;
378 /* Number of elements in each structure to be loaded/stored. */
379 unsigned expected_num = get_opcode_dependent_value (inst->opcode);
380
381 struct
382 {
383 unsigned is_reserved;
384 unsigned num_regs;
385 unsigned num_elements;
386 } data [] =
387 { {0, 4, 4},
388 {1, 4, 4},
389 {0, 4, 1},
390 {0, 4, 2},
391 {0, 3, 3},
392 {1, 3, 3},
393 {0, 3, 1},
394 {0, 1, 1},
395 {0, 2, 2},
396 {1, 2, 2},
397 {0, 2, 1},
398 };
399
400 /* Rt */
401 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
402 /* opcode */
403 value = extract_field (FLD_opcode, code, 0);
404 if (expected_num != data[value].num_elements || data[value].is_reserved)
405 return 0;
406 info->reglist.num_regs = data[value].num_regs;
407
408 return 1;
409 }
410
411 /* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
412 lanes instructions. */
413 int
414 aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
415 aarch64_opnd_info *info, const aarch64_insn code,
416 const aarch64_inst *inst)
417 {
418 aarch64_insn value;
419
420 /* Rt */
421 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
422 /* S */
423 value = extract_field (FLD_S, code, 0);
424
425 /* Number of registers is equal to the number of elements in
426 each structure to be loaded/stored. */
427 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
428 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
429
430 /* Except when it is LD1R. */
431 if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
432 info->reglist.num_regs = 2;
433
434 return 1;
435 }
436
437 /* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
438 load/store single element instructions. */
439 int
440 aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
441 aarch64_opnd_info *info, const aarch64_insn code,
442 const aarch64_inst *inst ATTRIBUTE_UNUSED)
443 {
444 aarch64_field field = {0, 0};
445 aarch64_insn QSsize; /* fields Q:S:size. */
446 aarch64_insn opcodeh2; /* opcode<2:1> */
447
448 /* Rt */
449 info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
450
451 /* Decode the index, opcode<2:1> and size. */
452 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
453 opcodeh2 = extract_field_2 (&field, code, 0);
454 QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
455 switch (opcodeh2)
456 {
457 case 0x0:
458 info->qualifier = AARCH64_OPND_QLF_S_B;
459 /* Index encoded in "Q:S:size". */
460 info->reglist.index = QSsize;
461 break;
462 case 0x1:
463 if (QSsize & 0x1)
464 /* UND. */
465 return 0;
466 info->qualifier = AARCH64_OPND_QLF_S_H;
467 /* Index encoded in "Q:S:size<1>". */
468 info->reglist.index = QSsize >> 1;
469 break;
470 case 0x2:
471 if ((QSsize >> 1) & 0x1)
472 /* UND. */
473 return 0;
474 if ((QSsize & 0x1) == 0)
475 {
476 info->qualifier = AARCH64_OPND_QLF_S_S;
477 /* Index encoded in "Q:S". */
478 info->reglist.index = QSsize >> 2;
479 }
480 else
481 {
482 if (extract_field (FLD_S, code, 0))
483 /* UND */
484 return 0;
485 info->qualifier = AARCH64_OPND_QLF_S_D;
486 /* Index encoded in "Q". */
487 info->reglist.index = QSsize >> 3;
488 }
489 break;
490 default:
491 return 0;
492 }
493
494 info->reglist.has_index = 1;
495 info->reglist.num_regs = 0;
496 /* Number of registers is equal to the number of elements in
497 each structure to be loaded/stored. */
498 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
499 assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);
500
501 return 1;
502 }
503
504 /* Decode fields immh:immb and/or Q for e.g.
505 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
506 or SSHR <V><d>, <V><n>, #<shift>. */
507
508 int
509 aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
510 aarch64_opnd_info *info, const aarch64_insn code,
511 const aarch64_inst *inst)
512 {
513 int pos;
514 aarch64_insn Q, imm, immh;
515 enum aarch64_insn_class iclass = inst->opcode->iclass;
516
517 immh = extract_field (FLD_immh, code, 0);
518 if (immh == 0)
519 return 0;
520 imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
521 pos = 4;
522 /* Get highest set bit in immh. */
523 while (--pos >= 0 && (immh & 0x8) == 0)
524 immh <<= 1;
525
526 assert ((iclass == asimdshf || iclass == asisdshf)
527 && (info->type == AARCH64_OPND_IMM_VLSR
528 || info->type == AARCH64_OPND_IMM_VLSL));
529
530 if (iclass == asimdshf)
531 {
532 Q = extract_field (FLD_Q, code, 0);
533 /* immh Q <T>
534 0000 x SEE AdvSIMD modified immediate
535 0001 0 8B
536 0001 1 16B
537 001x 0 4H
538 001x 1 8H
539 01xx 0 2S
540 01xx 1 4S
541 1xxx 0 RESERVED
542 1xxx 1 2D */
543 info->qualifier =
544 get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
545 }
546 else
547 info->qualifier = get_sreg_qualifier_from_value (pos);
548
549 if (info->type == AARCH64_OPND_IMM_VLSR)
550 /* immh <shift>
551 0000 SEE AdvSIMD modified immediate
552 0001 (16-UInt(immh:immb))
553 001x (32-UInt(immh:immb))
554 01xx (64-UInt(immh:immb))
555 1xxx (128-UInt(immh:immb)) */
556 info->imm.value = (16 << pos) - imm;
557 else
558 /* immh:immb
559 immh <shift>
560 0000 SEE AdvSIMD modified immediate
561 0001 (UInt(immh:immb)-8)
562 001x (UInt(immh:immb)-16)
563 01xx (UInt(immh:immb)-32)
564 1xxx (UInt(immh:immb)-64) */
565 info->imm.value = imm - (8 << pos);
566
567 return 1;
568 }
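/* Worked example: SSHR with immh:immb = 0b0001010 has the highest set bit of
   immh at position 0 (8-bit elements), so the decoded right-shift amount is
   (16 << 0) - 10 = 6.  */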
569
 570 /* Decode the shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>.  */
571 int
572 aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
573 aarch64_opnd_info *info, const aarch64_insn code,
574 const aarch64_inst *inst ATTRIBUTE_UNUSED)
575 {
576 int64_t imm;
577 aarch64_insn val;
578 val = extract_field (FLD_size, code, 0);
579 switch (val)
580 {
581 case 0: imm = 8; break;
582 case 1: imm = 16; break;
583 case 2: imm = 32; break;
584 default: return 0;
585 }
586 info->imm.value = imm;
587 return 1;
588 }
589
590 /* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.
 591 The value in the field(s) will be extracted as an unsigned immediate value.  */
592 int
593 aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
594 const aarch64_insn code,
595 const aarch64_inst *inst ATTRIBUTE_UNUSED)
596 {
597 int64_t imm;
598
599 imm = extract_all_fields (self, code);
600
601 if (operand_need_sign_extension (self))
602 imm = sign_extend (imm, get_operand_fields_width (self) - 1);
603
604 if (operand_need_shift_by_two (self))
605 imm <<= 2;
606
607 if (info->type == AARCH64_OPND_ADDR_ADRP)
608 imm <<= 12;
609
610 info->imm.value = imm;
611 return 1;
612 }
613
614 /* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
615 int
616 aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
617 const aarch64_insn code,
618 const aarch64_inst *inst ATTRIBUTE_UNUSED)
619 {
620 aarch64_ext_imm (self, info, code, inst);
621 info->shifter.kind = AARCH64_MOD_LSL;
622 info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
623 return 1;
624 }
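/* E.g. an 'hw' field of 2 gives a shifter amount of 32, so the operand is
   displayed as #<imm16>, LSL #32 (hw = 2 or 3 only occurs in the 64-bit
   forms).  */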
625
626 /* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
627 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
628 int
629 aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
630 aarch64_opnd_info *info,
631 const aarch64_insn code,
632 const aarch64_inst *inst ATTRIBUTE_UNUSED)
633 {
634 uint64_t imm;
635 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
636 aarch64_field field = {0, 0};
637
638 assert (info->idx == 1);
639
640 if (info->type == AARCH64_OPND_SIMD_FPIMM)
641 info->imm.is_fp = 1;
642
643 /* a:b:c:d:e:f:g:h */
644 imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
645 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
646 {
647 /* Either MOVI <Dd>, #<imm>
648 or MOVI <Vd>.2D, #<imm>.
649 <imm> is a 64-bit immediate
650 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
651 encoded in "a:b:c:d:e:f:g:h". */
652 int i;
653 unsigned abcdefgh = imm;
654 for (imm = 0ull, i = 0; i < 8; i++)
655 if (((abcdefgh >> i) & 0x1) != 0)
656 imm |= 0xffull << (8 * i);
657 }
658 info->imm.value = imm;
659
660 /* cmode */
661 info->qualifier = get_expected_qualifier (inst, info->idx);
662 switch (info->qualifier)
663 {
664 case AARCH64_OPND_QLF_NIL:
665 /* no shift */
666 info->shifter.kind = AARCH64_MOD_NONE;
667 return 1;
668 case AARCH64_OPND_QLF_LSL:
669 /* shift zeros */
670 info->shifter.kind = AARCH64_MOD_LSL;
671 switch (aarch64_get_qualifier_esize (opnd0_qualifier))
672 {
673 case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break; /* per word */
674 case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break; /* per half */
675 case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break; /* per byte */
676 default: assert (0); return 0;
677 }
678 /* 00: 0; 01: 8; 10:16; 11:24. */
679 info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
680 break;
681 case AARCH64_OPND_QLF_MSL:
682 /* shift ones */
683 info->shifter.kind = AARCH64_MOD_MSL;
684 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
685 info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
686 break;
687 default:
688 assert (0);
689 return 0;
690 }
691
692 return 1;
693 }
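/* In the 64-bit expansion above, e.g. "a:b:c:d:e:f:g:h" = 0b10000001 becomes
   0xff000000000000ff: each set bit selects one all-ones byte of the 64-bit
   immediate.  */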
694
695 /* Decode an 8-bit floating-point immediate. */
696 int
697 aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
698 const aarch64_insn code,
699 const aarch64_inst *inst ATTRIBUTE_UNUSED)
700 {
701 info->imm.value = extract_all_fields (self, code);
702 info->imm.is_fp = 1;
703 return 1;
704 }
705
706 /* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
707 int
708 aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
709 aarch64_opnd_info *info, const aarch64_insn code,
710 const aarch64_inst *inst ATTRIBUTE_UNUSED)
711 {
 712 info->imm.value = 64 - extract_field (FLD_scale, code, 0);
713 return 1;
714 }
715
716 /* Decode arithmetic immediate for e.g.
717 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
718 int
719 aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
720 aarch64_opnd_info *info, const aarch64_insn code,
721 const aarch64_inst *inst ATTRIBUTE_UNUSED)
722 {
723 aarch64_insn value;
724
725 info->shifter.kind = AARCH64_MOD_LSL;
726 /* shift */
727 value = extract_field (FLD_shift, code, 0);
728 if (value >= 2)
729 return 0;
730 info->shifter.amount = value ? 12 : 0;
731 /* imm12 (unsigned) */
732 info->imm.value = extract_field (FLD_imm12, code, 0);
733
734 return 1;
735 }
736
737 /* Decode logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>. */
738
739 int
740 aarch64_ext_limm (const aarch64_operand *self ATTRIBUTE_UNUSED,
741 aarch64_opnd_info *info, const aarch64_insn code,
742 const aarch64_inst *inst ATTRIBUTE_UNUSED)
743 {
744 uint64_t imm, mask;
745 uint32_t sf;
746 uint32_t N, R, S;
747 unsigned simd_size;
748 aarch64_insn value;
749
750 value = extract_fields (code, 0, 3, FLD_N, FLD_immr, FLD_imms);
751 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_W
752 || inst->operands[0].qualifier == AARCH64_OPND_QLF_X);
753 sf = aarch64_get_qualifier_esize (inst->operands[0].qualifier) != 4;
754
755 /* value is N:immr:imms. */
756 S = value & 0x3f;
757 R = (value >> 6) & 0x3f;
758 N = (value >> 12) & 0x1;
759
760 if (sf == 0 && N == 1)
761 return 0;
762
 763 /* The immediate value is S+1 consecutive bits set to 1, left-rotated by
 764 SIMDsize - R (in other words, right-rotated by R), then replicated.  */
765 if (N != 0)
766 {
767 simd_size = 64;
768 mask = 0xffffffffffffffffull;
769 }
770 else
771 {
772 switch (S)
773 {
774 case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32; break;
775 case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
776 case 0x30 ... 0x37: /* 110xxx */ simd_size = 8; S &= 0x7; break;
777 case 0x38 ... 0x3b: /* 1110xx */ simd_size = 4; S &= 0x3; break;
778 case 0x3c ... 0x3d: /* 11110x */ simd_size = 2; S &= 0x1; break;
779 default: return 0;
780 }
781 mask = (1ull << simd_size) - 1;
782 /* Top bits are IGNORED. */
783 R &= simd_size - 1;
784 }
785 /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected. */
786 if (S == simd_size - 1)
787 return 0;
788 /* S+1 consecutive bits to 1. */
789 /* NOTE: S can't be 63 due to detection above. */
790 imm = (1ull << (S + 1)) - 1;
791 /* Rotate to the left by simd_size - R. */
792 if (R != 0)
793 imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
794 /* Replicate the value according to SIMD size. */
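/* N.B. the cases below fall through deliberately; each step doubles the
   width of the replicated pattern until all 64 bits are filled.  */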
795 switch (simd_size)
796 {
797 case 2: imm = (imm << 2) | imm;
798 case 4: imm = (imm << 4) | imm;
799 case 8: imm = (imm << 8) | imm;
800 case 16: imm = (imm << 16) | imm;
801 case 32: imm = (imm << 32) | imm;
802 case 64: break;
803 default: assert (0); return 0;
804 }
805
806 info->imm.value = sf ? imm : imm & 0xffffffff;
807
808 return 1;
809 }
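/* Worked example: N = 0, immr = 0b000001, imms = 0b111100 selects a 2-bit
   element "01", rotates it right by 1 to get "10" and replicates it,
   giving 0xaaaaaaaa for a W destination (0xaaaaaaaaaaaaaaaa for X).  */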
810
811 /* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
812 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
813 int
814 aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
815 aarch64_opnd_info *info,
816 const aarch64_insn code, const aarch64_inst *inst)
817 {
818 aarch64_insn value;
819
820 /* Rt */
821 info->reg.regno = extract_field (FLD_Rt, code, 0);
822
823 /* size */
824 value = extract_field (FLD_ldst_size, code, 0);
825 if (inst->opcode->iclass == ldstpair_indexed
826 || inst->opcode->iclass == ldstnapair_offs
827 || inst->opcode->iclass == ldstpair_off
828 || inst->opcode->iclass == loadlit)
829 {
830 enum aarch64_opnd_qualifier qualifier;
831 switch (value)
832 {
833 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
834 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
835 case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
836 default: return 0;
837 }
838 info->qualifier = qualifier;
839 }
840 else
841 {
842 /* opc1:size */
843 value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
844 if (value > 0x4)
845 return 0;
846 info->qualifier = get_sreg_qualifier_from_value (value);
847 }
848
849 return 1;
850 }
851
852 /* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
853 int
854 aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
855 aarch64_opnd_info *info,
856 aarch64_insn code,
857 const aarch64_inst *inst ATTRIBUTE_UNUSED)
858 {
859 /* Rn */
860 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
861 return 1;
862 }
863
864 /* Decode the address operand for e.g.
865 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
866 int
867 aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
868 aarch64_opnd_info *info,
869 aarch64_insn code, const aarch64_inst *inst)
870 {
871 aarch64_insn S, value;
872
873 /* Rn */
874 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
875 /* Rm */
876 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
877 /* option */
878 value = extract_field (FLD_option, code, 0);
879 info->shifter.kind =
880 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
881 /* Fix-up the shifter kind; although the table-driven approach is
882 efficient, it is slightly inflexible, thus needing this fix-up. */
883 if (info->shifter.kind == AARCH64_MOD_UXTX)
884 info->shifter.kind = AARCH64_MOD_LSL;
885 /* S */
886 S = extract_field (FLD_S, code, 0);
887 if (S == 0)
888 {
889 info->shifter.amount = 0;
890 info->shifter.amount_present = 0;
891 }
892 else
893 {
894 int size;
 895 /* Need information in other operand(s) to help decode the 'S'
 896 field.  */
 897 info->qualifier = get_expected_qualifier (inst, info->idx);
 898 /* Get the size of the data element that is accessed, which may be
 899 different from the source register size, e.g. in strb/ldrb.  */
900 size = aarch64_get_qualifier_esize (info->qualifier);
901 info->shifter.amount = get_logsz (size);
902 info->shifter.amount_present = 1;
903 }
904
905 return 1;
906 }
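/* E.g. for LDR <Xt>, [<Xn|SP>, <Xm>, LSL #3] the S bit is set and the data
   element is 8 bytes, so the decoded shift amount is get_logsz (8) = 3.  */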
907
908 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>. */
909 int
910 aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
911 aarch64_insn code, const aarch64_inst *inst)
912 {
913 aarch64_insn imm;
914 info->qualifier = get_expected_qualifier (inst, info->idx);
915
916 /* Rn */
917 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
918 /* simm (imm9 or imm7) */
919 imm = extract_field (self->fields[0], code, 0);
920 info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
921 if (self->fields[0] == FLD_imm7)
922 /* scaled immediate in ld/st pair instructions. */
923 info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
924 /* qualifier */
925 if (inst->opcode->iclass == ldst_unscaled
926 || inst->opcode->iclass == ldstnapair_offs
927 || inst->opcode->iclass == ldstpair_off
928 || inst->opcode->iclass == ldst_unpriv)
929 info->addr.writeback = 0;
930 else
931 {
932 /* pre/post- index */
933 info->addr.writeback = 1;
934 if (extract_field (self->fields[1], code, 0) == 1)
935 info->addr.preind = 1;
936 else
937 info->addr.postind = 1;
938 }
939
940 return 1;
941 }
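/* For the imm7 case, e.g. LDP <Xt1>, <Xt2>, [<Xn|SP>], #-16 is encoded with
   imm7 = -2; the field is sign-extended and then scaled by the 8-byte element
   size implied by the X-register qualifier.  */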
942
943 /* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}]. */
944 int
945 aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
946 aarch64_insn code,
947 const aarch64_inst *inst ATTRIBUTE_UNUSED)
948 {
949 int shift;
950 info->qualifier = get_expected_qualifier (inst, info->idx);
951 shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
952 /* Rn */
953 info->addr.base_regno = extract_field (self->fields[0], code, 0);
954 /* uimm12 */
955 info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
956 return 1;
957 }
958
959 /* Decode the address operand for e.g.
960 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
961 int
962 aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
963 aarch64_opnd_info *info,
964 aarch64_insn code, const aarch64_inst *inst)
965 {
966 /* The opcode dependent area stores the number of elements in
967 each structure to be loaded/stored. */
968 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
969
970 /* Rn */
971 info->addr.base_regno = extract_field (FLD_Rn, code, 0);
972 /* Rm | #<amount> */
973 info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
974 if (info->addr.offset.regno == 31)
975 {
976 if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
 977 /* Special handling of loading a single structure to all lanes.  */
978 info->addr.offset.imm = (is_ld1r ? 1
979 : inst->operands[0].reglist.num_regs)
980 * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
981 else
982 info->addr.offset.imm = inst->operands[0].reglist.num_regs
983 * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
984 * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
985 }
986 else
987 info->addr.offset.is_reg = 1;
988 info->addr.writeback = 1;
989
990 return 1;
991 }
992
993 /* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
994 int
995 aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
996 aarch64_opnd_info *info,
997 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
998 {
999 aarch64_insn value;
1000 /* cond */
1001 value = extract_field (FLD_cond, code, 0);
1002 info->cond = get_cond_from_value (value);
1003 return 1;
1004 }
1005
1006 /* Decode the system register operand for e.g. MRS <Xt>, <systemreg>. */
1007 int
1008 aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
1009 aarch64_opnd_info *info,
1010 aarch64_insn code,
1011 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1012 {
1013 /* op0:op1:CRn:CRm:op2 */
1014 info->sysreg = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
1015 FLD_CRm, FLD_op2);
1016 return 1;
1017 }
1018
1019 /* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
1020 int
1021 aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
1022 aarch64_opnd_info *info, aarch64_insn code,
1023 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1024 {
1025 int i;
1026 /* op1:op2 */
1027 info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
1028 for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
1029 if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
1030 return 1;
1031 /* Reserved value in <pstatefield>. */
1032 return 0;
1033 }
1034
1035 /* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
1036 int
1037 aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
1038 aarch64_opnd_info *info,
1039 aarch64_insn code,
1040 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1041 {
1042 int i;
1043 aarch64_insn value;
1044 const aarch64_sys_ins_reg *sysins_ops;
1045 /* op0:op1:CRn:CRm:op2 */
1046 value = extract_fields (code, 0, 5,
1047 FLD_op0, FLD_op1, FLD_CRn,
1048 FLD_CRm, FLD_op2);
1049
1050 switch (info->type)
1051 {
1052 case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
1053 case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
1054 case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
1055 case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
1056 default: assert (0); return 0;
1057 }
1058
1059 for (i = 0; sysins_ops[i].name != NULL; ++i)
1060 if (sysins_ops[i].value == value)
1061 {
1062 info->sysins_op = sysins_ops + i;
1063 DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
1064 info->sysins_op->name,
1065 (unsigned)info->sysins_op->value,
1066 aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
1067 return 1;
1068 }
1069
1070 return 0;
1071 }
1072
1073 /* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
1074
1075 int
1076 aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
1077 aarch64_opnd_info *info,
1078 aarch64_insn code,
1079 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1080 {
1081 /* CRm */
1082 info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
1083 return 1;
1084 }
1085
1086 /* Decode the prefetch operation option operand for e.g.
1087 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
1088
1089 int
1090 aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
1091 aarch64_opnd_info *info,
1092 aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
1093 {
1094 /* prfop in Rt */
1095 info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
1096 return 1;
1097 }
1098
1099 /* Decode the hint number for an alias taking an operand. Set info->hint_option
1100 to the matching name/value pair in aarch64_hint_options. */
1101
1102 int
1103 aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
1104 aarch64_opnd_info *info,
1105 aarch64_insn code,
1106 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1107 {
1108 /* CRm:op2. */
1109 unsigned hint_number;
1110 int i;
1111
1112 hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);
1113
1114 for (i = 0; aarch64_hint_options[i].name != NULL; i++)
1115 {
1116 if (hint_number == aarch64_hint_options[i].value)
1117 {
1118 info->hint_option = &(aarch64_hint_options[i]);
1119 return 1;
1120 }
1121 }
1122
1123 return 0;
1124 }
1125
1126 /* Decode the extended register operand for e.g.
1127 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1128 int
1129 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1130 aarch64_opnd_info *info,
1131 aarch64_insn code,
1132 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1133 {
1134 aarch64_insn value;
1135
1136 /* Rm */
1137 info->reg.regno = extract_field (FLD_Rm, code, 0);
1138 /* option */
1139 value = extract_field (FLD_option, code, 0);
1140 info->shifter.kind =
1141 aarch64_get_operand_modifier_from_value (value, TRUE /* extend_p */);
1142 /* imm3 */
1143 info->shifter.amount = extract_field (FLD_imm3, code, 0);
1144
1145 /* This makes the constraint checking happy. */
1146 info->shifter.operator_present = 1;
1147
1148 /* Assume inst->operands[0].qualifier has been resolved. */
1149 assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1150 info->qualifier = AARCH64_OPND_QLF_W;
1151 if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1152 && (info->shifter.kind == AARCH64_MOD_UXTX
1153 || info->shifter.kind == AARCH64_MOD_SXTX))
1154 info->qualifier = AARCH64_OPND_QLF_X;
1155
1156 return 1;
1157 }
1158
1159 /* Decode the shifted register operand for e.g.
1160 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1161 int
1162 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1163 aarch64_opnd_info *info,
1164 aarch64_insn code,
1165 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1166 {
1167 aarch64_insn value;
1168
1169 /* Rm */
1170 info->reg.regno = extract_field (FLD_Rm, code, 0);
1171 /* shift */
1172 value = extract_field (FLD_shift, code, 0);
1173 info->shifter.kind =
1174 aarch64_get_operand_modifier_from_value (value, FALSE /* extend_p */);
1175 if (info->shifter.kind == AARCH64_MOD_ROR
1176 && inst->opcode->iclass != log_shift)
1177 /* ROR is not available for the shifted register operand in arithmetic
1178 instructions. */
1179 return 0;
1180 /* imm6 */
1181 info->shifter.amount = extract_field (FLD_imm6, code, 0);
1182
1183 /* This makes the constraint checking happy. */
1184 info->shifter.operator_present = 1;
1185
1186 return 1;
1187 }
1188
1189 /* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1190 array specifies which field to use for Zn. MM is encoded in the
1191 concatenation of imm5 and SVE_tszh, with imm5 being the less
1192 significant part. */
1193 int
1194 aarch64_ext_sve_index (const aarch64_operand *self,
1195 aarch64_opnd_info *info, aarch64_insn code,
1196 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1197 {
1198 int val;
1199
1200 info->reglane.regno = extract_field (self->fields[0], code, 0);
1201 val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1202 if ((val & 15) == 0)
1203 return 0;
1204 while ((val & 1) == 0)
1205 val /= 2;
1206 info->reglane.index = val / 2;
1207 return 1;
1208 }
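/* Illustrative decode: a tszh:imm5 value of 0b0001100 has its lowest set bit
   at position 2; the loop strips the two zero bits, leaving 0b11, so the
   index is 0b11 / 2 = 1 (the element size itself is conveyed by the operand
   qualifier and is not decoded here).  */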
1209
1210 /* Decode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1211 to use for Zn. The opcode-dependent value specifies the number
1212 of registers in the list. */
1213 int
1214 aarch64_ext_sve_reglist (const aarch64_operand *self,
1215 aarch64_opnd_info *info, aarch64_insn code,
1216 const aarch64_inst *inst ATTRIBUTE_UNUSED)
1217 {
1218 info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1219 info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1220 return 1;
1221 }
1222 \f
1223 /* Bitfields that are commonly used to encode certain operands' information
1224 may be partially used as part of the base opcode in some instructions.
1225 For example, the bit 1 of the field 'size' in
1226 FCVTXN <Vb><d>, <Va><n>
1227 is actually part of the base opcode, while only size<0> is available
1228 for encoding the register type. Another example is the AdvSIMD
1229 instruction ORR (register), in which the field 'size' is also used for
1230 the base opcode, leaving only the field 'Q' available to encode the
1231 vector register arrangement specifier '8B' or '16B'.
1232
1233 This function tries to deduce the qualifier from the value of partially
1234 constrained field(s). Given the VALUE of such a field or fields, the
1235 qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1236 operand encoding), the function returns the matching qualifier or
1237 AARCH64_OPND_QLF_NIL if nothing matches.
1238
1239 N.B. CANDIDATES is a group of possible qualifiers that are valid for
1240 one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1241 may end with AARCH64_OPND_QLF_NIL. */
1242
1243 static enum aarch64_opnd_qualifier
1244 get_qualifier_from_partial_encoding (aarch64_insn value,
1245 const enum aarch64_opnd_qualifier* \
1246 candidates,
1247 aarch64_insn mask)
1248 {
1249 int i;
1250 DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1251 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1252 {
1253 aarch64_insn standard_value;
1254 if (candidates[i] == AARCH64_OPND_QLF_NIL)
1255 break;
1256 standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1257 if ((standard_value & mask) == (value & mask))
1258 return candidates[i];
1259 }
1260 return AARCH64_OPND_QLF_NIL;
1261 }
1262
1263 /* Given a list of qualifier sequences, return all possible valid qualifiers
1264 for operand IDX in QUALIFIERS.
1265 Assume QUALIFIERS is an array whose length is large enough. */
1266
1267 static void
1268 get_operand_possible_qualifiers (int idx,
1269 const aarch64_opnd_qualifier_seq_t *list,
1270 enum aarch64_opnd_qualifier *qualifiers)
1271 {
1272 int i;
1273 for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1274 if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1275 break;
1276 }
1277
1278 /* Decode the size Q field for e.g. SHADD.
 1279 We tag one operand with the qualifier according to the code;
 1280 whether the qualifier is valid for this opcode or not is the
 1281 duty of the semantic checking.  */
1282
1283 static int
1284 decode_sizeq (aarch64_inst *inst)
1285 {
1286 int idx;
1287 enum aarch64_opnd_qualifier qualifier;
1288 aarch64_insn code;
1289 aarch64_insn value, mask;
1290 enum aarch64_field_kind fld_sz;
1291 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1292
1293 if (inst->opcode->iclass == asisdlse
1294 || inst->opcode->iclass == asisdlsep
1295 || inst->opcode->iclass == asisdlso
1296 || inst->opcode->iclass == asisdlsop)
1297 fld_sz = FLD_vldst_size;
1298 else
1299 fld_sz = FLD_size;
1300
1301 code = inst->value;
1302 value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
 1303 /* Determine which bits of the fields Q and size are actually
1304 available for operand encoding. Opcodes like FMAXNM and FMLA have
1305 size[1] unavailable. */
1306 mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1307
 1308 /* The index of the operand to be tagged with a qualifier, and the qualifier
 1309 itself, are deduced from the value of the size and Q fields and the
 1310 possible valid qualifier lists.  */
1311 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1312 DEBUG_TRACE ("key idx: %d", idx);
1313
 1314 /* For most related instructions, size:Q is fully available for operand
1315 encoding. */
1316 if (mask == 0x7)
1317 {
1318 inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1319 return 1;
1320 }
1321
1322 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1323 candidates);
1324 #ifdef DEBUG_AARCH64
1325 if (debug_dump)
1326 {
1327 int i;
1328 for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1329 && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1330 DEBUG_TRACE ("qualifier %d: %s", i,
1331 aarch64_get_qualifier_name(candidates[i]));
1332 DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1333 }
1334 #endif /* DEBUG_AARCH64 */
1335
1336 qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1337
1338 if (qualifier == AARCH64_OPND_QLF_NIL)
1339 return 0;
1340
1341 inst->operands[idx].qualifier = qualifier;
1342 return 1;
1343 }
1344
1345 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1346 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1347
1348 static int
1349 decode_asimd_fcvt (aarch64_inst *inst)
1350 {
1351 aarch64_field field = {0, 0};
1352 aarch64_insn value;
1353 enum aarch64_opnd_qualifier qualifier;
1354
1355 gen_sub_field (FLD_size, 0, 1, &field);
1356 value = extract_field_2 (&field, inst->value, 0);
1357 qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
1358 : AARCH64_OPND_QLF_V_2D;
1359 switch (inst->opcode->op)
1360 {
1361 case OP_FCVTN:
1362 case OP_FCVTN2:
1363 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1364 inst->operands[1].qualifier = qualifier;
1365 break;
1366 case OP_FCVTL:
1367 case OP_FCVTL2:
1368 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1369 inst->operands[0].qualifier = qualifier;
1370 break;
1371 default:
1372 assert (0);
1373 return 0;
1374 }
1375
1376 return 1;
1377 }
1378
1379 /* Decode size[0], i.e. bit 22, for
1380 e.g. FCVTXN <Vb><d>, <Va><n>. */
1381
1382 static int
1383 decode_asisd_fcvtxn (aarch64_inst *inst)
1384 {
1385 aarch64_field field = {0, 0};
1386 gen_sub_field (FLD_size, 0, 1, &field);
1387 if (!extract_field_2 (&field, inst->value, 0))
1388 return 0;
1389 inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
1390 return 1;
1391 }
1392
1393 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1394 static int
1395 decode_fcvt (aarch64_inst *inst)
1396 {
1397 enum aarch64_opnd_qualifier qualifier;
1398 aarch64_insn value;
1399 const aarch64_field field = {15, 2};
1400
1401 /* opc dstsize */
1402 value = extract_field_2 (&field, inst->value, 0);
1403 switch (value)
1404 {
1405 case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
1406 case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
1407 case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
1408 default: return 0;
1409 }
1410 inst->operands[0].qualifier = qualifier;
1411
1412 return 1;
1413 }
1414
1415 /* Do miscellaneous decodings that are not common enough to be driven by
1416 flags. */
1417
1418 static int
1419 do_misc_decoding (aarch64_inst *inst)
1420 {
1421 switch (inst->opcode->op)
1422 {
1423 case OP_FCVT:
1424 return decode_fcvt (inst);
1425 case OP_FCVTN:
1426 case OP_FCVTN2:
1427 case OP_FCVTL:
1428 case OP_FCVTL2:
1429 return decode_asimd_fcvt (inst);
1430 case OP_FCVTXN_S:
1431 return decode_asisd_fcvtxn (inst);
1432 default:
1433 return 0;
1434 }
1435 }
1436
 1437 /* Opcodes that have fields shared by multiple operands are usually marked
 1438 with flags. In this function, we detect such flags, decode the related
 1439 field(s) and store the information in one of the related operands. The
 1440 'one' operand is not an arbitrary operand but one of the operands that can
 1441 accommodate all the information that has been decoded.  */
1442
1443 static int
1444 do_special_decoding (aarch64_inst *inst)
1445 {
1446 int idx;
1447 aarch64_insn value;
 1448 /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
1449 if (inst->opcode->flags & F_COND)
1450 {
1451 value = extract_field (FLD_cond2, inst->value, 0);
1452 inst->cond = get_cond_from_value (value);
1453 }
1454 /* 'sf' field. */
1455 if (inst->opcode->flags & F_SF)
1456 {
1457 idx = select_operand_for_sf_field_coding (inst->opcode);
1458 value = extract_field (FLD_sf, inst->value, 0);
1459 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1460 if ((inst->opcode->flags & F_N)
1461 && extract_field (FLD_N, inst->value, 0) != value)
1462 return 0;
1463 }
 1464 /* The 'lse_sz' field.  */
1465 if (inst->opcode->flags & F_LSE_SZ)
1466 {
1467 idx = select_operand_for_sf_field_coding (inst->opcode);
1468 value = extract_field (FLD_lse_sz, inst->value, 0);
1469 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1470 }
1471 /* size:Q fields. */
1472 if (inst->opcode->flags & F_SIZEQ)
1473 return decode_sizeq (inst);
1474
1475 if (inst->opcode->flags & F_FPTYPE)
1476 {
1477 idx = select_operand_for_fptype_field_coding (inst->opcode);
1478 value = extract_field (FLD_type, inst->value, 0);
1479 switch (value)
1480 {
1481 case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
1482 case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
1483 case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
1484 default: return 0;
1485 }
1486 }
1487
1488 if (inst->opcode->flags & F_SSIZE)
1489 {
1490 /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have the size[1] as part
1491 of the base opcode. */
1492 aarch64_insn mask;
1493 enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1494 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
1495 value = extract_field (FLD_size, inst->value, inst->opcode->mask);
1496 mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
 1497 /* For most related instructions, the 'size' field is fully available for
1498 operand encoding. */
1499 if (mask == 0x3)
1500 inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
1501 else
1502 {
1503 get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1504 candidates);
1505 inst->operands[idx].qualifier
1506 = get_qualifier_from_partial_encoding (value, candidates, mask);
1507 }
1508 }
1509
1510 if (inst->opcode->flags & F_T)
1511 {
1512 /* Num of consecutive '0's on the right side of imm5<3:0>. */
1513 int num = 0;
1514 unsigned val, Q;
1515 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1516 == AARCH64_OPND_CLASS_SIMD_REG);
1517 /* imm5<3:0> q <t>
1518 0000 x reserved
1519 xxx1 0 8b
1520 xxx1 1 16b
1521 xx10 0 4h
1522 xx10 1 8h
1523 x100 0 2s
1524 x100 1 4s
1525 1000 0 reserved
1526 1000 1 2d */
1527 val = extract_field (FLD_imm5, inst->value, 0);
1528 while ((val & 0x1) == 0 && ++num <= 3)
1529 val >>= 1;
1530 if (num > 3)
1531 return 0;
1532 Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
1533 inst->operands[0].qualifier =
1534 get_vreg_qualifier_from_value ((num << 1) | Q);
1535 }
1536
1537 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
1538 {
1539 /* Use Rt to encode in the case of e.g.
1540 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1541 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
1542 if (idx == -1)
1543 {
 1544 /* Otherwise use the result operand, which has to be an integer
1545 register. */
1546 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1547 == AARCH64_OPND_CLASS_INT_REG);
1548 idx = 0;
1549 }
1550 assert (idx == 0 || idx == 1);
1551 value = extract_field (FLD_Q, inst->value, 0);
1552 inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
1553 }
1554
1555 if (inst->opcode->flags & F_LDS_SIZE)
1556 {
1557 aarch64_field field = {0, 0};
1558 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1559 == AARCH64_OPND_CLASS_INT_REG);
1560 gen_sub_field (FLD_opc, 0, 1, &field);
1561 value = extract_field_2 (&field, inst->value, 0);
1562 inst->operands[0].qualifier
1563 = value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
1564 }
1565
1566 /* Miscellaneous decoding; done as the last step. */
1567 if (inst->opcode->flags & F_MISC)
1568 return do_misc_decoding (inst);
1569
1570 return 1;
1571 }
1572
 1573 /* Converters that turn a real opcode instruction into its alias form.  */
1574
1575 /* ROR <Wd>, <Ws>, #<shift>
1576 is equivalent to:
1577 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1578 static int
1579 convert_extr_to_ror (aarch64_inst *inst)
1580 {
1581 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1582 {
1583 copy_operand_info (inst, 2, 3);
1584 inst->operands[3].type = AARCH64_OPND_NIL;
1585 return 1;
1586 }
1587 return 0;
1588 }
1589
1590 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1591 is equivalent to:
1592 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1593 static int
1594 convert_shll_to_xtl (aarch64_inst *inst)
1595 {
1596 if (inst->operands[2].imm.value == 0)
1597 {
1598 inst->operands[2].type = AARCH64_OPND_NIL;
1599 return 1;
1600 }
1601 return 0;
1602 }
1603
1604 /* Convert
1605 UBFM <Xd>, <Xn>, #<shift>, #63.
1606 to
1607 LSR <Xd>, <Xn>, #<shift>. */
1608 static int
1609 convert_bfm_to_sr (aarch64_inst *inst)
1610 {
1611 int64_t imms, val;
1612
1613 imms = inst->operands[3].imm.value;
1614 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1615 if (imms == val)
1616 {
1617 inst->operands[3].type = AARCH64_OPND_NIL;
1618 return 1;
1619 }
1620
1621 return 0;
1622 }
1623
1624 /* Convert MOV to ORR. */
1625 static int
1626 convert_orr_to_mov (aarch64_inst *inst)
1627 {
1628 /* MOV <Vd>.<T>, <Vn>.<T>
1629 is equivalent to:
1630 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1631 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
1632 {
1633 inst->operands[2].type = AARCH64_OPND_NIL;
1634 return 1;
1635 }
1636 return 0;
1637 }
1638
1639 /* When <imms> >= <immr>, the instruction written:
1640 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1641 is equivalent to:
1642 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1643
1644 static int
1645 convert_bfm_to_bfx (aarch64_inst *inst)
1646 {
1647 int64_t immr, imms;
1648
1649 immr = inst->operands[2].imm.value;
1650 imms = inst->operands[3].imm.value;
1651 if (imms >= immr)
1652 {
1653 int64_t lsb = immr;
1654 inst->operands[2].imm.value = lsb;
1655 inst->operands[3].imm.value = imms + 1 - lsb;
1656 /* The two opcodes have different qualifiers for
1657 the immediate operands; reset to help the checking. */
1658 reset_operand_qualifier (inst, 2);
1659 reset_operand_qualifier (inst, 3);
1660 return 1;
1661 }
1662
1663 return 0;
1664 }
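/* For example, UBFM <Xd>, <Xn>, #8, #15 has imms (15) >= immr (8) and is
   therefore shown as UBFX <Xd>, <Xn>, #8, #8: lsb = 8 and
   width = 15 + 1 - 8 = 8.  */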
1665
1666 /* When <imms> < <immr>, the instruction written:
1667 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1668 is equivalent to:
1669 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1670
1671 static int
1672 convert_bfm_to_bfi (aarch64_inst *inst)
1673 {
1674 int64_t immr, imms, val;
1675
1676 immr = inst->operands[2].imm.value;
1677 imms = inst->operands[3].imm.value;
1678 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1679 if (imms < immr)
1680 {
1681 inst->operands[2].imm.value = (val - immr) & (val - 1);
1682 inst->operands[3].imm.value = imms + 1;
1683 /* The two opcodes have different qualifiers for
1684 the immediate operands; reset to help the checking. */
1685 reset_operand_qualifier (inst, 2);
1686 reset_operand_qualifier (inst, 3);
1687 return 1;
1688 }
1689
1690 return 0;
1691 }
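/* For example, UBFM <Xd>, <Xn>, #60, #3 has imms (3) < immr (60) and is
   therefore shown as UBFIZ <Xd>, <Xn>, #4, #4:
   lsb = (64 - 60) & 0x3f = 4 and width = 3 + 1 = 4.  */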
1692
1693 /* The instruction written:
1694 BFC <Xd>, #<lsb>, #<width>
1695 is equivalent to:
1696 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1697
1698 static int
1699 convert_bfm_to_bfc (aarch64_inst *inst)
1700 {
1701 int64_t immr, imms, val;
1702
1703 /* Should have been assured by the base opcode value. */
1704 assert (inst->operands[1].reg.regno == 0x1f);
1705
1706 immr = inst->operands[2].imm.value;
1707 imms = inst->operands[3].imm.value;
1708 val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
1709 if (imms < immr)
1710 {
1711 /* Drop XZR from the second operand. */
1712 copy_operand_info (inst, 1, 2);
1713 copy_operand_info (inst, 2, 3);
1714 inst->operands[3].type = AARCH64_OPND_NIL;
1715
1716 /* Recalculate the immediates. */
1717 inst->operands[1].imm.value = (val - immr) & (val - 1);
1718 inst->operands[2].imm.value = imms + 1;
1719
1720 /* The two opcodes have different qualifiers for the operands; reset to
1721 help the checking. */
1722 reset_operand_qualifier (inst, 1);
1723 reset_operand_qualifier (inst, 2);
1724 reset_operand_qualifier (inst, 3);
1725
1726 return 1;
1727 }
1728
1729 return 0;
1730 }
1731
1732 /* The instruction written:
1733 LSL <Xd>, <Xn>, #<shift>
1734 is equivalent to:
1735 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1736
1737 static int
1738 convert_ubfm_to_lsl (aarch64_inst *inst)
1739 {
1740 int64_t immr = inst->operands[2].imm.value;
1741 int64_t imms = inst->operands[3].imm.value;
1742 int64_t val
1743 = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1744
1745 if ((immr == 0 && imms == val) || immr == imms + 1)
1746 {
1747 inst->operands[3].type = AARCH64_OPND_NIL;
1748 inst->operands[2].imm.value = val - imms;
1749 return 1;
1750 }
1751
1752 return 0;
1753 }
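/* For example, UBFM <Xd>, <Xn>, #61, #60 satisfies immr == imms + 1 and is
   shown as LSL <Xd>, <Xn>, #3, the shift amount being val - imms = 63 - 60.  */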
1754
1755 /* CINC <Wd>, <Wn>, <cond>
1756 is equivalent to:
1757 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
1758 where <cond> is not AL or NV. */
1759
1760 static int
1761 convert_from_csel (aarch64_inst *inst)
1762 {
1763 if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
1764 && (inst->operands[3].cond->value & 0xe) != 0xe)
1765 {
1766 copy_operand_info (inst, 2, 3);
1767 inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
1768 inst->operands[3].type = AARCH64_OPND_NIL;
1769 return 1;
1770 }
1771 return 0;
1772 }
1773
1774 /* CSET <Wd>, <cond>
1775 is equivalent to:
1776 CSINC <Wd>, WZR, WZR, invert(<cond>)
1777 where <cond> is not AL or NV. */
1778
1779 static int
1780 convert_csinc_to_cset (aarch64_inst *inst)
1781 {
1782 if (inst->operands[1].reg.regno == 0x1f
1783 && inst->operands[2].reg.regno == 0x1f
1784 && (inst->operands[3].cond->value & 0xe) != 0xe)
1785 {
1786 copy_operand_info (inst, 1, 3);
1787 inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
1788 inst->operands[3].type = AARCH64_OPND_NIL;
1789 inst->operands[2].type = AARCH64_OPND_NIL;
1790 return 1;
1791 }
1792 return 0;
1793 }
1794
1795 /* MOV <Wd>, #<imm>
1796 is equivalent to:
1797 MOVZ <Wd>, #<imm16>, LSL #<shift>.
1798
1799 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1800 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1801 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1802 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1803 machine-instruction mnemonic must be used. */
1804
1805 static int
1806 convert_movewide_to_mov (aarch64_inst *inst)
1807 {
1808 uint64_t value = inst->operands[1].imm.value;
1809 /* MOVZ/MOVN #0 have a shift amount other than LSL #0. */
1810 if (value == 0 && inst->operands[1].shifter.amount != 0)
1811 return 0;
1812 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1813 inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
1814 value <<= inst->operands[1].shifter.amount;
 1815 /* As this is an alias converter, it should be clear that INST->OPCODE
 1816 is the opcode of the real instruction.  */
1817 if (inst->opcode->op == OP_MOVN)
1818 {
1819 int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1820 value = ~value;
1821 /* A MOVN has an immediate that could be encoded by MOVZ. */
1822 if (aarch64_wide_constant_p (value, is32, NULL) == TRUE)
1823 return 0;
1824 }
1825 inst->operands[1].imm.value = value;
1826 inst->operands[1].shifter.amount = 0;
1827 return 1;
1828 }
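/* For example, MOVZ <Wd>, #0x1234, LSL #16 is shown as MOV <Wd>, #0x12340000;
   the checks above keep the machine mnemonic for the cases where the MOV form
   would be ambiguous or unrepresentable.  */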
1829
1830 /* MOV <Wd>, #<imm>
1831 is equivalent to:
1832 ORR <Wd>, WZR, #<imm>.
1833
1834 A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
1835 ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
1836 or where a MOVN has an immediate that could be encoded by MOVZ, or where
1837 MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
1838 machine-instruction mnemonic must be used. */
1839
1840 static int
1841 convert_movebitmask_to_mov (aarch64_inst *inst)
1842 {
1843 int is32;
1844 uint64_t value;
1845
1846 /* Should have been assured by the base opcode value. */
1847 assert (inst->operands[1].reg.regno == 0x1f);
1848 copy_operand_info (inst, 1, 2);
1849 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1850 inst->operands[1].type = AARCH64_OPND_IMM_MOV;
1851 value = inst->operands[1].imm.value;
1852 /* ORR has an immediate that could be generated by a MOVZ or MOVN
1853 instruction. */
1854 if (inst->operands[0].reg.regno != 0x1f
1855 && (aarch64_wide_constant_p (value, is32, NULL) == TRUE
1856 || aarch64_wide_constant_p (~value, is32, NULL) == TRUE))
1857 return 0;
1858
1859 inst->operands[2].type = AARCH64_OPND_NIL;
1860 return 1;
1861 }
1862
1863 /* Some alias opcodes are disassembled by being converted from their real form.
1864 N.B. INST->OPCODE is the real opcode rather than the alias. */
1865
1866 static int
1867 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
1868 {
1869 switch (alias->op)
1870 {
1871 case OP_ASR_IMM:
1872 case OP_LSR_IMM:
1873 return convert_bfm_to_sr (inst);
1874 case OP_LSL_IMM:
1875 return convert_ubfm_to_lsl (inst);
1876 case OP_CINC:
1877 case OP_CINV:
1878 case OP_CNEG:
1879 return convert_from_csel (inst);
1880 case OP_CSET:
1881 case OP_CSETM:
1882 return convert_csinc_to_cset (inst);
1883 case OP_UBFX:
1884 case OP_BFXIL:
1885 case OP_SBFX:
1886 return convert_bfm_to_bfx (inst);
1887 case OP_SBFIZ:
1888 case OP_BFI:
1889 case OP_UBFIZ:
1890 return convert_bfm_to_bfi (inst);
1891 case OP_BFC:
1892 return convert_bfm_to_bfc (inst);
1893 case OP_MOV_V:
1894 return convert_orr_to_mov (inst);
1895 case OP_MOV_IMM_WIDE:
1896 case OP_MOV_IMM_WIDEN:
1897 return convert_movewide_to_mov (inst);
1898 case OP_MOV_IMM_LOG:
1899 return convert_movebitmask_to_mov (inst);
1900 case OP_ROR_IMM:
1901 return convert_extr_to_ror (inst);
1902 case OP_SXTL:
1903 case OP_SXTL2:
1904 case OP_UXTL:
1905 case OP_UXTL2:
1906 return convert_shll_to_xtl (inst);
1907 default:
1908 return 0;
1909 }
1910 }
1911
1912 static int aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
1913 aarch64_inst *, int);
1914
1915 /* Given the instruction information in *INST, check if the instruction has
1916 any alias form that can be used to represent *INST. If the answer is yes,
1917 update *INST to be in the form of the determined alias. */
1918
1919 /* In the opcode description table, the following flags are used in opcode
1920 entries to help establish the relations between the real and alias opcodes:
1921
1922 F_ALIAS: opcode is an alias
1923 F_HAS_ALIAS: opcode has alias(es)
1924 F_P1
1925 F_P2
1926 F_P3: Disassembly preference priority 1-3 (the larger the number,
1927 the higher the priority). If none is specified, the priority
1928 defaults to 0, i.e. the lowest priority.
1929
1930 Although the relation between the machine and the alias instructions are not
1931 explicitly described, it can be easily determined from the base opcode
1932 values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
1933 description entries:
1934
1935 The mask of an alias opcode must be equal to or a super-set (i.e. more
1936 constrained) of that of the aliased opcode; so is the base opcode value.
1937
1938 if (opcode_has_alias (real) && alias_opcode_p (opcode)
1939 && (opcode->mask & real->mask) == real->mask
1940 && (real->mask & opcode->opcode) == (real->mask & real->opcode))
1941 then OPCODE is an alias of, and only of, the REAL instruction
1942
1943 The alias relationship is kept flat to keep the related algorithm
1944 simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
1945
1946 During disassembly, the decoding decision tree (in
1947 opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
1948 if the decoding of such a machine instruction succeeds (and -Mno-aliases is
1949 not specified), the disassembler will check whether any alias instruction
1950 exists for this real instruction. If there is, the disassembler will
1951 either try to disassemble the 32-bit binary again using the alias's rule,
1952 or try to convert the IR to the form of the alias. In the case of multiple
1953 aliases, the aliases are tried one by one from the highest priority
1954 (currently the flag F_P3) to the lowest priority (no priority flag), and
1955 the first one that succeeds is adopted.
1956
1957 You may ask why there is a need to convert the IR from one form to
1958 another when handling certain aliases. On the one hand, it avoids
1959 adding more operand-specific code to handle unusual encoding/decoding;
1960 on the other hand, during disassembly, the conversion is an effective
1961 way to check the conditions of an alias (as an alias may be adopted
1962 only if certain conditions are met).
1963
1964 In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
1965 aarch64_opcode_table and generated aarch64_find_alias_opcode and
1966 aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help. */
1967
1968 static void
1969 determine_disassembling_preference (struct aarch64_inst *inst)
1970 {
1971 const aarch64_opcode *opcode;
1972 const aarch64_opcode *alias;
1973
1974 opcode = inst->opcode;
1975
1976 /* This opcode does not have an alias, so use itself. */
1977 if (opcode_has_alias (opcode) == FALSE)
1978 return;
1979
1980 alias = aarch64_find_alias_opcode (opcode);
1981 assert (alias);
1982
1983 #ifdef DEBUG_AARCH64
1984 if (debug_dump)
1985 {
1986 const aarch64_opcode *tmp = alias;
1987 printf ("#### LIST orderd: ");
1988 while (tmp)
1989 {
1990 printf ("%s, ", tmp->name);
1991 tmp = aarch64_find_next_alias_opcode (tmp);
1992 }
1993 printf ("\n");
1994 }
1995 #endif /* DEBUG_AARCH64 */
1996
1997 for (; alias; alias = aarch64_find_next_alias_opcode (alias))
1998 {
1999 DEBUG_TRACE ("try %s", alias->name);
2000 assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2001
2002 /* An alias can be a pseudo opcode which will never be used in the
2003 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2004 aliasing AND. */
2005 if (pseudo_opcode_p (alias))
2006 {
2007 DEBUG_TRACE ("skip pseudo %s", alias->name);
2008 continue;
2009 }
2010
2011 if ((inst->value & alias->mask) != alias->opcode)
2012 {
2013 DEBUG_TRACE ("skip %s as base opcode not match", alias->name);
2014 continue;
2015 }
2016 /* No need to do any complicated transformation on operands, if the alias
2017 opcode does not have any operand. */
2018 if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2019 {
2020 DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2021 aarch64_replace_opcode (inst, alias);
2022 return;
2023 }
2024 if (alias->flags & F_CONV)
2025 {
2026 aarch64_inst copy;
2027 memcpy (&copy, inst, sizeof (aarch64_inst));
2028 /* ALIAS is the preference as long as the instruction can be
2029 successfully converted to the form of ALIAS. */
2030 if (convert_to_alias (&copy, alias) == 1)
2031 {
2032 aarch64_replace_opcode (&copy, alias);
2033 assert (aarch64_match_operands_constraint (&copy, NULL));
2034 DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2035 memcpy (inst, &copy, sizeof (aarch64_inst));
2036 return;
2037 }
2038 }
2039 else
2040 {
2041 /* Directly decode the alias opcode. */
2042 aarch64_inst temp;
2043 memset (&temp, '\0', sizeof (aarch64_inst));
2044 if (aarch64_opcode_decode (alias, inst->value, &temp, 1) == 1)
2045 {
2046 DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2047 memcpy (inst, &temp, sizeof (aarch64_inst));
2048 return;
2049 }
2050 }
2051 }
2052 }
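/* A minimal sketch, not compiled into the disassembler (the guard and the
   helper name are assumptions): it restates, as compilable C, the alias
   relation described in the long comment before
   determine_disassembling_preference.  OPCODE is an alias candidate and
   REAL is the machine instruction entry it may alias.  */
#ifdef AARCH64_DIS_EXAMPLES
static int
example_opcode_aliases_real_p (const aarch64_opcode *opcode,
			       const aarch64_opcode *real)
{
  return (opcode_has_alias (real)
	  && alias_opcode_p (opcode)
	  /* The alias mask must be a super-set of the real mask...  */
	  && (opcode->mask & real->mask) == real->mask
	  /* ... and both must agree on the bits covered by the real
	     mask.  */
	  && (real->mask & opcode->opcode) == (real->mask & real->opcode));
}
#endif /* AARCH64_DIS_EXAMPLES */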
2053
2054 /* Decode the CODE according to OPCODE; fill INST. Return 0 if the decoding
2055 fails, which means that CODE is not an instruction of OPCODE; otherwise
2056 return 1.
2057
2058 If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2059 determined and used to disassemble CODE; this is done just before the
2060 return. */
2061
2062 static int
2063 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2064 aarch64_inst *inst, int noaliases_p)
2065 {
2066 int i;
2067
2068 DEBUG_TRACE ("enter with %s", opcode->name);
2069
2070 assert (opcode && inst);
2071
2072 /* Check the base opcode. */
2073 if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2074 {
2075 DEBUG_TRACE ("base opcode match FAIL");
2076 goto decode_fail;
2077 }
2078
2079 /* Clear inst. */
2080 memset (inst, '\0', sizeof (aarch64_inst));
2081
2082 inst->opcode = opcode;
2083 inst->value = code;
2084
2085 /* Assign operand codes and indexes. */
2086 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2087 {
2088 if (opcode->operands[i] == AARCH64_OPND_NIL)
2089 break;
2090 inst->operands[i].type = opcode->operands[i];
2091 inst->operands[i].idx = i;
2092 }
2093
2094 /* Call the opcode decoder indicated by flags. */
2095 if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2096 {
2097 DEBUG_TRACE ("opcode flag-based decoder FAIL");
2098 goto decode_fail;
2099 }
2100
2101 /* Call operand decoders. */
2102 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2103 {
2104 const aarch64_operand *opnd;
2105 enum aarch64_opnd type;
2106
2107 type = opcode->operands[i];
2108 if (type == AARCH64_OPND_NIL)
2109 break;
2110 opnd = &aarch64_operands[type];
2111 if (operand_has_extractor (opnd)
2112 && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst)))
2113 {
2114 DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2115 goto decode_fail;
2116 }
2117 }
2118
2119 /* If the opcode has a verifier, then check it now. */
2120 if (opcode->verifier && ! opcode->verifier (opcode, code))
2121 {
2122 DEBUG_TRACE ("operand verifier FAIL");
2123 goto decode_fail;
2124 }
2125
2126 /* Match the qualifiers. */
2127 if (aarch64_match_operands_constraint (inst, NULL) == 1)
2128 {
2129 /* Arriving here, the CODE has been determined as a valid instruction
2130 of OPCODE and *INST has been filled with information of this OPCODE
2131 instruction. Before the return, check if the instruction has any
2132 alias and should be disassembled in the form of its alias instead.
2133 If the answer is yes, *INST will be updated. */
2134 if (!noaliases_p)
2135 determine_disassembling_preference (inst);
2136 DEBUG_TRACE ("SUCCESS");
2137 return 1;
2138 }
2139 else
2140 {
2141 DEBUG_TRACE ("constraint matching FAIL");
2142 }
2143
2144 decode_fail:
2145 return 0;
2146 }
2147 \f
2148 /* This does some user-friendly fix-up to *INST. It currently focuses on
2149 adjusting qualifiers to help the printed instruction be
2150 recognized/understood more easily. */
2151
2152 static void
2153 user_friendly_fixup (aarch64_inst *inst)
2154 {
2155 switch (inst->opcode->iclass)
2156 {
2157 case testbranch:
2158 /* TBNZ Xn|Wn, #uimm6, label
2159 Test and Branch Not Zero: conditionally jumps to label if bit number
2160 uimm6 in register Xn is not zero. The bit number implies the width of
2161 the register, which may be written and should be disassembled as Wn if
2162 uimm6 is less than 32. Limited to a branch offset range of +/- 32KiB.
2163 */
2164 if (inst->operands[1].imm.value < 32)
2165 inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
2166 break;
2167 default: break;
2168 }
2169 }
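/* A minimal sketch, not compiled into the disassembler (the guard and the
   helper name are assumptions): the register width implied by a TBZ/TBNZ
   bit number, as applied by user_friendly_fixup above.  Bit numbers 0-31
   are printed with a W register, 32-63 with an X register.  */
#ifdef AARCH64_DIS_EXAMPLES
static enum aarch64_opnd_qualifier
example_testbranch_reg_qualifier (int64_t bit_number)
{
  return bit_number < 32 ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
}
#endif /* AARCH64_DIS_EXAMPLES */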
2170
2171 /* Decode INSN and fill *INST with the instruction information. An alias
2172 opcode may be filled in *INST if NOALIASES_P is FALSE. Return ERR_OK
2173 (zero) on success. */
2174
2175 int
2176 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
2177 bfd_boolean noaliases_p)
2178 {
2179 const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
2180
2181 #ifdef DEBUG_AARCH64
2182 if (debug_dump)
2183 {
2184 const aarch64_opcode *tmp = opcode;
2185 printf ("\n");
2186 DEBUG_TRACE ("opcode lookup:");
2187 while (tmp != NULL)
2188 {
2189 aarch64_verbose (" %s", tmp->name);
2190 tmp = aarch64_find_next_opcode (tmp);
2191 }
2192 }
2193 #endif /* DEBUG_AARCH64 */
2194
2195 /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
2196 distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
2197 opcode field and value; the difference is that one of them has an extra
2198 field as part of the opcode, while that field is used for operand
2199 encoding in the other opcode(s) ('immh' in the case of the example).
2200 while (opcode != NULL)
2201 {
2202 /* But only one opcode can be decoded successfully, as the
2203 decoding routine checks the constraints carefully. */
2204 if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p) == 1)
2205 return ERR_OK;
2206 opcode = aarch64_find_next_opcode (opcode);
2207 }
2208
2209 return ERR_UND;
2210 }
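/* A minimal usage sketch, not compiled into the disassembler (the guard
   and the function name are assumptions): a typical call to the decoder
   entry point above, given a 32-bit instruction word already read from
   memory.  */
#ifdef AARCH64_DIS_EXAMPLES
static void
example_decode_one_word (uint32_t word)
{
  aarch64_inst inst;

  if (aarch64_decode_insn (word, &inst, FALSE) == ERR_OK)
    {
      /* inst.opcode->name now holds the (possibly aliased) mnemonic and
	 inst.operands[] the decoded operands, ready for printing.  */
    }
}
#endif /* AARCH64_DIS_EXAMPLES */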
2211
2212 /* Print operands. */
2213
2214 static void
2215 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
2216 const aarch64_opnd_info *opnds, struct disassemble_info *info)
2217 {
2218 int i, pcrel_p, num_printed;
2219 for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2220 {
2221 char str[128];
2222 /* We rely mainly on the opcode's operand info; however, we also look
2223 into inst->operands to support the disassembling of an optional
2224 operand.
2225 The two operand codes should be the same in all cases, apart from
2226 when the operand can be optional. */
2227 if (opcode->operands[i] == AARCH64_OPND_NIL
2228 || opnds[i].type == AARCH64_OPND_NIL)
2229 break;
2230
2231 /* Generate the operand string in STR. */
2232 aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
2233 &info->target);
2234
2235 /* Print the delimiter (taking account of omitted operand(s)). */
2236 if (str[0] != '\0')
2237 (*info->fprintf_func) (info->stream, "%s",
2238 num_printed++ == 0 ? "\t" : ", ");
2239
2240 /* Print the operand. */
2241 if (pcrel_p)
2242 (*info->print_address_func) (info->target, info);
2243 else
2244 (*info->fprintf_func) (info->stream, "%s", str);
2245 }
2246 }
2247
2248 /* Print the instruction mnemonic name. */
2249
2250 static void
2251 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
2252 {
2253 if (inst->opcode->flags & F_COND)
2254 {
2255 /* For instructions that are truly conditionally executed, e.g. b.cond,
2256 prepare the full mnemonic name with the corresponding condition
2257 suffix. */
2258 char name[8], *ptr;
2259 size_t len;
2260
2261 ptr = strchr (inst->opcode->name, '.');
2262 assert (ptr && inst->cond);
2263 len = ptr - inst->opcode->name;
2264 assert (len < 8);
2265 strncpy (name, inst->opcode->name, len);
2266 name [len] = '\0';
2267 (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
2268 }
2269 else
2270 (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
2271 }
2272
2273 /* Print the instruction according to *INST. */
2274
2275 static void
2276 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
2277 struct disassemble_info *info)
2278 {
2279 print_mnemonic_name (inst, info);
2280 print_operands (pc, inst->opcode, inst->operands, info);
2281 }
2282
2283 /* Entry-point of the instruction disassembler and printer. */
2284
2285 static void
2286 print_insn_aarch64_word (bfd_vma pc,
2287 uint32_t word,
2288 struct disassemble_info *info)
2289 {
2290 static const char *err_msg[6] =
2291 {
2292 [ERR_OK] = "_",
2293 [-ERR_UND] = "undefined",
2294 [-ERR_UNP] = "unpredictable",
2295 [-ERR_NYI] = "NYI"
2296 };
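  /* N.B. the table is indexed with the negated error code, so ERR_UND
     (-1) selects "undefined", ERR_UNP (-3) "unpredictable" and ERR_NYI
     (-5) "NYI"; the unused slots are implicitly NULL.  */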
2297
2298 int ret;
2299 aarch64_inst inst;
2300
2301 info->insn_info_valid = 1;
2302 info->branch_delay_insns = 0;
2303 info->data_size = 0;
2304 info->target = 0;
2305 info->target2 = 0;
2306
2307 if (info->flags & INSN_HAS_RELOC)
2308 /* If the instruction has a reloc associated with it, then
2309 the offset field in the instruction will actually be the
2310 addend for the reloc. (If we are using REL type relocs).
2311 In such cases, we can ignore the pc when computing
2312 addresses, since the addend is not currently pc-relative. */
2313 pc = 0;
2314
2315 ret = aarch64_decode_insn (word, &inst, no_aliases);
2316
2317 if (((word >> 21) & 0x3ff) == 1)
2318 {
2319 /* RESERVED for ALES. */
2320 assert (ret != ERR_OK);
2321 ret = ERR_NYI;
2322 }
2323
2324 switch (ret)
2325 {
2326 case ERR_UND:
2327 case ERR_UNP:
2328 case ERR_NYI:
2329 /* Handle undefined instructions. */
2330 info->insn_type = dis_noninsn;
2331 (*info->fprintf_func) (info->stream,".inst\t0x%08x ; %s",
2332 word, err_msg[-ret]);
2333 break;
2334 case ERR_OK:
2335 user_friendly_fixup (&inst);
2336 print_aarch64_insn (pc, &inst, info);
2337 break;
2338 default:
2339 abort ();
2340 }
2341 }
2342
2343 /* Disallow mapping symbols ($x, $d, etc.) from
2344 being displayed in symbol-relative addresses. */
2345
2346 bfd_boolean
2347 aarch64_symbol_is_valid (asymbol * sym,
2348 struct disassemble_info * info ATTRIBUTE_UNUSED)
2349 {
2350 const char * name;
2351
2352 if (sym == NULL)
2353 return FALSE;
2354
2355 name = bfd_asymbol_name (sym);
2356
2357 return name
2358 && (name[0] != '$'
2359 || (name[1] != 'x' && name[1] != 'd')
2360 || (name[2] != '\0' && name[2] != '.'));
2361 }
2362
2363 /* Print data bytes on INFO->STREAM. */
2364
2365 static void
2366 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
2367 uint32_t word,
2368 struct disassemble_info *info)
2369 {
2370 switch (info->bytes_per_chunk)
2371 {
2372 case 1:
2373 info->fprintf_func (info->stream, ".byte\t0x%02x", word);
2374 break;
2375 case 2:
2376 info->fprintf_func (info->stream, ".short\t0x%04x", word);
2377 break;
2378 case 4:
2379 info->fprintf_func (info->stream, ".word\t0x%08x", word);
2380 break;
2381 default:
2382 abort ();
2383 }
2384 }
2385
2386 /* Try to infer the code or data type from a symbol.
2387 Returns nonzero if *MAP_TYPE was set. */
2388
2389 static int
2390 get_sym_code_type (struct disassemble_info *info, int n,
2391 enum map_type *map_type)
2392 {
2393 elf_symbol_type *es;
2394 unsigned int type;
2395 const char *name;
2396
2397 es = *(elf_symbol_type **)(info->symtab + n);
2398 type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
2399
2400 /* If the symbol has function type then use that. */
2401 if (type == STT_FUNC)
2402 {
2403 *map_type = MAP_INSN;
2404 return TRUE;
2405 }
2406
2407 /* Check for mapping symbols. */
2408 name = bfd_asymbol_name(info->symtab[n]);
2409 if (name[0] == '$'
2410 && (name[1] == 'x' || name[1] == 'd')
2411 && (name[2] == '\0' || name[2] == '.'))
2412 {
2413 *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
2414 return TRUE;
2415 }
2416
2417 return FALSE;
2418 }
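/* A minimal sketch, not compiled into the disassembler (the guard and the
   helper name are assumptions): the mapping-symbol naming convention used
   both by aarch64_symbol_is_valid and by get_sym_code_type above, i.e.
   "$x" or "$d", optionally followed by a '.'-prefixed suffix.  */
#ifdef AARCH64_DIS_EXAMPLES
static int
example_is_mapping_symbol_name (const char *name)
{
  return (name != NULL
	  && name[0] == '$'
	  && (name[1] == 'x' || name[1] == 'd')
	  && (name[2] == '\0' || name[2] == '.'));
}
#endif /* AARCH64_DIS_EXAMPLES */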
2419
2420 /* Entry-point of the AArch64 disassembler. */
2421
2422 int
2423 print_insn_aarch64 (bfd_vma pc,
2424 struct disassemble_info *info)
2425 {
2426 bfd_byte buffer[INSNLEN];
2427 int status;
2428 void (*printer) (bfd_vma, uint32_t, struct disassemble_info *);
2429 bfd_boolean found = FALSE;
2430 unsigned int size = 4;
2431 unsigned long data;
2432
2433 if (info->disassembler_options)
2434 {
2435 set_default_aarch64_dis_options (info);
2436
2437 parse_aarch64_dis_options (info->disassembler_options);
2438
2439 /* To avoid repeated parsing of these options, we remove them here. */
2440 info->disassembler_options = NULL;
2441 }
2442
2443 /* AArch64 instructions are always little-endian. */
2444 info->endian_code = BFD_ENDIAN_LITTLE;
2445
2446 /* First check the full symtab for a mapping symbol, even if there
2447 are no usable non-mapping symbols for this address. */
2448 if (info->symtab_size != 0
2449 && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
2450 {
2451 enum map_type type = MAP_INSN;
2452 int last_sym = -1;
2453 bfd_vma addr;
2454 int n;
2455
2456 if (pc <= last_mapping_addr)
2457 last_mapping_sym = -1;
2458
2459 /* Start scanning at the start of the function, or wherever
2460 we finished last time. */
2461 n = info->symtab_pos + 1;
2462 if (n < last_mapping_sym)
2463 n = last_mapping_sym;
2464
2465 /* Scan up to the location being disassembled. */
2466 for (; n < info->symtab_size; n++)
2467 {
2468 addr = bfd_asymbol_value (info->symtab[n]);
2469 if (addr > pc)
2470 break;
2471 if ((info->section == NULL
2472 || info->section == info->symtab[n]->section)
2473 && get_sym_code_type (info, n, &type))
2474 {
2475 last_sym = n;
2476 found = TRUE;
2477 }
2478 }
2479
2480 if (!found)
2481 {
2482 n = info->symtab_pos;
2483 if (n < last_mapping_sym)
2484 n = last_mapping_sym;
2485
2486 /* No mapping symbol found at this address. Look backwards
2487 for a preceding one. */
2488 for (; n >= 0; n--)
2489 {
2490 if (get_sym_code_type (info, n, &type))
2491 {
2492 last_sym = n;
2493 found = TRUE;
2494 break;
2495 }
2496 }
2497 }
2498
2499 last_mapping_sym = last_sym;
2500 last_type = type;
2501
2502 /* Look a little bit ahead to see if we should print out
2503 less than four bytes of data. If there's another symbol,
2504 mapping or otherwise, within the next few bytes then don't
2505 print past it. */
2506 if (last_type == MAP_DATA)
2507 {
2508 size = 4 - (pc & 3);
2509 for (n = last_sym + 1; n < info->symtab_size; n++)
2510 {
2511 addr = bfd_asymbol_value (info->symtab[n]);
2512 if (addr > pc)
2513 {
2514 if (addr - pc < size)
2515 size = addr - pc;
2516 break;
2517 }
2518 }
2519 /* If the next symbol is after three bytes, we need to
2520 print only part of the data, so that we can use either
2521 .byte or .short. */
2522 if (size == 3)
2523 size = (pc & 1) ? 1 : 2;
2524 }
2525 }
2526
2527 if (last_type == MAP_DATA)
2528 {
2529 /* size was set above. */
2530 info->bytes_per_chunk = size;
2531 info->display_endian = info->endian;
2532 printer = print_insn_data;
2533 }
2534 else
2535 {
2536 info->bytes_per_chunk = size = INSNLEN;
2537 info->display_endian = info->endian_code;
2538 printer = print_insn_aarch64_word;
2539 }
2540
2541 status = (*info->read_memory_func) (pc, buffer, size, info);
2542 if (status != 0)
2543 {
2544 (*info->memory_error_func) (status, pc, info);
2545 return -1;
2546 }
2547
2548 data = bfd_get_bits (buffer, size * 8,
2549 info->display_endian == BFD_ENDIAN_BIG);
2550
2551 (*printer) (pc, data, info);
2552
2553 return size;
2554 }
2555 \f
2556 void
2557 print_aarch64_disassembler_options (FILE *stream)
2558 {
2559 fprintf (stream, _("\n\
2560 The following AARCH64 specific disassembler options are supported for use\n\
2561 with the -M switch (multiple options should be separated by commas):\n"));
2562
2563 fprintf (stream, _("\n\
2564 no-aliases Don't print instruction aliases.\n"));
2565
2566 fprintf (stream, _("\n\
2567 aliases Do print instruction aliases.\n"));
2568
2569 #ifdef DEBUG_AARCH64
2570 fprintf (stream, _("\n\
2571 debug_dump Temp switch for debug trace.\n"));
2572 #endif /* DEBUG_AARCH64 */
2573
2574 fprintf (stream, _("\n"));
2575 }
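/* For example (illustrative): "objdump -d -M no-aliases foo.o" prints the
   machine-instruction mnemonics (e.g. UBFM rather than LSL), while the
   default, or "-M aliases", prefers the alias forms.  */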