1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
28 /* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
32 N.B. the fields are required to be in such an order that the least significant
33 field for VALUE comes first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
36 the order of M, L, H. */
39 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
42 const aarch64_field
*field
;
43 enum aarch64_field_kind kind
;
47 num
= va_arg (va
, uint32_t);
51 kind
= va_arg (va
, enum aarch64_field_kind
);
52 field
= &fields
[kind
];
53 insert_field (kind
, code
, value
, mask
);
54 value
>>= field
->width
;
59 /* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
63 insert_all_fields (const aarch64_operand
*self
, aarch64_insn
*code
,
67 enum aarch64_field_kind kind
;
69 for (i
= ARRAY_SIZE (self
->fields
); i
-- > 0; )
70 if (self
->fields
[i
] != FLD_NIL
)
72 kind
= self
->fields
[i
];
73 insert_field (kind
, code
, value
, 0);
74 value
>>= fields
[kind
].width
;
78 /* Operand inserters. */
80 /* Insert register number. */
82 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
84 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
85 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
87 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
91 /* Insert register number, index and/or other data for SIMD register element
92 operand, e.g. the last source operand in
93 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
95 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
96 aarch64_insn
*code
, const aarch64_inst
*inst
,
97 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
100 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
101 /* index and/or type */
102 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
104 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
105 if (info
->type
== AARCH64_OPND_En
106 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
108 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
109 assert (info
->idx
== 1); /* Vn */
110 aarch64_insn value
= info
->reglane
.index
<< pos
;
111 insert_field (FLD_imm4
, code
, value
, 0);
115 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
122 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
123 insert_field (FLD_imm5
, code
, value
, 0);
126 else if (inst
->opcode
->iclass
== dotproduct
)
128 unsigned reglane_index
= info
->reglane
.index
;
129 switch (info
->qualifier
)
131 case AARCH64_OPND_QLF_S_4B
:
133 assert (reglane_index
< 4);
134 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
140 else if (inst
->opcode
->iclass
== cryptosm3
)
142 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
143 unsigned reglane_index
= info
->reglane
.index
;
144 assert (reglane_index
< 4);
145 insert_field (FLD_SM3_imm2
, code
, reglane_index
, 0);
149 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
150 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
151 unsigned reglane_index
= info
->reglane
.index
;
153 if (inst
->opcode
->op
== OP_FCMLA_ELEM
)
154 /* Complex operand takes two elements. */
157 switch (info
->qualifier
)
159 case AARCH64_OPND_QLF_S_H
:
161 assert (reglane_index
< 8);
162 insert_fields (code
, reglane_index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
164 case AARCH64_OPND_QLF_S_S
:
166 assert (reglane_index
< 4);
167 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
169 case AARCH64_OPND_QLF_S_D
:
171 assert (reglane_index
< 2);
172 insert_field (FLD_H
, code
, reglane_index
, 0);
181 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
183 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
185 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
186 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
189 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
191 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
195 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
196 in AdvSIMD load/store instructions. */
198 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
199 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
200 const aarch64_inst
*inst
,
201 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
203 aarch64_insn value
= 0;
204 /* Number of elements in each structure to be loaded/stored. */
205 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
208 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
213 switch (info
->reglist
.num_regs
)
215 case 1: value
= 0x7; break;
216 case 2: value
= 0xa; break;
217 case 3: value
= 0x6; break;
218 case 4: value
= 0x2; break;
223 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
234 insert_field (FLD_opcode
, code
, value
, 0);
239 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
240 single structure to all lanes instructions. */
242 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
243 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
244 const aarch64_inst
*inst
,
245 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
248 /* The opcode dependent area stores the number of elements in
249 each structure to be loaded/stored. */
250 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
253 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
255 value
= (aarch64_insn
) 0;
256 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
257 /* OP_LD1R does not have alternating variant, but have "two consecutive"
259 value
= (aarch64_insn
) 1;
260 insert_field (FLD_S
, code
, value
, 0);
265 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
266 operand e.g. Vt in AdvSIMD load/store single element instructions. */
268 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
269 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
270 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
271 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
273 aarch64_field field
= {0, 0};
274 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
275 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
277 assert (info
->reglist
.has_index
);
280 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
281 /* Encode the index, opcode<2:1> and size. */
282 switch (info
->qualifier
)
284 case AARCH64_OPND_QLF_S_B
:
285 /* Index encoded in "Q:S:size". */
286 QSsize
= info
->reglist
.index
;
289 case AARCH64_OPND_QLF_S_H
:
290 /* Index encoded in "Q:S:size<1>". */
291 QSsize
= info
->reglist
.index
<< 1;
294 case AARCH64_OPND_QLF_S_S
:
295 /* Index encoded in "Q:S". */
296 QSsize
= info
->reglist
.index
<< 2;
299 case AARCH64_OPND_QLF_S_D
:
300 /* Index encoded in "Q". */
301 QSsize
= info
->reglist
.index
<< 3 | 0x1;
307 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
308 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
309 insert_field_2 (&field
, code
, opcodeh2
, 0);
314 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
315 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
316 or SSHR <V><d>, <V><n>, #<shift>. */
318 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
319 const aarch64_opnd_info
*info
,
320 aarch64_insn
*code
, const aarch64_inst
*inst
,
321 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
323 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
326 if (inst
->opcode
->iclass
== asimdshf
)
330 0000 x SEE AdvSIMD modified immediate
339 Q
= (val
& 0x1) ? 1 : 0;
340 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
344 assert (info
->type
== AARCH64_OPND_IMM_VLSR
345 || info
->type
== AARCH64_OPND_IMM_VLSL
);
347 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
350 0000 SEE AdvSIMD modified immediate
351 0001 (16-UInt(immh:immb))
352 001x (32-UInt(immh:immb))
353 01xx (64-UInt(immh:immb))
354 1xxx (128-UInt(immh:immb)) */
355 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
359 0000 SEE AdvSIMD modified immediate
360 0001 (UInt(immh:immb)-8)
361 001x (UInt(immh:immb)-16)
362 01xx (UInt(immh:immb)-32)
363 1xxx (UInt(immh:immb)-64) */
364 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
365 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
370 /* Insert fields for e.g. the immediate operands in
371 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
373 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
375 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
376 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
380 imm
= info
->imm
.value
;
381 if (operand_need_shift_by_two (self
))
383 insert_all_fields (self
, code
, imm
);
387 /* Insert immediate and its shift amount for e.g. the last operand in
388 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
390 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
391 aarch64_insn
*code
, const aarch64_inst
*inst
,
392 aarch64_operand_error
*errors
)
395 aarch64_ins_imm (self
, info
, code
, inst
, errors
);
397 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
401 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
402 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
404 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
405 const aarch64_opnd_info
*info
,
407 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
408 aarch64_operand_error
*errors
411 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
412 uint64_t imm
= info
->imm
.value
;
413 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
414 int amount
= info
->shifter
.amount
;
415 aarch64_field field
= {0, 0};
417 /* a:b:c:d:e:f:g:h */
418 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
420 /* Either MOVI <Dd>, #<imm>
421 or MOVI <Vd>.2D, #<imm>.
422 <imm> is a 64-bit immediate
423 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
424 encoded in "a:b:c:d:e:f:g:h". */
425 imm
= aarch64_shrink_expanded_imm8 (imm
);
426 assert ((int)imm
>= 0);
428 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
430 if (kind
== AARCH64_MOD_NONE
)
433 /* shift amount partially in cmode */
434 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
435 if (kind
== AARCH64_MOD_LSL
)
437 /* AARCH64_MOD_LSL: shift zeros. */
438 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
439 assert (esize
== 4 || esize
== 2 || esize
== 1);
440 /* For 8-bit move immediate, the optional LSL #0 does not require
446 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
448 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
452 /* AARCH64_MOD_MSL: shift ones. */
454 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
456 insert_field_2 (&field
, code
, amount
, 0);
461 /* Insert fields for an 8-bit floating-point immediate. */
463 aarch64_ins_fpimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
465 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
466 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
468 insert_all_fields (self
, code
, info
->imm
.value
);
472 /* Insert 1-bit rotation immediate (#90 or #270). */
474 aarch64_ins_imm_rotate1 (const aarch64_operand
*self
,
475 const aarch64_opnd_info
*info
,
476 aarch64_insn
*code
, const aarch64_inst
*inst
,
477 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
479 uint64_t rot
= (info
->imm
.value
- 90) / 180;
481 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
485 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
487 aarch64_ins_imm_rotate2 (const aarch64_operand
*self
,
488 const aarch64_opnd_info
*info
,
489 aarch64_insn
*code
, const aarch64_inst
*inst
,
490 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
492 uint64_t rot
= info
->imm
.value
/ 90;
494 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
498 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
499 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
501 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
503 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
504 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
506 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
510 /* Insert arithmetic immediate for e.g. the last operand in
511 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
513 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
514 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
515 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
518 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
519 insert_field (self
->fields
[0], code
, value
, 0);
520 /* imm12 (unsigned) */
521 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
525 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
526 the operand should be inverted before encoding. */
528 aarch64_ins_limm_1 (const aarch64_operand
*self
,
529 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
530 const aarch64_inst
*inst
, bfd_boolean invert_p
,
531 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
534 uint64_t imm
= info
->imm
.value
;
535 int esize
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
539 /* The constraint check should have guaranteed this wouldn't happen. */
540 assert (aarch64_logical_immediate_p (imm
, esize
, &value
));
542 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
547 /* Insert logical/bitmask immediate for e.g. the last operand in
548 ORR <Wd|WSP>, <Wn>, #<imm>. */
550 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
551 aarch64_insn
*code
, const aarch64_inst
*inst
,
552 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
554 return aarch64_ins_limm_1 (self
, info
, code
, inst
,
555 inst
->opcode
->op
== OP_BIC
, errors
);
558 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
560 aarch64_ins_inv_limm (const aarch64_operand
*self
,
561 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
562 const aarch64_inst
*inst
,
563 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
565 return aarch64_ins_limm_1 (self
, info
, code
, inst
, TRUE
, errors
);
568 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
569 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
571 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
572 aarch64_insn
*code
, const aarch64_inst
*inst
,
573 aarch64_operand_error
*errors
)
575 aarch64_insn value
= 0;
577 assert (info
->idx
== 0);
580 aarch64_ins_regno (self
, info
, code
, inst
, errors
);
581 if (inst
->opcode
->iclass
== ldstpair_indexed
582 || inst
->opcode
->iclass
== ldstnapair_offs
583 || inst
->opcode
->iclass
== ldstpair_off
584 || inst
->opcode
->iclass
== loadlit
)
587 switch (info
->qualifier
)
589 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
590 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
591 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
594 insert_field (FLD_ldst_size
, code
, value
, 0);
599 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
600 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
606 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
608 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
609 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
610 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
611 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
614 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
618 /* Encode the address operand for e.g.
619 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
621 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
622 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
623 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
624 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
627 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
630 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
632 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
634 if (kind
== AARCH64_MOD_LSL
)
635 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
636 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
638 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
639 S
= info
->shifter
.amount
!= 0;
641 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
645 Must be #0 if <extend> is explicitly LSL. */
646 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
647 insert_field (FLD_S
, code
, S
, 0);
652 /* Encode the address operand for e.g.
653 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
655 aarch64_ins_addr_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
656 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
657 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
658 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
661 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
664 int imm
= info
->addr
.offset
.imm
;
665 insert_field (self
->fields
[1], code
, imm
, 0);
668 if (info
->addr
.writeback
)
670 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
671 insert_field (self
->fields
[2], code
, 1, 0);
676 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
678 aarch64_ins_addr_simm (const aarch64_operand
*self
,
679 const aarch64_opnd_info
*info
,
681 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
682 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
687 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
688 /* simm (imm9 or imm7) */
689 imm
= info
->addr
.offset
.imm
;
690 if (self
->fields
[0] == FLD_imm7
)
691 /* scaled immediate in ld/st pair instructions.. */
692 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
693 insert_field (self
->fields
[0], code
, imm
, 0);
694 /* pre/post- index */
695 if (info
->addr
.writeback
)
697 assert (inst
->opcode
->iclass
!= ldst_unscaled
698 && inst
->opcode
->iclass
!= ldstnapair_offs
699 && inst
->opcode
->iclass
!= ldstpair_off
700 && inst
->opcode
->iclass
!= ldst_unpriv
);
701 assert (info
->addr
.preind
!= info
->addr
.postind
);
702 if (info
->addr
.preind
)
703 insert_field (self
->fields
[1], code
, 1, 0);
709 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
711 aarch64_ins_addr_simm10 (const aarch64_operand
*self
,
712 const aarch64_opnd_info
*info
,
714 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
715 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
720 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
722 imm
= info
->addr
.offset
.imm
>> 3;
723 insert_field (self
->fields
[1], code
, imm
>> 9, 0);
724 insert_field (self
->fields
[2], code
, imm
, 0);
726 if (info
->addr
.writeback
)
728 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
729 insert_field (self
->fields
[3], code
, 1, 0);
734 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
736 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
737 const aarch64_opnd_info
*info
,
739 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
740 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
742 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
745 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
747 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
751 /* Encode the address operand for e.g.
752 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
754 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
755 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
756 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
757 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
760 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
762 if (info
->addr
.offset
.is_reg
)
763 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
765 insert_field (FLD_Rm
, code
, 0x1f, 0);
769 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
771 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
772 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
773 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
774 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
777 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
781 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
783 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
784 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
785 const aarch64_inst
*inst
,
786 aarch64_operand_error
*detail ATTRIBUTE_UNUSED
)
788 /* op0:op1:CRn:CRm:op2 */
789 insert_fields (code
, info
->sysreg
.value
, inst
->opcode
->mask
, 5,
790 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
794 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
796 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
797 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
798 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
799 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
802 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
807 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
809 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
810 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
811 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
812 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
814 /* op1:CRn:CRm:op2 */
815 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
816 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
820 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
823 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
824 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
825 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
826 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
829 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
833 /* Encode the prefetch operation option operand for e.g.
834 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
837 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
838 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
839 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
840 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
843 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
847 /* Encode the hint number for instructions that alias HINT but take an
851 aarch64_ins_hint (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
852 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
853 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
854 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
857 insert_fields (code
, info
->hint_option
->value
, 0, 2, FLD_op2
, FLD_CRm
);
861 /* Encode the extended register operand for e.g.
862 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
864 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
865 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
866 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
867 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
869 enum aarch64_modifier_kind kind
;
872 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
874 kind
= info
->shifter
.kind
;
875 if (kind
== AARCH64_MOD_LSL
)
876 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
877 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
878 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
880 insert_field (FLD_imm3
, code
, info
->shifter
.amount
, 0);
885 /* Encode the shifted register operand for e.g.
886 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
888 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
889 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
890 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
891 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
894 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
896 insert_field (FLD_shift
, code
,
897 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
899 insert_field (FLD_imm6
, code
, info
->shifter
.amount
, 0);
904 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
905 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
906 SELF's operand-dependent value. fields[0] specifies the field that
907 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
909 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand
*self
,
910 const aarch64_opnd_info
*info
,
912 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
913 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
915 int factor
= 1 + get_operand_specific_data (self
);
916 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
917 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
921 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
922 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
923 SELF's operand-dependent value. fields[0] specifies the field that
924 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
926 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand
*self
,
927 const aarch64_opnd_info
*info
,
929 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
930 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
932 int factor
= 1 + get_operand_specific_data (self
);
933 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
934 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
938 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
939 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
940 SELF's operand-dependent value. fields[0] specifies the field that
941 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
942 and imm3 fields, with imm3 being the less-significant part. */
944 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand
*self
,
945 const aarch64_opnd_info
*info
,
947 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
948 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
950 int factor
= 1 + get_operand_specific_data (self
);
951 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
952 insert_fields (code
, info
->addr
.offset
.imm
/ factor
, 0,
953 2, FLD_imm3
, FLD_SVE_imm6
);
957 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
958 is a 4-bit signed number and where <shift> is SELF's operand-dependent
959 value. fields[0] specifies the base register field. */
961 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand
*self
,
962 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
963 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
964 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
966 int factor
= 1 << get_operand_specific_data (self
);
967 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
968 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
972 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
973 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
974 value. fields[0] specifies the base register field. */
976 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand
*self
,
977 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
978 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
979 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
981 int factor
= 1 << get_operand_specific_data (self
);
982 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
983 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
987 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
988 is SELF's operand-dependent value. fields[0] specifies the base
989 register field and fields[1] specifies the offset register field. */
991 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand
*self
,
992 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
993 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
994 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
996 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
997 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1001 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1002 <shift> is SELF's operand-dependent value. fields[0] specifies the
1003 base register field, fields[1] specifies the offset register field and
1004 fields[2] is a single-bit field that selects SXTW over UXTW. */
1006 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand
*self
,
1007 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1008 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1009 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1011 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1012 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1013 if (info
->shifter
.kind
== AARCH64_MOD_UXTW
)
1014 insert_field (self
->fields
[2], code
, 0, 0);
1016 insert_field (self
->fields
[2], code
, 1, 0);
1020 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1021 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1022 fields[0] specifies the base register field. */
1024 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand
*self
,
1025 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1026 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1027 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1029 int factor
= 1 << get_operand_specific_data (self
);
1030 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1031 insert_field (FLD_imm5
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1035 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1036 where <modifier> is fixed by the instruction and where <msz> is a
1037 2-bit unsigned number. fields[0] specifies the base register field
1038 and fields[1] specifies the offset register field. */
1040 aarch64_ext_sve_addr_zz (const aarch64_operand
*self
,
1041 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1042 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1044 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1045 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1046 insert_field (FLD_SVE_msz
, code
, info
->shifter
.amount
, 0);
1050 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1051 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1052 field and fields[1] specifies the offset register field. */
1054 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand
*self
,
1055 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1056 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1057 aarch64_operand_error
*errors
)
1059 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1062 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1063 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1064 field and fields[1] specifies the offset register field. */
1066 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand
*self
,
1067 const aarch64_opnd_info
*info
,
1069 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1070 aarch64_operand_error
*errors
)
1072 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1075 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1076 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1077 field and fields[1] specifies the offset register field. */
1079 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand
*self
,
1080 const aarch64_opnd_info
*info
,
1082 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1083 aarch64_operand_error
*errors
)
1085 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1088 /* Encode an SVE ADD/SUB immediate. */
1090 aarch64_ins_sve_aimm (const aarch64_operand
*self
,
1091 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1092 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1093 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1095 if (info
->shifter
.amount
== 8)
1096 insert_all_fields (self
, code
, (info
->imm
.value
& 0xff) | 256);
1097 else if (info
->imm
.value
!= 0 && (info
->imm
.value
& 0xff) == 0)
1098 insert_all_fields (self
, code
, ((info
->imm
.value
/ 256) & 0xff) | 256);
1100 insert_all_fields (self
, code
, info
->imm
.value
& 0xff);
1104 /* Encode an SVE CPY/DUP immediate. */
1106 aarch64_ins_sve_asimm (const aarch64_operand
*self
,
1107 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1108 const aarch64_inst
*inst
,
1109 aarch64_operand_error
*errors
)
1111 return aarch64_ins_sve_aimm (self
, info
, code
, inst
, errors
);
1114 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1115 array specifies which field to use for Zn. MM is encoded in the
1116 concatenation of imm5 and SVE_tszh, with imm5 being the less
1117 significant part. */
1119 aarch64_ins_sve_index (const aarch64_operand
*self
,
1120 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1121 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1122 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1124 unsigned int esize
= aarch64_get_qualifier_esize (info
->qualifier
);
1125 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, 0);
1126 insert_fields (code
, (info
->reglane
.index
* 2 + 1) * esize
, 0,
1127 2, FLD_imm5
, FLD_SVE_tszh
);
1131 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1133 aarch64_ins_sve_limm_mov (const aarch64_operand
*self
,
1134 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1135 const aarch64_inst
*inst
,
1136 aarch64_operand_error
*errors
)
1138 return aarch64_ins_limm (self
, info
, code
, inst
, errors
);
1141 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1142 and where MM occupies the most-significant part. The operand-dependent
1143 value specifies the number of bits in Zn. */
1145 aarch64_ins_sve_quad_index (const aarch64_operand
*self
,
1146 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1147 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1148 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1150 unsigned int reg_bits
= get_operand_specific_data (self
);
1151 assert (info
->reglane
.regno
< (1U << reg_bits
));
1152 unsigned int val
= (info
->reglane
.index
<< reg_bits
) + info
->reglane
.regno
;
1153 insert_all_fields (self
, code
, val
);
1157 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1160 aarch64_ins_sve_reglist (const aarch64_operand
*self
,
1161 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1162 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1163 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1165 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
1169 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1170 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1173 aarch64_ins_sve_scale (const aarch64_operand
*self
,
1174 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1175 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1176 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1178 insert_all_fields (self
, code
, info
->imm
.value
);
1179 insert_field (FLD_SVE_imm4
, code
, info
->shifter
.amount
- 1, 0);
1183 /* Encode an SVE shift left immediate. */
1185 aarch64_ins_sve_shlimm (const aarch64_operand
*self
,
1186 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1187 const aarch64_inst
*inst
,
1188 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1190 const aarch64_opnd_info
*prev_operand
;
1193 assert (info
->idx
> 0);
1194 prev_operand
= &inst
->operands
[info
->idx
- 1];
1195 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1196 insert_all_fields (self
, code
, 8 * esize
+ info
->imm
.value
);
1200 /* Encode an SVE shift right immediate. */
1202 aarch64_ins_sve_shrimm (const aarch64_operand
*self
,
1203 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1204 const aarch64_inst
*inst
,
1205 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1207 const aarch64_opnd_info
*prev_operand
;
1210 assert (info
->idx
> 0);
1211 prev_operand
= &inst
->operands
[info
->idx
- 1];
1212 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1213 insert_all_fields (self
, code
, 16 * esize
- info
->imm
.value
);
1217 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1218 The fields array specifies which field to use. */
1220 aarch64_ins_sve_float_half_one (const aarch64_operand
*self
,
1221 const aarch64_opnd_info
*info
,
1223 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1224 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1226 if (info
->imm
.value
== 0x3f000000)
1227 insert_field (self
->fields
[0], code
, 0, 0);
1229 insert_field (self
->fields
[0], code
, 1, 0);
1233 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1234 The fields array specifies which field to use. */
1236 aarch64_ins_sve_float_half_two (const aarch64_operand
*self
,
1237 const aarch64_opnd_info
*info
,
1239 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1240 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1242 if (info
->imm
.value
== 0x3f000000)
1243 insert_field (self
->fields
[0], code
, 0, 0);
1245 insert_field (self
->fields
[0], code
, 1, 0);
1249 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1250 The fields array specifies which field to use. */
1252 aarch64_ins_sve_float_zero_one (const aarch64_operand
*self
,
1253 const aarch64_opnd_info
*info
,
1255 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1256 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1258 if (info
->imm
.value
== 0)
1259 insert_field (self
->fields
[0], code
, 0, 0);
1261 insert_field (self
->fields
[0], code
, 1, 0);
1265 /* Miscellaneous encoding functions. */
1267 /* Encode size[0], i.e. bit 22, for
1268 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1271 encode_asimd_fcvt (aarch64_inst
*inst
)
1274 aarch64_field field
= {0, 0};
1275 enum aarch64_opnd_qualifier qualifier
;
1277 switch (inst
->opcode
->op
)
1281 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1282 qualifier
= inst
->operands
[1].qualifier
;
1286 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1287 qualifier
= inst
->operands
[0].qualifier
;
1292 assert (qualifier
== AARCH64_OPND_QLF_V_4S
1293 || qualifier
== AARCH64_OPND_QLF_V_2D
);
1294 value
= (qualifier
== AARCH64_OPND_QLF_V_4S
) ? 0 : 1;
1295 gen_sub_field (FLD_size
, 0, 1, &field
);
1296 insert_field_2 (&field
, &inst
->value
, value
, 0);
1299 /* Encode size[0], i.e. bit 22, for
1300 e.g. FCVTXN <Vb><d>, <Va><n>. */
1303 encode_asisd_fcvtxn (aarch64_inst
*inst
)
1305 aarch64_insn val
= 1;
1306 aarch64_field field
= {0, 0};
1307 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_S
);
1308 gen_sub_field (FLD_size
, 0, 1, &field
);
1309 insert_field_2 (&field
, &inst
->value
, val
, 0);
1312 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1314 encode_fcvt (aarch64_inst
*inst
)
1317 const aarch64_field field
= {15, 2};
1320 switch (inst
->operands
[0].qualifier
)
1322 case AARCH64_OPND_QLF_S_S
: val
= 0; break;
1323 case AARCH64_OPND_QLF_S_D
: val
= 1; break;
1324 case AARCH64_OPND_QLF_S_H
: val
= 3; break;
1327 insert_field_2 (&field
, &inst
->value
, val
, 0);
1332 /* Return the index in qualifiers_list that INST is using. Should only
1333 be called once the qualifiers are known to be valid. */
1336 aarch64_get_variant (struct aarch64_inst
*inst
)
1338 int i
, nops
, variant
;
1340 nops
= aarch64_num_of_operands (inst
->opcode
);
1341 for (variant
= 0; variant
< AARCH64_MAX_QLF_SEQ_NUM
; ++variant
)
1343 for (i
= 0; i
< nops
; ++i
)
1344 if (inst
->opcode
->qualifiers_list
[variant
][i
]
1345 != inst
->operands
[i
].qualifier
)
1353 /* Do miscellaneous encodings that are not common enough to be driven by
1357 do_misc_encoding (aarch64_inst
*inst
)
1361 switch (inst
->opcode
->op
)
1370 encode_asimd_fcvt (inst
);
1373 encode_asisd_fcvtxn (inst
);
1377 /* Copy Pn to Pm and Pg. */
1378 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1379 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1380 insert_field (FLD_SVE_Pg4_10
, &inst
->value
, value
, 0);
1383 /* Copy Zd to Zm. */
1384 value
= extract_field (FLD_SVE_Zd
, inst
->value
, 0);
1385 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1388 /* Fill in the zero immediate. */
1389 insert_fields (&inst
->value
, 1 << aarch64_get_variant (inst
), 0,
1390 2, FLD_imm5
, FLD_SVE_tszh
);
1393 /* Copy Zn to Zm. */
1394 value
= extract_field (FLD_SVE_Zn
, inst
->value
, 0);
1395 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1400 /* Copy Pd to Pm. */
1401 value
= extract_field (FLD_SVE_Pd
, inst
->value
, 0);
1402 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1404 case OP_MOVZS_P_P_P
:
1406 /* Copy Pn to Pm. */
1407 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1408 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1410 case OP_NOTS_P_P_P_Z
:
1411 case OP_NOT_P_P_P_Z
:
1412 /* Copy Pg to Pm. */
1413 value
= extract_field (FLD_SVE_Pg4_10
, inst
->value
, 0);
1414 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1420 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1422 encode_sizeq (aarch64_inst
*inst
)
1425 enum aarch64_field_kind kind
;
1428 /* Get the index of the operand whose information we are going to use
1429 to encode the size and Q fields.
1430 This is deduced from the possible valid qualifier lists. */
1431 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1432 DEBUG_TRACE ("idx: %d; qualifier: %s", idx
,
1433 aarch64_get_qualifier_name (inst
->operands
[idx
].qualifier
));
1434 sizeq
= aarch64_get_qualifier_standard_value (inst
->operands
[idx
].qualifier
);
1436 insert_field (FLD_Q
, &inst
->value
, sizeq
& 0x1, inst
->opcode
->mask
);
1438 if (inst
->opcode
->iclass
== asisdlse
1439 || inst
->opcode
->iclass
== asisdlsep
1440 || inst
->opcode
->iclass
== asisdlso
1441 || inst
->opcode
->iclass
== asisdlsop
)
1442 kind
= FLD_vldst_size
;
1445 insert_field (kind
, &inst
->value
, (sizeq
>> 1) & 0x3, inst
->opcode
->mask
);
1448 /* Opcodes that have fields shared by multiple operands are usually flagged
1449 with flags. In this function, we detect such flags and use the
1450 information in one of the related operands to do the encoding. The 'one'
1451 operand is not any operand but one of the operands that has the enough
1452 information for such an encoding. */
1455 do_special_encoding (struct aarch64_inst
*inst
)
1458 aarch64_insn value
= 0;
1460 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst
->value
);
1462 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1463 if (inst
->opcode
->flags
& F_COND
)
1465 insert_field (FLD_cond2
, &inst
->value
, inst
->cond
->value
, 0);
1467 if (inst
->opcode
->flags
& F_SF
)
1469 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1470 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1471 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1473 insert_field (FLD_sf
, &inst
->value
, value
, 0);
1474 if (inst
->opcode
->flags
& F_N
)
1475 insert_field (FLD_N
, &inst
->value
, value
, inst
->opcode
->mask
);
1477 if (inst
->opcode
->flags
& F_LSE_SZ
)
1479 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1480 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1481 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1483 insert_field (FLD_lse_sz
, &inst
->value
, value
, 0);
1485 if (inst
->opcode
->flags
& F_SIZEQ
)
1486 encode_sizeq (inst
);
1487 if (inst
->opcode
->flags
& F_FPTYPE
)
1489 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
1490 switch (inst
->operands
[idx
].qualifier
)
1492 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
1493 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
1494 case AARCH64_OPND_QLF_S_H
: value
= 3; break;
1495 default: assert (0);
1497 insert_field (FLD_type
, &inst
->value
, value
, 0);
1499 if (inst
->opcode
->flags
& F_SSIZE
)
1501 enum aarch64_opnd_qualifier qualifier
;
1502 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
1503 qualifier
= inst
->operands
[idx
].qualifier
;
1504 assert (qualifier
>= AARCH64_OPND_QLF_S_B
1505 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
1506 value
= aarch64_get_qualifier_standard_value (qualifier
);
1507 insert_field (FLD_size
, &inst
->value
, value
, inst
->opcode
->mask
);
1509 if (inst
->opcode
->flags
& F_T
)
1511 int num
; /* num of consecutive '0's on the right side of imm5<3:0>. */
1512 aarch64_field field
= {0, 0};
1513 enum aarch64_opnd_qualifier qualifier
;
1516 qualifier
= inst
->operands
[idx
].qualifier
;
1517 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1518 == AARCH64_OPND_CLASS_SIMD_REG
1519 && qualifier
>= AARCH64_OPND_QLF_V_8B
1520 && qualifier
<= AARCH64_OPND_QLF_V_2D
);
1531 value
= aarch64_get_qualifier_standard_value (qualifier
);
1532 insert_field (FLD_Q
, &inst
->value
, value
& 0x1, inst
->opcode
->mask
);
1533 num
= (int) value
>> 1;
1534 assert (num
>= 0 && num
<= 3);
1535 gen_sub_field (FLD_imm5
, 0, num
+ 1, &field
);
1536 insert_field_2 (&field
, &inst
->value
, 1 << num
, inst
->opcode
->mask
);
1538 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
1540 /* Use Rt to encode in the case of e.g.
1541 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1542 enum aarch64_opnd_qualifier qualifier
;
1543 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
1545 /* Otherwise use the result operand, which has to be a integer
1548 assert (idx
== 0 || idx
== 1);
1549 assert (aarch64_get_operand_class (inst
->opcode
->operands
[idx
])
1550 == AARCH64_OPND_CLASS_INT_REG
);
1551 qualifier
= inst
->operands
[idx
].qualifier
;
1552 insert_field (FLD_Q
, &inst
->value
,
1553 aarch64_get_qualifier_standard_value (qualifier
), 0);
1555 if (inst
->opcode
->flags
& F_LDS_SIZE
)
1557 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1558 enum aarch64_opnd_qualifier qualifier
;
1559 aarch64_field field
= {0, 0};
1560 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1561 == AARCH64_OPND_CLASS_INT_REG
);
1562 gen_sub_field (FLD_opc
, 0, 1, &field
);
1563 qualifier
= inst
->operands
[0].qualifier
;
1564 insert_field_2 (&field
, &inst
->value
,
1565 1 - aarch64_get_qualifier_standard_value (qualifier
), 0);
1567 /* Miscellaneous encoding as the last step. */
1568 if (inst
->opcode
->flags
& F_MISC
)
1569 do_misc_encoding (inst
);
1571 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst
->value
);
1574 /* Some instructions (including all SVE ones) use the instruction class
1575 to describe how a qualifiers_list index is represented in the instruction
1576 encoding. If INST is such an instruction, encode the chosen qualifier
1580 aarch64_encode_variant_using_iclass (struct aarch64_inst
*inst
)
1582 switch (inst
->opcode
->iclass
)
1585 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1586 0, 2, FLD_SVE_M_14
, FLD_size
);
1590 case sve_shift_pred
:
1591 case sve_shift_unpred
:
1592 /* For indices and shift amounts, the variant is encoded as
1593 part of the immediate. */
1597 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1598 and depend on the immediate. They don't have a separate
1603 /* sve_misc instructions have only a single variant. */
1607 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1608 0, 2, FLD_SVE_M_16
, FLD_size
);
1612 insert_field (FLD_SVE_M_4
, &inst
->value
, aarch64_get_variant (inst
), 0);
1617 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
), 0);
1621 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
) + 1, 0);
1625 insert_field (FLD_SVE_sz
, &inst
->value
, aarch64_get_variant (inst
), 0);
1633 /* Converters converting an alias opcode instruction to its real form. */
1635 /* ROR <Wd>, <Ws>, #<shift>
1637 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1639 convert_ror_to_extr (aarch64_inst
*inst
)
1641 copy_operand_info (inst
, 3, 2);
1642 copy_operand_info (inst
, 2, 1);
1645 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1647 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1649 convert_xtl_to_shll (aarch64_inst
*inst
)
1651 inst
->operands
[2].qualifier
= inst
->operands
[1].qualifier
;
1652 inst
->operands
[2].imm
.value
= 0;
1656 LSR <Xd>, <Xn>, #<shift>
1658 UBFM <Xd>, <Xn>, #<shift>, #63. */
1660 convert_sr_to_bfm (aarch64_inst
*inst
)
1662 inst
->operands
[3].imm
.value
=
1663 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
1666 /* Convert MOV to ORR. */
1668 convert_mov_to_orr (aarch64_inst
*inst
)
1670 /* MOV <Vd>.<T>, <Vn>.<T>
1672 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1673 copy_operand_info (inst
, 2, 1);
1676 /* When <imms> >= <immr>, the instruction written:
1677 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1679 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1682 convert_bfx_to_bfm (aarch64_inst
*inst
)
1686 /* Convert the operand. */
1687 lsb
= inst
->operands
[2].imm
.value
;
1688 width
= inst
->operands
[3].imm
.value
;
1689 inst
->operands
[2].imm
.value
= lsb
;
1690 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
1693 /* When <imms> < <immr>, the instruction written:
1694 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1696 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1699 convert_bfi_to_bfm (aarch64_inst
*inst
)
1703 /* Convert the operand. */
1704 lsb
= inst
->operands
[2].imm
.value
;
1705 width
= inst
->operands
[3].imm
.value
;
1706 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1708 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1709 inst
->operands
[3].imm
.value
= width
- 1;
1713 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1714 inst
->operands
[3].imm
.value
= width
- 1;
1718 /* The instruction written:
1719 BFC <Xd>, #<lsb>, #<width>
1721 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1724 convert_bfc_to_bfm (aarch64_inst
*inst
)
1729 copy_operand_info (inst
, 3, 2);
1730 copy_operand_info (inst
, 2, 1);
1731 copy_operand_info (inst
, 1, 0);
1732 inst
->operands
[1].reg
.regno
= 0x1f;
1734 /* Convert the immediate operand. */
1735 lsb
= inst
->operands
[2].imm
.value
;
1736 width
= inst
->operands
[3].imm
.value
;
1737 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1739 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1740 inst
->operands
[3].imm
.value
= width
- 1;
1744 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1745 inst
->operands
[3].imm
.value
= width
- 1;
1749 /* The instruction written:
1750 LSL <Xd>, <Xn>, #<shift>
1752 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1755 convert_lsl_to_ubfm (aarch64_inst
*inst
)
1757 int64_t shift
= inst
->operands
[2].imm
.value
;
1759 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1761 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
1762 inst
->operands
[3].imm
.value
= 31 - shift
;
1766 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
1767 inst
->operands
[3].imm
.value
= 63 - shift
;
1771 /* CINC <Wd>, <Wn>, <cond>
1773 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1776 convert_to_csel (aarch64_inst
*inst
)
1778 copy_operand_info (inst
, 3, 2);
1779 copy_operand_info (inst
, 2, 1);
1780 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1783 /* CSET <Wd>, <cond>
1785 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1788 convert_cset_to_csinc (aarch64_inst
*inst
)
1790 copy_operand_info (inst
, 3, 1);
1791 copy_operand_info (inst
, 2, 0);
1792 copy_operand_info (inst
, 1, 0);
1793 inst
->operands
[1].reg
.regno
= 0x1f;
1794 inst
->operands
[2].reg
.regno
= 0x1f;
1795 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1800 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1803 convert_mov_to_movewide (aarch64_inst
*inst
)
1806 uint32_t shift_amount
;
1809 switch (inst
->opcode
->op
)
1811 case OP_MOV_IMM_WIDE
:
1812 value
= inst
->operands
[1].imm
.value
;
1814 case OP_MOV_IMM_WIDEN
:
1815 value
= ~inst
->operands
[1].imm
.value
;
1820 inst
->operands
[1].type
= AARCH64_OPND_HALF
;
1821 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
1822 if (! aarch64_wide_constant_p (value
, is32
, &shift_amount
))
1823 /* The constraint check should have guaranteed this wouldn't happen. */
1825 value
>>= shift_amount
;
1827 inst
->operands
[1].imm
.value
= value
;
1828 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_LSL
;
1829 inst
->operands
[1].shifter
.amount
= shift_amount
;
1834 ORR <Wd>, WZR, #<imm>. */
1837 convert_mov_to_movebitmask (aarch64_inst
*inst
)
1839 copy_operand_info (inst
, 2, 1);
1840 inst
->operands
[1].reg
.regno
= 0x1f;
1841 inst
->operands
[1].skip
= 0;
1844 /* Some alias opcodes are assembled by being converted to their real-form. */
1847 convert_to_real (aarch64_inst
*inst
, const aarch64_opcode
*real
)
1849 const aarch64_opcode
*alias
= inst
->opcode
;
1851 if ((alias
->flags
& F_CONV
) == 0)
1852 goto convert_to_real_return
;
1858 convert_sr_to_bfm (inst
);
1861 convert_lsl_to_ubfm (inst
);
1866 convert_to_csel (inst
);
1870 convert_cset_to_csinc (inst
);
1875 convert_bfx_to_bfm (inst
);
1880 convert_bfi_to_bfm (inst
);
1883 convert_bfc_to_bfm (inst
);
1886 convert_mov_to_orr (inst
);
1888 case OP_MOV_IMM_WIDE
:
1889 case OP_MOV_IMM_WIDEN
:
1890 convert_mov_to_movewide (inst
);
1892 case OP_MOV_IMM_LOG
:
1893 convert_mov_to_movebitmask (inst
);
1896 convert_ror_to_extr (inst
);
1902 convert_xtl_to_shll (inst
);
1908 convert_to_real_return
:
1909 aarch64_replace_opcode (inst
, real
);
1912 /* Encode *INST_ORI of the opcode code OPCODE.
1913 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1914 matched operand qualifier sequence in *QLF_SEQ. */
1917 aarch64_opcode_encode (const aarch64_opcode
*opcode
,
1918 const aarch64_inst
*inst_ori
, aarch64_insn
*code
,
1919 aarch64_opnd_qualifier_t
*qlf_seq
,
1920 aarch64_operand_error
*mismatch_detail
)
1923 const aarch64_opcode
*aliased
;
1924 aarch64_inst copy
, *inst
;
1926 DEBUG_TRACE ("enter with %s", opcode
->name
);
1928 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1932 assert (inst
->opcode
== NULL
|| inst
->opcode
== opcode
);
1933 if (inst
->opcode
== NULL
)
1934 inst
->opcode
= opcode
;
1936 /* Constrain the operands.
1937 After passing this, the encoding is guaranteed to succeed. */
1938 if (aarch64_match_operands_constraint (inst
, mismatch_detail
) == 0)
1940 DEBUG_TRACE ("FAIL since operand constraint not met");
1944 /* Get the base value.
1945 Note: this has to be before the aliasing handling below in order to
1946 get the base value from the alias opcode before we move on to the
1947 aliased opcode for encoding. */
1948 inst
->value
= opcode
->opcode
;
1950 /* No need to do anything else if the opcode does not have any operand. */
1951 if (aarch64_num_of_operands (opcode
) == 0)
1954 /* Assign operand indexes and check types. Also put the matched
1955 operand qualifiers in *QLF_SEQ to return. */
1956 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1958 assert (opcode
->operands
[i
] == inst
->operands
[i
].type
);
1959 inst
->operands
[i
].idx
= i
;
1960 if (qlf_seq
!= NULL
)
1961 *qlf_seq
= inst
->operands
[i
].qualifier
;
1964 aliased
= aarch64_find_real_opcode (opcode
);
1965 /* If the opcode is an alias and it does not ask for direct encoding by
1966 itself, the instruction will be transformed to the form of real opcode
1967 and the encoding will be carried out using the rules for the aliased
1969 if (aliased
!= NULL
&& (opcode
->flags
& F_CONV
))
1971 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1972 aliased
->name
, opcode
->name
);
1973 /* Convert the operands to the form of the real opcode. */
1974 convert_to_real (inst
, aliased
);
1978 aarch64_opnd_info
*info
= inst
->operands
;
1980 /* Call the inserter of each operand. */
1981 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++info
)
1983 const aarch64_operand
*opnd
;
1984 enum aarch64_opnd type
= opcode
->operands
[i
];
1985 if (type
== AARCH64_OPND_NIL
)
1989 DEBUG_TRACE ("skip the incomplete operand %d", i
);
1992 opnd
= &aarch64_operands
[type
];
1993 if (operand_has_inserter (opnd
)
1994 && !aarch64_insert_operand (opnd
, info
, &inst
->value
, inst
,
1999 /* Call opcode encoders indicated by flags. */
2000 if (opcode_has_special_coder (opcode
))
2001 do_special_encoding (inst
);
2003 /* Possibly use the instruction class to encode the chosen qualifier
2005 aarch64_encode_variant_using_iclass (inst
);
2008 DEBUG_TRACE ("exit with %s", opcode
->name
);
2010 *code
= inst
->value
;