/* aarch64-asm.c -- AArch64 assembler support.
   Copyright (C) 2012-2017 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
#include "sysdep.h"
#include <stdarg.h>
#include "libiberty.h"
#include "aarch64-asm.h"
28 /* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
32 N.B. the fields are required to be in such an order than the least signficant
33 field for VALUE comes the first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
36 the order of M, L, H. */
39 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
42 const aarch64_field
*field
;
43 enum aarch64_field_kind kind
;
47 num
= va_arg (va
, uint32_t);
51 kind
= va_arg (va
, enum aarch64_field_kind
);
52 field
= &fields
[kind
];
53 insert_field (kind
, code
, value
, mask
);
54 value
>>= field
->width
;
59 /* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
63 insert_all_fields (const aarch64_operand
*self
, aarch64_insn
*code
,
67 enum aarch64_field_kind kind
;
69 for (i
= ARRAY_SIZE (self
->fields
); i
-- > 0; )
70 if (self
->fields
[i
] != FLD_NIL
)
72 kind
= self
->fields
[i
];
73 insert_field (kind
, code
, value
, 0);
74 value
>>= fields
[kind
].width
;
78 /* Operand inserters. */
80 /* Insert register number. */
82 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
84 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
86 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
90 /* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
94 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
95 aarch64_insn
*code
, const aarch64_inst
*inst
)
98 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
99 /* index and/or type */
100 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
102 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
103 if (info
->type
== AARCH64_OPND_En
104 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
106 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
107 assert (info
->idx
== 1); /* Vn */
108 aarch64_insn value
= info
->reglane
.index
<< pos
;
109 insert_field (FLD_imm4
, code
, value
, 0);
113 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
120 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
121 insert_field (FLD_imm5
, code
, value
, 0);
126 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
127 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
128 unsigned reglane_index
= info
->reglane
.index
;
130 if (inst
->opcode
->op
== OP_FCMLA_ELEM
)
131 /* Complex operand takes two elements. */
134 switch (info
->qualifier
)
136 case AARCH64_OPND_QLF_S_H
:
138 assert (reglane_index
< 8);
139 insert_fields (code
, reglane_index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
141 case AARCH64_OPND_QLF_S_S
:
143 assert (reglane_index
< 4);
144 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
146 case AARCH64_OPND_QLF_S_D
:
148 assert (reglane_index
< 2);
149 insert_field (FLD_H
, code
, reglane_index
, 0);
158 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
160 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
162 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
165 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
167 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
171 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
172 in AdvSIMD load/store instructions. */
174 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
175 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
176 const aarch64_inst
*inst
)
178 aarch64_insn value
= 0;
179 /* Number of elements in each structure to be loaded/stored. */
180 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
183 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
188 switch (info
->reglist
.num_regs
)
190 case 1: value
= 0x7; break;
191 case 2: value
= 0xa; break;
192 case 3: value
= 0x6; break;
193 case 4: value
= 0x2; break;
198 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
209 insert_field (FLD_opcode
, code
, value
, 0);
214 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
215 single structure to all lanes instructions. */
217 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
218 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
219 const aarch64_inst
*inst
)
222 /* The opcode dependent area stores the number of elements in
223 each structure to be loaded/stored. */
224 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
227 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
229 value
= (aarch64_insn
) 0;
230 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
231 /* OP_LD1R does not have alternating variant, but have "two consecutive"
233 value
= (aarch64_insn
) 1;
234 insert_field (FLD_S
, code
, value
, 0);
239 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
240 operand e.g. Vt in AdvSIMD load/store single element instructions. */
242 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
243 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
244 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
246 aarch64_field field
= {0, 0};
247 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
248 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
250 assert (info
->reglist
.has_index
);
253 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
254 /* Encode the index, opcode<2:1> and size. */
255 switch (info
->qualifier
)
257 case AARCH64_OPND_QLF_S_B
:
258 /* Index encoded in "Q:S:size". */
259 QSsize
= info
->reglist
.index
;
262 case AARCH64_OPND_QLF_S_H
:
263 /* Index encoded in "Q:S:size<1>". */
264 QSsize
= info
->reglist
.index
<< 1;
267 case AARCH64_OPND_QLF_S_S
:
268 /* Index encoded in "Q:S". */
269 QSsize
= info
->reglist
.index
<< 2;
272 case AARCH64_OPND_QLF_S_D
:
273 /* Index encoded in "Q". */
274 QSsize
= info
->reglist
.index
<< 3 | 0x1;
280 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
281 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
282 insert_field_2 (&field
, code
, opcodeh2
, 0);
287 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
288 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
289 or SSHR <V><d>, <V><n>, #<shift>. */
291 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
292 const aarch64_opnd_info
*info
,
293 aarch64_insn
*code
, const aarch64_inst
*inst
)
295 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
298 if (inst
->opcode
->iclass
== asimdshf
)
302 0000 x SEE AdvSIMD modified immediate
311 Q
= (val
& 0x1) ? 1 : 0;
312 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
316 assert (info
->type
== AARCH64_OPND_IMM_VLSR
317 || info
->type
== AARCH64_OPND_IMM_VLSL
);
319 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
322 0000 SEE AdvSIMD modified immediate
323 0001 (16-UInt(immh:immb))
324 001x (32-UInt(immh:immb))
325 01xx (64-UInt(immh:immb))
326 1xxx (128-UInt(immh:immb)) */
327 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
331 0000 SEE AdvSIMD modified immediate
332 0001 (UInt(immh:immb)-8)
333 001x (UInt(immh:immb)-16)
334 01xx (UInt(immh:immb)-32)
335 1xxx (UInt(immh:immb)-64) */
336 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
337 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
342 /* Insert fields for e.g. the immediate operands in
343 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
345 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
347 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
351 imm
= info
->imm
.value
;
352 if (operand_need_shift_by_two (self
))
354 insert_all_fields (self
, code
, imm
);
358 /* Insert immediate and its shift amount for e.g. the last operand in
359 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
361 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
362 aarch64_insn
*code
, const aarch64_inst
*inst
)
365 aarch64_ins_imm (self
, info
, code
, inst
);
367 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
371 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
372 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
374 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
375 const aarch64_opnd_info
*info
,
377 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
379 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
380 uint64_t imm
= info
->imm
.value
;
381 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
382 int amount
= info
->shifter
.amount
;
383 aarch64_field field
= {0, 0};
385 /* a:b:c:d:e:f:g:h */
386 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
388 /* Either MOVI <Dd>, #<imm>
389 or MOVI <Vd>.2D, #<imm>.
390 <imm> is a 64-bit immediate
391 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
392 encoded in "a:b:c:d:e:f:g:h". */
393 imm
= aarch64_shrink_expanded_imm8 (imm
);
394 assert ((int)imm
>= 0);
396 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
398 if (kind
== AARCH64_MOD_NONE
)
401 /* shift amount partially in cmode */
402 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
403 if (kind
== AARCH64_MOD_LSL
)
405 /* AARCH64_MOD_LSL: shift zeros. */
406 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
407 assert (esize
== 4 || esize
== 2 || esize
== 1);
408 /* For 8-bit move immediate, the optional LSL #0 does not require
414 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
416 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
420 /* AARCH64_MOD_MSL: shift ones. */
422 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
424 insert_field_2 (&field
, code
, amount
, 0);
429 /* Insert fields for an 8-bit floating-point immediate. */
431 aarch64_ins_fpimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
433 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
435 insert_all_fields (self
, code
, info
->imm
.value
);
439 /* Insert 1-bit rotation immediate (#90 or #270). */
441 aarch64_ins_imm_rotate1 (const aarch64_operand
*self
,
442 const aarch64_opnd_info
*info
,
443 aarch64_insn
*code
, const aarch64_inst
*inst
)
445 uint64_t rot
= (info
->imm
.value
- 90) / 180;
447 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
451 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
453 aarch64_ins_imm_rotate2 (const aarch64_operand
*self
,
454 const aarch64_opnd_info
*info
,
455 aarch64_insn
*code
, const aarch64_inst
*inst
)
457 uint64_t rot
= info
->imm
.value
/ 90;
459 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
463 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
464 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
466 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
468 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
470 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
474 /* Insert arithmetic immediate for e.g. the last operand in
475 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
477 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
478 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
481 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
482 insert_field (self
->fields
[0], code
, value
, 0);
483 /* imm12 (unsigned) */
484 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
488 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
489 the operand should be inverted before encoding. */
491 aarch64_ins_limm_1 (const aarch64_operand
*self
,
492 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
493 const aarch64_inst
*inst
, bfd_boolean invert_p
)
496 uint64_t imm
= info
->imm
.value
;
497 int esize
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
501 /* The constraint check should have guaranteed this wouldn't happen. */
502 assert (aarch64_logical_immediate_p (imm
, esize
, &value
));
504 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
509 /* Insert logical/bitmask immediate for e.g. the last operand in
510 ORR <Wd|WSP>, <Wn>, #<imm>. */
512 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
513 aarch64_insn
*code
, const aarch64_inst
*inst
)
515 return aarch64_ins_limm_1 (self
, info
, code
, inst
,
516 inst
->opcode
->op
== OP_BIC
);
519 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
521 aarch64_ins_inv_limm (const aarch64_operand
*self
,
522 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
523 const aarch64_inst
*inst
)
525 return aarch64_ins_limm_1 (self
, info
, code
, inst
, TRUE
);
528 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
529 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
531 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
532 aarch64_insn
*code
, const aarch64_inst
*inst
)
534 aarch64_insn value
= 0;
536 assert (info
->idx
== 0);
539 aarch64_ins_regno (self
, info
, code
, inst
);
540 if (inst
->opcode
->iclass
== ldstpair_indexed
541 || inst
->opcode
->iclass
== ldstnapair_offs
542 || inst
->opcode
->iclass
== ldstpair_off
543 || inst
->opcode
->iclass
== loadlit
)
546 switch (info
->qualifier
)
548 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
549 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
550 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
553 insert_field (FLD_ldst_size
, code
, value
, 0);
558 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
559 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
565 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
567 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
568 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
569 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
572 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
576 /* Encode the address operand for e.g.
577 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
579 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
580 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
581 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
584 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
587 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
589 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
591 if (kind
== AARCH64_MOD_LSL
)
592 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
593 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
595 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
596 S
= info
->shifter
.amount
!= 0;
598 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
602 Must be #0 if <extend> is explicitly LSL. */
603 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
604 insert_field (FLD_S
, code
, S
, 0);
609 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
611 aarch64_ins_addr_simm (const aarch64_operand
*self
,
612 const aarch64_opnd_info
*info
,
614 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
619 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
620 /* simm (imm9 or imm7) */
621 imm
= info
->addr
.offset
.imm
;
622 if (self
->fields
[0] == FLD_imm7
)
623 /* scaled immediate in ld/st pair instructions.. */
624 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
625 insert_field (self
->fields
[0], code
, imm
, 0);
626 /* pre/post- index */
627 if (info
->addr
.writeback
)
629 assert (inst
->opcode
->iclass
!= ldst_unscaled
630 && inst
->opcode
->iclass
!= ldstnapair_offs
631 && inst
->opcode
->iclass
!= ldstpair_off
632 && inst
->opcode
->iclass
!= ldst_unpriv
);
633 assert (info
->addr
.preind
!= info
->addr
.postind
);
634 if (info
->addr
.preind
)
635 insert_field (self
->fields
[1], code
, 1, 0);
641 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
643 aarch64_ins_addr_simm10 (const aarch64_operand
*self
,
644 const aarch64_opnd_info
*info
,
646 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
651 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
653 imm
= info
->addr
.offset
.imm
>> 3;
654 insert_field (self
->fields
[1], code
, imm
>> 9, 0);
655 insert_field (self
->fields
[2], code
, imm
, 0);
657 if (info
->addr
.writeback
)
659 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
660 insert_field (self
->fields
[3], code
, 1, 0);
665 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
667 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
668 const aarch64_opnd_info
*info
,
670 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
672 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
675 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
677 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
681 /* Encode the address operand for e.g.
682 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
684 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
685 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
686 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
689 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
691 if (info
->addr
.offset
.is_reg
)
692 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
694 insert_field (FLD_Rm
, code
, 0x1f, 0);
698 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
700 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
701 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
702 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
705 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
709 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
711 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
712 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
713 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
715 /* op0:op1:CRn:CRm:op2 */
716 insert_fields (code
, info
->sysreg
, inst
->opcode
->mask
, 5,
717 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
721 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
723 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
724 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
725 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
728 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
733 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
735 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
736 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
737 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
739 /* op1:CRn:CRm:op2 */
740 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
741 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
745 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
748 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
749 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
750 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
753 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
757 /* Encode the prefetch operation option operand for e.g.
758 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
761 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
762 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
763 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
766 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
770 /* Encode the hint number for instructions that alias HINT but take an
774 aarch64_ins_hint (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
775 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
776 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
779 insert_fields (code
, info
->hint_option
->value
, 0, 2, FLD_op2
, FLD_CRm
);
783 /* Encode the extended register operand for e.g.
784 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
786 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
787 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
788 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
790 enum aarch64_modifier_kind kind
;
793 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
795 kind
= info
->shifter
.kind
;
796 if (kind
== AARCH64_MOD_LSL
)
797 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
798 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
799 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
801 insert_field (FLD_imm3
, code
, info
->shifter
.amount
, 0);
806 /* Encode the shifted register operand for e.g.
807 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
809 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
810 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
811 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
814 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
816 insert_field (FLD_shift
, code
,
817 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
819 insert_field (FLD_imm6
, code
, info
->shifter
.amount
, 0);
824 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
825 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
826 SELF's operand-dependent value. fields[0] specifies the field that
827 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
829 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand
*self
,
830 const aarch64_opnd_info
*info
,
832 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
834 int factor
= 1 + get_operand_specific_data (self
);
835 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
836 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
840 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
841 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
842 SELF's operand-dependent value. fields[0] specifies the field that
843 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
845 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand
*self
,
846 const aarch64_opnd_info
*info
,
848 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
850 int factor
= 1 + get_operand_specific_data (self
);
851 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
852 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
856 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
857 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
858 SELF's operand-dependent value. fields[0] specifies the field that
859 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
860 and imm3 fields, with imm3 being the less-significant part. */
862 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand
*self
,
863 const aarch64_opnd_info
*info
,
865 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
867 int factor
= 1 + get_operand_specific_data (self
);
868 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
869 insert_fields (code
, info
->addr
.offset
.imm
/ factor
, 0,
870 2, FLD_imm3
, FLD_SVE_imm6
);
874 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
875 is a 4-bit signed number and where <shift> is SELF's operand-dependent
876 value. fields[0] specifies the base register field. */
878 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand
*self
,
879 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
880 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
882 int factor
= 1 << get_operand_specific_data (self
);
883 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
884 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
888 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
889 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
890 value. fields[0] specifies the base register field. */
892 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand
*self
,
893 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
894 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
896 int factor
= 1 << get_operand_specific_data (self
);
897 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
898 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
902 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
903 is SELF's operand-dependent value. fields[0] specifies the base
904 register field and fields[1] specifies the offset register field. */
906 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand
*self
,
907 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
908 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
910 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
911 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
915 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
916 <shift> is SELF's operand-dependent value. fields[0] specifies the
917 base register field, fields[1] specifies the offset register field and
918 fields[2] is a single-bit field that selects SXTW over UXTW. */
920 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand
*self
,
921 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
922 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
924 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
925 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
926 if (info
->shifter
.kind
== AARCH64_MOD_UXTW
)
927 insert_field (self
->fields
[2], code
, 0, 0);
929 insert_field (self
->fields
[2], code
, 1, 0);
933 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
934 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
935 fields[0] specifies the base register field. */
937 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand
*self
,
938 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
939 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
941 int factor
= 1 << get_operand_specific_data (self
);
942 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
943 insert_field (FLD_imm5
, code
, info
->addr
.offset
.imm
/ factor
, 0);
947 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
948 where <modifier> is fixed by the instruction and where <msz> is a
949 2-bit unsigned number. fields[0] specifies the base register field
950 and fields[1] specifies the offset register field. */
952 aarch64_ext_sve_addr_zz (const aarch64_operand
*self
,
953 const aarch64_opnd_info
*info
, aarch64_insn
*code
)
955 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
956 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
957 insert_field (FLD_SVE_msz
, code
, info
->shifter
.amount
, 0);
961 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
962 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
963 field and fields[1] specifies the offset register field. */
965 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand
*self
,
966 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
967 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
969 return aarch64_ext_sve_addr_zz (self
, info
, code
);
972 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
973 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
974 field and fields[1] specifies the offset register field. */
976 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand
*self
,
977 const aarch64_opnd_info
*info
,
979 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
981 return aarch64_ext_sve_addr_zz (self
, info
, code
);
984 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
985 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
986 field and fields[1] specifies the offset register field. */
988 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand
*self
,
989 const aarch64_opnd_info
*info
,
991 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
993 return aarch64_ext_sve_addr_zz (self
, info
, code
);
996 /* Encode an SVE ADD/SUB immediate. */
998 aarch64_ins_sve_aimm (const aarch64_operand
*self
,
999 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1000 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1002 if (info
->shifter
.amount
== 8)
1003 insert_all_fields (self
, code
, (info
->imm
.value
& 0xff) | 256);
1004 else if (info
->imm
.value
!= 0 && (info
->imm
.value
& 0xff) == 0)
1005 insert_all_fields (self
, code
, ((info
->imm
.value
/ 256) & 0xff) | 256);
1007 insert_all_fields (self
, code
, info
->imm
.value
& 0xff);
1011 /* Encode an SVE CPY/DUP immediate. */
1013 aarch64_ins_sve_asimm (const aarch64_operand
*self
,
1014 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1015 const aarch64_inst
*inst
)
1017 return aarch64_ins_sve_aimm (self
, info
, code
, inst
);
1020 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1021 array specifies which field to use for Zn. MM is encoded in the
1022 concatenation of imm5 and SVE_tszh, with imm5 being the less
1023 significant part. */
1025 aarch64_ins_sve_index (const aarch64_operand
*self
,
1026 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1027 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1029 unsigned int esize
= aarch64_get_qualifier_esize (info
->qualifier
);
1030 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, 0);
1031 insert_fields (code
, (info
->reglane
.index
* 2 + 1) * esize
, 0,
1032 2, FLD_imm5
, FLD_SVE_tszh
);
1036 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1038 aarch64_ins_sve_limm_mov (const aarch64_operand
*self
,
1039 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1040 const aarch64_inst
*inst
)
1042 return aarch64_ins_limm (self
, info
, code
, inst
);
1045 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1046 and where MM occupies the most-significant part. The operand-dependent
1047 value specifies the number of bits in Zn. */
1049 aarch64_ins_sve_quad_index (const aarch64_operand
*self
,
1050 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1051 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1053 unsigned int reg_bits
= get_operand_specific_data (self
);
1054 assert (info
->reglane
.regno
< (1U << reg_bits
));
1055 unsigned int val
= (info
->reglane
.index
<< reg_bits
) + info
->reglane
.regno
;
1056 insert_all_fields (self
, code
, val
);
1060 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1063 aarch64_ins_sve_reglist (const aarch64_operand
*self
,
1064 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1065 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1067 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
1071 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1072 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1075 aarch64_ins_sve_scale (const aarch64_operand
*self
,
1076 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1077 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1079 insert_all_fields (self
, code
, info
->imm
.value
);
1080 insert_field (FLD_SVE_imm4
, code
, info
->shifter
.amount
- 1, 0);
1084 /* Encode an SVE shift left immediate. */
1086 aarch64_ins_sve_shlimm (const aarch64_operand
*self
,
1087 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1088 const aarch64_inst
*inst
)
1090 const aarch64_opnd_info
*prev_operand
;
1093 assert (info
->idx
> 0);
1094 prev_operand
= &inst
->operands
[info
->idx
- 1];
1095 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1096 insert_all_fields (self
, code
, 8 * esize
+ info
->imm
.value
);
1100 /* Encode an SVE shift right immediate. */
1102 aarch64_ins_sve_shrimm (const aarch64_operand
*self
,
1103 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1104 const aarch64_inst
*inst
)
1106 const aarch64_opnd_info
*prev_operand
;
1109 assert (info
->idx
> 0);
1110 prev_operand
= &inst
->operands
[info
->idx
- 1];
1111 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1112 insert_all_fields (self
, code
, 16 * esize
- info
->imm
.value
);
1116 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1117 The fields array specifies which field to use. */
1119 aarch64_ins_sve_float_half_one (const aarch64_operand
*self
,
1120 const aarch64_opnd_info
*info
,
1122 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1124 if (info
->imm
.value
== 0x3f000000)
1125 insert_field (self
->fields
[0], code
, 0, 0);
1127 insert_field (self
->fields
[0], code
, 1, 0);
1131 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1132 The fields array specifies which field to use. */
1134 aarch64_ins_sve_float_half_two (const aarch64_operand
*self
,
1135 const aarch64_opnd_info
*info
,
1137 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1139 if (info
->imm
.value
== 0x3f000000)
1140 insert_field (self
->fields
[0], code
, 0, 0);
1142 insert_field (self
->fields
[0], code
, 1, 0);
1146 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1147 The fields array specifies which field to use. */
1149 aarch64_ins_sve_float_zero_one (const aarch64_operand
*self
,
1150 const aarch64_opnd_info
*info
,
1152 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1154 if (info
->imm
.value
== 0)
1155 insert_field (self
->fields
[0], code
, 0, 0);
1157 insert_field (self
->fields
[0], code
, 1, 0);
1161 /* Miscellaneous encoding functions. */
1163 /* Encode size[0], i.e. bit 22, for
1164 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1167 encode_asimd_fcvt (aarch64_inst
*inst
)
1170 aarch64_field field
= {0, 0};
1171 enum aarch64_opnd_qualifier qualifier
;
1173 switch (inst
->opcode
->op
)
1177 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1178 qualifier
= inst
->operands
[1].qualifier
;
1182 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1183 qualifier
= inst
->operands
[0].qualifier
;
1188 assert (qualifier
== AARCH64_OPND_QLF_V_4S
1189 || qualifier
== AARCH64_OPND_QLF_V_2D
);
1190 value
= (qualifier
== AARCH64_OPND_QLF_V_4S
) ? 0 : 1;
1191 gen_sub_field (FLD_size
, 0, 1, &field
);
1192 insert_field_2 (&field
, &inst
->value
, value
, 0);
1195 /* Encode size[0], i.e. bit 22, for
1196 e.g. FCVTXN <Vb><d>, <Va><n>. */
1199 encode_asisd_fcvtxn (aarch64_inst
*inst
)
1201 aarch64_insn val
= 1;
1202 aarch64_field field
= {0, 0};
1203 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_S
);
1204 gen_sub_field (FLD_size
, 0, 1, &field
);
1205 insert_field_2 (&field
, &inst
->value
, val
, 0);
1208 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1210 encode_fcvt (aarch64_inst
*inst
)
1213 const aarch64_field field
= {15, 2};
1216 switch (inst
->operands
[0].qualifier
)
1218 case AARCH64_OPND_QLF_S_S
: val
= 0; break;
1219 case AARCH64_OPND_QLF_S_D
: val
= 1; break;
1220 case AARCH64_OPND_QLF_S_H
: val
= 3; break;
1223 insert_field_2 (&field
, &inst
->value
, val
, 0);
1228 /* Return the index in qualifiers_list that INST is using. Should only
1229 be called once the qualifiers are known to be valid. */
1232 aarch64_get_variant (struct aarch64_inst
*inst
)
1234 int i
, nops
, variant
;
1236 nops
= aarch64_num_of_operands (inst
->opcode
);
1237 for (variant
= 0; variant
< AARCH64_MAX_QLF_SEQ_NUM
; ++variant
)
1239 for (i
= 0; i
< nops
; ++i
)
1240 if (inst
->opcode
->qualifiers_list
[variant
][i
]
1241 != inst
->operands
[i
].qualifier
)
1249 /* Do miscellaneous encodings that are not common enough to be driven by
1253 do_misc_encoding (aarch64_inst
*inst
)
1257 switch (inst
->opcode
->op
)
1266 encode_asimd_fcvt (inst
);
1269 encode_asisd_fcvtxn (inst
);
1273 /* Copy Pn to Pm and Pg. */
1274 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1275 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1276 insert_field (FLD_SVE_Pg4_10
, &inst
->value
, value
, 0);
1279 /* Copy Zd to Zm. */
1280 value
= extract_field (FLD_SVE_Zd
, inst
->value
, 0);
1281 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1284 /* Fill in the zero immediate. */
1285 insert_fields (&inst
->value
, 1 << aarch64_get_variant (inst
), 0,
1286 2, FLD_imm5
, FLD_SVE_tszh
);
1289 /* Copy Zn to Zm. */
1290 value
= extract_field (FLD_SVE_Zn
, inst
->value
, 0);
1291 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1296 /* Copy Pd to Pm. */
1297 value
= extract_field (FLD_SVE_Pd
, inst
->value
, 0);
1298 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1300 case OP_MOVZS_P_P_P
:
1302 /* Copy Pn to Pm. */
1303 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1304 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1306 case OP_NOTS_P_P_P_Z
:
1307 case OP_NOT_P_P_P_Z
:
1308 /* Copy Pg to Pm. */
1309 value
= extract_field (FLD_SVE_Pg4_10
, inst
->value
, 0);
1310 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1316 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1318 encode_sizeq (aarch64_inst
*inst
)
1321 enum aarch64_field_kind kind
;
1324 /* Get the index of the operand whose information we are going to use
1325 to encode the size and Q fields.
1326 This is deduced from the possible valid qualifier lists. */
1327 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1328 DEBUG_TRACE ("idx: %d; qualifier: %s", idx
,
1329 aarch64_get_qualifier_name (inst
->operands
[idx
].qualifier
));
1330 sizeq
= aarch64_get_qualifier_standard_value (inst
->operands
[idx
].qualifier
);
1332 insert_field (FLD_Q
, &inst
->value
, sizeq
& 0x1, inst
->opcode
->mask
);
1334 if (inst
->opcode
->iclass
== asisdlse
1335 || inst
->opcode
->iclass
== asisdlsep
1336 || inst
->opcode
->iclass
== asisdlso
1337 || inst
->opcode
->iclass
== asisdlsop
)
1338 kind
= FLD_vldst_size
;
1341 insert_field (kind
, &inst
->value
, (sizeq
>> 1) & 0x3, inst
->opcode
->mask
);
1344 /* Opcodes that have fields shared by multiple operands are usually flagged
1345 with flags. In this function, we detect such flags and use the
1346 information in one of the related operands to do the encoding. The 'one'
1347 operand is not any operand but one of the operands that has the enough
1348 information for such an encoding. */
1351 do_special_encoding (struct aarch64_inst
*inst
)
1354 aarch64_insn value
= 0;
1356 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst
->value
);
1358 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1359 if (inst
->opcode
->flags
& F_COND
)
1361 insert_field (FLD_cond2
, &inst
->value
, inst
->cond
->value
, 0);
1363 if (inst
->opcode
->flags
& F_SF
)
1365 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1366 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1367 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1369 insert_field (FLD_sf
, &inst
->value
, value
, 0);
1370 if (inst
->opcode
->flags
& F_N
)
1371 insert_field (FLD_N
, &inst
->value
, value
, inst
->opcode
->mask
);
1373 if (inst
->opcode
->flags
& F_LSE_SZ
)
1375 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1376 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1377 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1379 insert_field (FLD_lse_sz
, &inst
->value
, value
, 0);
1381 if (inst
->opcode
->flags
& F_SIZEQ
)
1382 encode_sizeq (inst
);
1383 if (inst
->opcode
->flags
& F_FPTYPE
)
1385 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
1386 switch (inst
->operands
[idx
].qualifier
)
1388 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
1389 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
1390 case AARCH64_OPND_QLF_S_H
: value
= 3; break;
1391 default: assert (0);
1393 insert_field (FLD_type
, &inst
->value
, value
, 0);
1395 if (inst
->opcode
->flags
& F_SSIZE
)
1397 enum aarch64_opnd_qualifier qualifier
;
1398 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
1399 qualifier
= inst
->operands
[idx
].qualifier
;
1400 assert (qualifier
>= AARCH64_OPND_QLF_S_B
1401 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
1402 value
= aarch64_get_qualifier_standard_value (qualifier
);
1403 insert_field (FLD_size
, &inst
->value
, value
, inst
->opcode
->mask
);
1405 if (inst
->opcode
->flags
& F_T
)
1407 int num
; /* num of consecutive '0's on the right side of imm5<3:0>. */
1408 aarch64_field field
= {0, 0};
1409 enum aarch64_opnd_qualifier qualifier
;
1412 qualifier
= inst
->operands
[idx
].qualifier
;
1413 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1414 == AARCH64_OPND_CLASS_SIMD_REG
1415 && qualifier
>= AARCH64_OPND_QLF_V_8B
1416 && qualifier
<= AARCH64_OPND_QLF_V_2D
);
1427 value
= aarch64_get_qualifier_standard_value (qualifier
);
1428 insert_field (FLD_Q
, &inst
->value
, value
& 0x1, inst
->opcode
->mask
);
1429 num
= (int) value
>> 1;
1430 assert (num
>= 0 && num
<= 3);
1431 gen_sub_field (FLD_imm5
, 0, num
+ 1, &field
);
1432 insert_field_2 (&field
, &inst
->value
, 1 << num
, inst
->opcode
->mask
);
1434 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
1436 /* Use Rt to encode in the case of e.g.
1437 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1438 enum aarch64_opnd_qualifier qualifier
;
1439 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
1441 /* Otherwise use the result operand, which has to be a integer
1444 assert (idx
== 0 || idx
== 1);
1445 assert (aarch64_get_operand_class (inst
->opcode
->operands
[idx
])
1446 == AARCH64_OPND_CLASS_INT_REG
);
1447 qualifier
= inst
->operands
[idx
].qualifier
;
1448 insert_field (FLD_Q
, &inst
->value
,
1449 aarch64_get_qualifier_standard_value (qualifier
), 0);
1451 if (inst
->opcode
->flags
& F_LDS_SIZE
)
1453 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1454 enum aarch64_opnd_qualifier qualifier
;
1455 aarch64_field field
= {0, 0};
1456 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1457 == AARCH64_OPND_CLASS_INT_REG
);
1458 gen_sub_field (FLD_opc
, 0, 1, &field
);
1459 qualifier
= inst
->operands
[0].qualifier
;
1460 insert_field_2 (&field
, &inst
->value
,
1461 1 - aarch64_get_qualifier_standard_value (qualifier
), 0);
1463 /* Miscellaneous encoding as the last step. */
1464 if (inst
->opcode
->flags
& F_MISC
)
1465 do_misc_encoding (inst
);
1467 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst
->value
);
1470 /* Some instructions (including all SVE ones) use the instruction class
1471 to describe how a qualifiers_list index is represented in the instruction
1472 encoding. If INST is such an instruction, encode the chosen qualifier
1476 aarch64_encode_variant_using_iclass (struct aarch64_inst
*inst
)
1478 switch (inst
->opcode
->iclass
)
1481 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1482 0, 2, FLD_SVE_M_14
, FLD_size
);
1486 case sve_shift_pred
:
1487 case sve_shift_unpred
:
1488 /* For indices and shift amounts, the variant is encoded as
1489 part of the immediate. */
1493 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1494 and depend on the immediate. They don't have a separate
1499 /* sve_misc instructions have only a single variant. */
1503 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1504 0, 2, FLD_SVE_M_16
, FLD_size
);
1508 insert_field (FLD_SVE_M_4
, &inst
->value
, aarch64_get_variant (inst
), 0);
1513 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
), 0);
1517 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
) + 1, 0);
1521 insert_field (FLD_SVE_sz
, &inst
->value
, aarch64_get_variant (inst
), 0);
1529 /* Converters converting an alias opcode instruction to its real form. */
1531 /* ROR <Wd>, <Ws>, #<shift>
1533 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1535 convert_ror_to_extr (aarch64_inst
*inst
)
1537 copy_operand_info (inst
, 3, 2);
1538 copy_operand_info (inst
, 2, 1);
1541 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1543 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1545 convert_xtl_to_shll (aarch64_inst
*inst
)
1547 inst
->operands
[2].qualifier
= inst
->operands
[1].qualifier
;
1548 inst
->operands
[2].imm
.value
= 0;
1552 LSR <Xd>, <Xn>, #<shift>
1554 UBFM <Xd>, <Xn>, #<shift>, #63. */
1556 convert_sr_to_bfm (aarch64_inst
*inst
)
1558 inst
->operands
[3].imm
.value
=
1559 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
1562 /* Convert MOV to ORR. */
1564 convert_mov_to_orr (aarch64_inst
*inst
)
1566 /* MOV <Vd>.<T>, <Vn>.<T>
1568 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1569 copy_operand_info (inst
, 2, 1);
1572 /* When <imms> >= <immr>, the instruction written:
1573 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1575 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1578 convert_bfx_to_bfm (aarch64_inst
*inst
)
1582 /* Convert the operand. */
1583 lsb
= inst
->operands
[2].imm
.value
;
1584 width
= inst
->operands
[3].imm
.value
;
1585 inst
->operands
[2].imm
.value
= lsb
;
1586 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
1589 /* When <imms> < <immr>, the instruction written:
1590 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1592 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1595 convert_bfi_to_bfm (aarch64_inst
*inst
)
1599 /* Convert the operand. */
1600 lsb
= inst
->operands
[2].imm
.value
;
1601 width
= inst
->operands
[3].imm
.value
;
1602 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1604 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1605 inst
->operands
[3].imm
.value
= width
- 1;
1609 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1610 inst
->operands
[3].imm
.value
= width
- 1;
1614 /* The instruction written:
1615 BFC <Xd>, #<lsb>, #<width>
1617 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1620 convert_bfc_to_bfm (aarch64_inst
*inst
)
1625 copy_operand_info (inst
, 3, 2);
1626 copy_operand_info (inst
, 2, 1);
1627 copy_operand_info (inst
, 1, 0);
1628 inst
->operands
[1].reg
.regno
= 0x1f;
1630 /* Convert the immediate operand. */
1631 lsb
= inst
->operands
[2].imm
.value
;
1632 width
= inst
->operands
[3].imm
.value
;
1633 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1635 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1636 inst
->operands
[3].imm
.value
= width
- 1;
1640 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1641 inst
->operands
[3].imm
.value
= width
- 1;
1645 /* The instruction written:
1646 LSL <Xd>, <Xn>, #<shift>
1648 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1651 convert_lsl_to_ubfm (aarch64_inst
*inst
)
1653 int64_t shift
= inst
->operands
[2].imm
.value
;
1655 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1657 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
1658 inst
->operands
[3].imm
.value
= 31 - shift
;
1662 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
1663 inst
->operands
[3].imm
.value
= 63 - shift
;
1667 /* CINC <Wd>, <Wn>, <cond>
1669 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1672 convert_to_csel (aarch64_inst
*inst
)
1674 copy_operand_info (inst
, 3, 2);
1675 copy_operand_info (inst
, 2, 1);
1676 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1679 /* CSET <Wd>, <cond>
1681 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1684 convert_cset_to_csinc (aarch64_inst
*inst
)
1686 copy_operand_info (inst
, 3, 1);
1687 copy_operand_info (inst
, 2, 0);
1688 copy_operand_info (inst
, 1, 0);
1689 inst
->operands
[1].reg
.regno
= 0x1f;
1690 inst
->operands
[2].reg
.regno
= 0x1f;
1691 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1696 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1699 convert_mov_to_movewide (aarch64_inst
*inst
)
1702 uint32_t shift_amount
;
1705 switch (inst
->opcode
->op
)
1707 case OP_MOV_IMM_WIDE
:
1708 value
= inst
->operands
[1].imm
.value
;
1710 case OP_MOV_IMM_WIDEN
:
1711 value
= ~inst
->operands
[1].imm
.value
;
1716 inst
->operands
[1].type
= AARCH64_OPND_HALF
;
1717 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
1718 if (! aarch64_wide_constant_p (value
, is32
, &shift_amount
))
1719 /* The constraint check should have guaranteed this wouldn't happen. */
1721 value
>>= shift_amount
;
1723 inst
->operands
[1].imm
.value
= value
;
1724 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_LSL
;
1725 inst
->operands
[1].shifter
.amount
= shift_amount
;
1730 ORR <Wd>, WZR, #<imm>. */
1733 convert_mov_to_movebitmask (aarch64_inst
*inst
)
1735 copy_operand_info (inst
, 2, 1);
1736 inst
->operands
[1].reg
.regno
= 0x1f;
1737 inst
->operands
[1].skip
= 0;
1740 /* Some alias opcodes are assembled by being converted to their real-form. */
1743 convert_to_real (aarch64_inst
*inst
, const aarch64_opcode
*real
)
1745 const aarch64_opcode
*alias
= inst
->opcode
;
1747 if ((alias
->flags
& F_CONV
) == 0)
1748 goto convert_to_real_return
;
1754 convert_sr_to_bfm (inst
);
1757 convert_lsl_to_ubfm (inst
);
1762 convert_to_csel (inst
);
1766 convert_cset_to_csinc (inst
);
1771 convert_bfx_to_bfm (inst
);
1776 convert_bfi_to_bfm (inst
);
1779 convert_bfc_to_bfm (inst
);
1782 convert_mov_to_orr (inst
);
1784 case OP_MOV_IMM_WIDE
:
1785 case OP_MOV_IMM_WIDEN
:
1786 convert_mov_to_movewide (inst
);
1788 case OP_MOV_IMM_LOG
:
1789 convert_mov_to_movebitmask (inst
);
1792 convert_ror_to_extr (inst
);
1798 convert_xtl_to_shll (inst
);
1804 convert_to_real_return
:
1805 aarch64_replace_opcode (inst
, real
);
1808 /* Encode *INST_ORI of the opcode code OPCODE.
1809 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1810 matched operand qualifier sequence in *QLF_SEQ. */
1813 aarch64_opcode_encode (const aarch64_opcode
*opcode
,
1814 const aarch64_inst
*inst_ori
, aarch64_insn
*code
,
1815 aarch64_opnd_qualifier_t
*qlf_seq
,
1816 aarch64_operand_error
*mismatch_detail
)
1819 const aarch64_opcode
*aliased
;
1820 aarch64_inst copy
, *inst
;
1822 DEBUG_TRACE ("enter with %s", opcode
->name
);
1824 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1828 assert (inst
->opcode
== NULL
|| inst
->opcode
== opcode
);
1829 if (inst
->opcode
== NULL
)
1830 inst
->opcode
= opcode
;
1832 /* Constrain the operands.
1833 After passing this, the encoding is guaranteed to succeed. */
1834 if (aarch64_match_operands_constraint (inst
, mismatch_detail
) == 0)
1836 DEBUG_TRACE ("FAIL since operand constraint not met");
1840 /* Get the base value.
1841 Note: this has to be before the aliasing handling below in order to
1842 get the base value from the alias opcode before we move on to the
1843 aliased opcode for encoding. */
1844 inst
->value
= opcode
->opcode
;
1846 /* No need to do anything else if the opcode does not have any operand. */
1847 if (aarch64_num_of_operands (opcode
) == 0)
1850 /* Assign operand indexes and check types. Also put the matched
1851 operand qualifiers in *QLF_SEQ to return. */
1852 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1854 assert (opcode
->operands
[i
] == inst
->operands
[i
].type
);
1855 inst
->operands
[i
].idx
= i
;
1856 if (qlf_seq
!= NULL
)
1857 *qlf_seq
= inst
->operands
[i
].qualifier
;
1860 aliased
= aarch64_find_real_opcode (opcode
);
1861 /* If the opcode is an alias and it does not ask for direct encoding by
1862 itself, the instruction will be transformed to the form of real opcode
1863 and the encoding will be carried out using the rules for the aliased
1865 if (aliased
!= NULL
&& (opcode
->flags
& F_CONV
))
1867 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1868 aliased
->name
, opcode
->name
);
1869 /* Convert the operands to the form of the real opcode. */
1870 convert_to_real (inst
, aliased
);
1874 aarch64_opnd_info
*info
= inst
->operands
;
1876 /* Call the inserter of each operand. */
1877 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++info
)
1879 const aarch64_operand
*opnd
;
1880 enum aarch64_opnd type
= opcode
->operands
[i
];
1881 if (type
== AARCH64_OPND_NIL
)
1885 DEBUG_TRACE ("skip the incomplete operand %d", i
);
1888 opnd
= &aarch64_operands
[type
];
1889 if (operand_has_inserter (opnd
))
1890 aarch64_insert_operand (opnd
, info
, &inst
->value
, inst
);
1893 /* Call opcode encoders indicated by flags. */
1894 if (opcode_has_special_coder (opcode
))
1895 do_special_encoding (inst
);
1897 /* Possibly use the instruction class to encode the chosen qualifier
1899 aarch64_encode_variant_using_iclass (inst
);
1902 DEBUG_TRACE ("exit with %s", opcode
->name
);
1904 *code
= inst
->value
;