1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2017 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
28 /* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
32 N.B. the fields are required to be in such an order that the least significant
33 field for VALUE comes first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
35 is encoded in H:L:M; in this case, the fields should be passed in
36 the order of M, L, H. */
39 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
42 const aarch64_field
*field
;
43 enum aarch64_field_kind kind
;
47 num
= va_arg (va
, uint32_t);
51 kind
= va_arg (va
, enum aarch64_field_kind
);
52 field
= &fields
[kind
];
53 insert_field (kind
, code
, value
, mask
);
54 value
>>= field
->width
;
59 /* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
63 insert_all_fields (const aarch64_operand
*self
, aarch64_insn
*code
,
67 enum aarch64_field_kind kind
;
69 for (i
= ARRAY_SIZE (self
->fields
); i
-- > 0; )
70 if (self
->fields
[i
] != FLD_NIL
)
72 kind
= self
->fields
[i
];
73 insert_field (kind
, code
, value
, 0);
74 value
>>= fields
[kind
].width
;
78 /* Operand inserters. */
80 /* Insert register number. */
82 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
84 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
86 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
90 /* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
94 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
95 aarch64_insn
*code
, const aarch64_inst
*inst
)
98 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
99 /* index and/or type */
100 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
102 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
103 if (info
->type
== AARCH64_OPND_En
104 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
106 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
107 assert (info
->idx
== 1); /* Vn */
108 aarch64_insn value
= info
->reglane
.index
<< pos
;
109 insert_field (FLD_imm4
, code
, value
, 0);
113 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
120 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
121 insert_field (FLD_imm5
, code
, value
, 0);
126 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
127 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
128 unsigned reglane_index
= info
->reglane
.index
;
130 if (inst
->opcode
->op
== OP_FCMLA_ELEM
)
131 /* Complex operand takes two elements. */
134 switch (info
->qualifier
)
136 case AARCH64_OPND_QLF_S_H
:
138 assert (reglane_index
< 8);
139 insert_fields (code
, reglane_index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
141 case AARCH64_OPND_QLF_S_S
:
143 assert (reglane_index
< 4);
144 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
146 case AARCH64_OPND_QLF_S_D
:
148 assert (reglane_index
< 2);
149 insert_field (FLD_H
, code
, reglane_index
, 0);
158 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
160 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
162 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
165 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
167 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
171 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
172 in AdvSIMD load/store instructions. */
174 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
175 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
176 const aarch64_inst
*inst
)
178 aarch64_insn value
= 0;
179 /* Number of elements in each structure to be loaded/stored. */
180 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
183 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
188 switch (info
->reglist
.num_regs
)
190 case 1: value
= 0x7; break;
191 case 2: value
= 0xa; break;
192 case 3: value
= 0x6; break;
193 case 4: value
= 0x2; break;
198 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
209 insert_field (FLD_opcode
, code
, value
, 0);
214 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
215 single structure to all lanes instructions. */
217 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
218 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
219 const aarch64_inst
*inst
)
222 /* The opcode dependent area stores the number of elements in
223 each structure to be loaded/stored. */
224 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
227 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
229 value
= (aarch64_insn
) 0;
230 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
231 /* OP_LD1R does not have alternating variant, but have "two consecutive"
233 value
= (aarch64_insn
) 1;
234 insert_field (FLD_S
, code
, value
, 0);
239 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
240 operand e.g. Vt in AdvSIMD load/store single element instructions. */
242 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
243 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
244 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
246 aarch64_field field
= {0, 0};
247 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
248 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
250 assert (info
->reglist
.has_index
);
253 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
254 /* Encode the index, opcode<2:1> and size. */
255 switch (info
->qualifier
)
257 case AARCH64_OPND_QLF_S_B
:
258 /* Index encoded in "Q:S:size". */
259 QSsize
= info
->reglist
.index
;
262 case AARCH64_OPND_QLF_S_H
:
263 /* Index encoded in "Q:S:size<1>". */
264 QSsize
= info
->reglist
.index
<< 1;
267 case AARCH64_OPND_QLF_S_S
:
268 /* Index encoded in "Q:S". */
269 QSsize
= info
->reglist
.index
<< 2;
272 case AARCH64_OPND_QLF_S_D
:
273 /* Index encoded in "Q". */
274 QSsize
= info
->reglist
.index
<< 3 | 0x1;
280 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
281 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
282 insert_field_2 (&field
, code
, opcodeh2
, 0);
287 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
288 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
289 or SSHR <V><d>, <V><n>, #<shift>. */
291 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
292 const aarch64_opnd_info
*info
,
293 aarch64_insn
*code
, const aarch64_inst
*inst
)
295 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
298 if (inst
->opcode
->iclass
== asimdshf
)
302 0000 x SEE AdvSIMD modified immediate
311 Q
= (val
& 0x1) ? 1 : 0;
312 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
316 assert (info
->type
== AARCH64_OPND_IMM_VLSR
317 || info
->type
== AARCH64_OPND_IMM_VLSL
);
319 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
322 0000 SEE AdvSIMD modified immediate
323 0001 (16-UInt(immh:immb))
324 001x (32-UInt(immh:immb))
325 01xx (64-UInt(immh:immb))
326 1xxx (128-UInt(immh:immb)) */
327 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
331 0000 SEE AdvSIMD modified immediate
332 0001 (UInt(immh:immb)-8)
333 001x (UInt(immh:immb)-16)
334 01xx (UInt(immh:immb)-32)
335 1xxx (UInt(immh:immb)-64) */
336 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
337 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
342 /* Insert fields for e.g. the immediate operands in
343 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
345 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
347 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
351 imm
= info
->imm
.value
;
352 if (operand_need_shift_by_two (self
))
354 insert_all_fields (self
, code
, imm
);
358 /* Insert immediate and its shift amount for e.g. the last operand in
359 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
361 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
362 aarch64_insn
*code
, const aarch64_inst
*inst
)
365 aarch64_ins_imm (self
, info
, code
, inst
);
367 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
371 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
372 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
374 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
375 const aarch64_opnd_info
*info
,
377 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
379 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
380 uint64_t imm
= info
->imm
.value
;
381 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
382 int amount
= info
->shifter
.amount
;
383 aarch64_field field
= {0, 0};
385 /* a:b:c:d:e:f:g:h */
386 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
388 /* Either MOVI <Dd>, #<imm>
389 or MOVI <Vd>.2D, #<imm>.
390 <imm> is a 64-bit immediate
391 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
392 encoded in "a:b:c:d:e:f:g:h". */
393 imm
= aarch64_shrink_expanded_imm8 (imm
);
394 assert ((int)imm
>= 0);
396 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
398 if (kind
== AARCH64_MOD_NONE
)
401 /* shift amount partially in cmode */
402 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
403 if (kind
== AARCH64_MOD_LSL
)
405 /* AARCH64_MOD_LSL: shift zeros. */
406 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
407 assert (esize
== 4 || esize
== 2 || esize
== 1);
408 /* For 8-bit move immediate, the optional LSL #0 does not require
414 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
416 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
420 /* AARCH64_MOD_MSL: shift ones. */
422 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
424 insert_field_2 (&field
, code
, amount
, 0);
429 /* Insert fields for an 8-bit floating-point immediate. */
431 aarch64_ins_fpimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
433 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
435 insert_all_fields (self
, code
, info
->imm
.value
);
439 /* Insert 1-bit rotation immediate (#90 or #270). */
441 aarch64_ins_imm_rotate1 (const aarch64_operand
*self
,
442 const aarch64_opnd_info
*info
,
443 aarch64_insn
*code
, const aarch64_inst
*inst
)
445 uint64_t rot
= (info
->imm
.value
- 90) / 180;
447 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
451 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
453 aarch64_ins_imm_rotate2 (const aarch64_operand
*self
,
454 const aarch64_opnd_info
*info
,
455 aarch64_insn
*code
, const aarch64_inst
*inst
)
457 uint64_t rot
= info
->imm
.value
/ 90;
459 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
463 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
464 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
466 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
468 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
470 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
474 /* Insert arithmetic immediate for e.g. the last operand in
475 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
477 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
478 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
481 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
482 insert_field (self
->fields
[0], code
, value
, 0);
483 /* imm12 (unsigned) */
484 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
488 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
489 the operand should be inverted before encoding. */
491 aarch64_ins_limm_1 (const aarch64_operand
*self
,
492 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
493 const aarch64_inst
*inst
, bfd_boolean invert_p
)
496 uint64_t imm
= info
->imm
.value
;
497 int esize
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
501 if (aarch64_logical_immediate_p (imm
, esize
, &value
) == FALSE
)
502 /* The constraint check should have guaranteed this wouldn't happen. */
505 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
510 /* Insert logical/bitmask immediate for e.g. the last operand in
511 ORR <Wd|WSP>, <Wn>, #<imm>. */
513 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
514 aarch64_insn
*code
, const aarch64_inst
*inst
)
516 return aarch64_ins_limm_1 (self
, info
, code
, inst
,
517 inst
->opcode
->op
== OP_BIC
);
520 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
522 aarch64_ins_inv_limm (const aarch64_operand
*self
,
523 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
524 const aarch64_inst
*inst
)
526 return aarch64_ins_limm_1 (self
, info
, code
, inst
, TRUE
);
529 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
530 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
532 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
533 aarch64_insn
*code
, const aarch64_inst
*inst
)
535 aarch64_insn value
= 0;
537 assert (info
->idx
== 0);
540 aarch64_ins_regno (self
, info
, code
, inst
);
541 if (inst
->opcode
->iclass
== ldstpair_indexed
542 || inst
->opcode
->iclass
== ldstnapair_offs
543 || inst
->opcode
->iclass
== ldstpair_off
544 || inst
->opcode
->iclass
== loadlit
)
547 switch (info
->qualifier
)
549 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
550 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
551 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
554 insert_field (FLD_ldst_size
, code
, value
, 0);
559 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
560 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
566 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
568 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
569 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
570 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
573 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
577 /* Encode the address operand for e.g.
578 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
580 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
581 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
582 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
585 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
588 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
590 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
592 if (kind
== AARCH64_MOD_LSL
)
593 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
594 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
596 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
597 S
= info
->shifter
.amount
!= 0;
599 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
603 Must be #0 if <extend> is explicitly LSL. */
604 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
605 insert_field (FLD_S
, code
, S
, 0);
610 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
612 aarch64_ins_addr_simm (const aarch64_operand
*self
,
613 const aarch64_opnd_info
*info
,
615 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
620 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
621 /* simm (imm9 or imm7) */
622 imm
= info
->addr
.offset
.imm
;
623 if (self
->fields
[0] == FLD_imm7
)
624 /* scaled immediate in ld/st pair instructions.. */
625 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
626 insert_field (self
->fields
[0], code
, imm
, 0);
627 /* pre/post- index */
628 if (info
->addr
.writeback
)
630 assert (inst
->opcode
->iclass
!= ldst_unscaled
631 && inst
->opcode
->iclass
!= ldstnapair_offs
632 && inst
->opcode
->iclass
!= ldstpair_off
633 && inst
->opcode
->iclass
!= ldst_unpriv
);
634 assert (info
->addr
.preind
!= info
->addr
.postind
);
635 if (info
->addr
.preind
)
636 insert_field (self
->fields
[1], code
, 1, 0);
642 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
644 aarch64_ins_addr_simm10 (const aarch64_operand
*self
,
645 const aarch64_opnd_info
*info
,
647 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
652 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
654 imm
= info
->addr
.offset
.imm
>> 3;
655 insert_field (self
->fields
[1], code
, imm
>> 9, 0);
656 insert_field (self
->fields
[2], code
, imm
, 0);
658 if (info
->addr
.writeback
)
660 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
661 insert_field (self
->fields
[3], code
, 1, 0);
666 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
668 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
669 const aarch64_opnd_info
*info
,
671 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
673 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
676 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
678 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
682 /* Encode the address operand for e.g.
683 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
685 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
686 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
687 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
690 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
692 if (info
->addr
.offset
.is_reg
)
693 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
695 insert_field (FLD_Rm
, code
, 0x1f, 0);
699 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
701 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
702 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
703 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
706 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
710 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
712 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
713 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
714 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
716 /* op0:op1:CRn:CRm:op2 */
717 insert_fields (code
, info
->sysreg
, inst
->opcode
->mask
, 5,
718 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
722 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
724 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
725 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
726 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
729 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
734 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
736 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
737 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
738 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
740 /* op1:CRn:CRm:op2 */
741 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
742 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
746 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
749 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
750 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
751 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
754 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
758 /* Encode the prefetch operation option operand for e.g.
759 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
762 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
763 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
764 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
767 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
771 /* Encode the hint number for instructions that alias HINT but take an
775 aarch64_ins_hint (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
776 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
777 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
780 insert_fields (code
, info
->hint_option
->value
, 0, 2, FLD_op2
, FLD_CRm
);
784 /* Encode the extended register operand for e.g.
785 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
787 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
788 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
789 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
791 enum aarch64_modifier_kind kind
;
794 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
796 kind
= info
->shifter
.kind
;
797 if (kind
== AARCH64_MOD_LSL
)
798 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
799 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
800 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
802 insert_field (FLD_imm3
, code
, info
->shifter
.amount
, 0);
807 /* Encode the shifted register operand for e.g.
808 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
810 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
811 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
812 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
815 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
817 insert_field (FLD_shift
, code
,
818 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
820 insert_field (FLD_imm6
, code
, info
->shifter
.amount
, 0);
825 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
826 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
827 SELF's operand-dependent value. fields[0] specifies the field that
828 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
830 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand
*self
,
831 const aarch64_opnd_info
*info
,
833 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
835 int factor
= 1 + get_operand_specific_data (self
);
836 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
837 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
841 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
842 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
843 SELF's operand-dependent value. fields[0] specifies the field that
844 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
846 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand
*self
,
847 const aarch64_opnd_info
*info
,
849 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
851 int factor
= 1 + get_operand_specific_data (self
);
852 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
853 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
857 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
858 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
859 SELF's operand-dependent value. fields[0] specifies the field that
860 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
861 and imm3 fields, with imm3 being the less-significant part. */
863 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand
*self
,
864 const aarch64_opnd_info
*info
,
866 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
868 int factor
= 1 + get_operand_specific_data (self
);
869 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
870 insert_fields (code
, info
->addr
.offset
.imm
/ factor
, 0,
871 2, FLD_imm3
, FLD_SVE_imm6
);
875 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
876 is a 4-bit signed number and where <shift> is SELF's operand-dependent
877 value. fields[0] specifies the base register field. */
879 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand
*self
,
880 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
881 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
883 int factor
= 1 << get_operand_specific_data (self
);
884 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
885 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
889 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
890 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
891 value. fields[0] specifies the base register field. */
893 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand
*self
,
894 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
895 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
897 int factor
= 1 << get_operand_specific_data (self
);
898 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
899 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
903 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
904 is SELF's operand-dependent value. fields[0] specifies the base
905 register field and fields[1] specifies the offset register field. */
907 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand
*self
,
908 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
909 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
911 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
912 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
916 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
917 <shift> is SELF's operand-dependent value. fields[0] specifies the
918 base register field, fields[1] specifies the offset register field and
919 fields[2] is a single-bit field that selects SXTW over UXTW. */
921 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand
*self
,
922 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
923 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
925 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
926 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
927 if (info
->shifter
.kind
== AARCH64_MOD_UXTW
)
928 insert_field (self
->fields
[2], code
, 0, 0);
930 insert_field (self
->fields
[2], code
, 1, 0);
934 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
935 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
936 fields[0] specifies the base register field. */
938 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand
*self
,
939 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
940 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
942 int factor
= 1 << get_operand_specific_data (self
);
943 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
944 insert_field (FLD_imm5
, code
, info
->addr
.offset
.imm
/ factor
, 0);
948 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
949 where <modifier> is fixed by the instruction and where <msz> is a
950 2-bit unsigned number. fields[0] specifies the base register field
951 and fields[1] specifies the offset register field. */
953 aarch64_ext_sve_addr_zz (const aarch64_operand
*self
,
954 const aarch64_opnd_info
*info
, aarch64_insn
*code
)
956 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
957 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
958 insert_field (FLD_SVE_msz
, code
, info
->shifter
.amount
, 0);
962 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
963 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
964 field and fields[1] specifies the offset register field. */
966 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand
*self
,
967 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
968 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
970 return aarch64_ext_sve_addr_zz (self
, info
, code
);
973 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
974 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
975 field and fields[1] specifies the offset register field. */
977 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand
*self
,
978 const aarch64_opnd_info
*info
,
980 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
982 return aarch64_ext_sve_addr_zz (self
, info
, code
);
985 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
986 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
987 field and fields[1] specifies the offset register field. */
989 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand
*self
,
990 const aarch64_opnd_info
*info
,
992 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
994 return aarch64_ext_sve_addr_zz (self
, info
, code
);
997 /* Encode an SVE ADD/SUB immediate. */
999 aarch64_ins_sve_aimm (const aarch64_operand
*self
,
1000 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1001 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1003 if (info
->shifter
.amount
== 8)
1004 insert_all_fields (self
, code
, (info
->imm
.value
& 0xff) | 256);
1005 else if (info
->imm
.value
!= 0 && (info
->imm
.value
& 0xff) == 0)
1006 insert_all_fields (self
, code
, ((info
->imm
.value
/ 256) & 0xff) | 256);
1008 insert_all_fields (self
, code
, info
->imm
.value
& 0xff);
1012 /* Encode an SVE CPY/DUP immediate. */
1014 aarch64_ins_sve_asimm (const aarch64_operand
*self
,
1015 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1016 const aarch64_inst
*inst
)
1018 return aarch64_ins_sve_aimm (self
, info
, code
, inst
);
1021 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1022 array specifies which field to use for Zn. MM is encoded in the
1023 concatenation of imm5 and SVE_tszh, with imm5 being the less
1024 significant part. */
1026 aarch64_ins_sve_index (const aarch64_operand
*self
,
1027 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1028 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1030 unsigned int esize
= aarch64_get_qualifier_esize (info
->qualifier
);
1031 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, 0);
1032 insert_fields (code
, (info
->reglane
.index
* 2 + 1) * esize
, 0,
1033 2, FLD_imm5
, FLD_SVE_tszh
);
1037 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1039 aarch64_ins_sve_limm_mov (const aarch64_operand
*self
,
1040 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1041 const aarch64_inst
*inst
)
1043 return aarch64_ins_limm (self
, info
, code
, inst
);
1046 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1047 and where MM occupies the most-significant part. The operand-dependent
1048 value specifies the number of bits in Zn. */
1050 aarch64_ins_sve_quad_index (const aarch64_operand
*self
,
1051 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1052 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1054 unsigned int reg_bits
= get_operand_specific_data (self
);
1055 assert (info
->reglane
.regno
< (1U << reg_bits
));
1056 unsigned int val
= (info
->reglane
.index
<< reg_bits
) + info
->reglane
.regno
;
1057 insert_all_fields (self
, code
, val
);
1061 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1064 aarch64_ins_sve_reglist (const aarch64_operand
*self
,
1065 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1066 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1068 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
1072 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1073 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1076 aarch64_ins_sve_scale (const aarch64_operand
*self
,
1077 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1078 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1080 insert_all_fields (self
, code
, info
->imm
.value
);
1081 insert_field (FLD_SVE_imm4
, code
, info
->shifter
.amount
- 1, 0);
1085 /* Encode an SVE shift left immediate. */
1087 aarch64_ins_sve_shlimm (const aarch64_operand
*self
,
1088 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1089 const aarch64_inst
*inst
)
1091 const aarch64_opnd_info
*prev_operand
;
1094 assert (info
->idx
> 0);
1095 prev_operand
= &inst
->operands
[info
->idx
- 1];
1096 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1097 insert_all_fields (self
, code
, 8 * esize
+ info
->imm
.value
);
1101 /* Encode an SVE shift right immediate. */
1103 aarch64_ins_sve_shrimm (const aarch64_operand
*self
,
1104 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1105 const aarch64_inst
*inst
)
1107 const aarch64_opnd_info
*prev_operand
;
1110 assert (info
->idx
> 0);
1111 prev_operand
= &inst
->operands
[info
->idx
- 1];
1112 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1113 insert_all_fields (self
, code
, 16 * esize
- info
->imm
.value
);
1117 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1118 The fields array specifies which field to use. */
1120 aarch64_ins_sve_float_half_one (const aarch64_operand
*self
,
1121 const aarch64_opnd_info
*info
,
1123 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1125 if (info
->imm
.value
== 0x3f000000)
1126 insert_field (self
->fields
[0], code
, 0, 0);
1128 insert_field (self
->fields
[0], code
, 1, 0);
1132 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1133 The fields array specifies which field to use. */
1135 aarch64_ins_sve_float_half_two (const aarch64_operand
*self
,
1136 const aarch64_opnd_info
*info
,
1138 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1140 if (info
->imm
.value
== 0x3f000000)
1141 insert_field (self
->fields
[0], code
, 0, 0);
1143 insert_field (self
->fields
[0], code
, 1, 0);
1147 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1148 The fields array specifies which field to use. */
1150 aarch64_ins_sve_float_zero_one (const aarch64_operand
*self
,
1151 const aarch64_opnd_info
*info
,
1153 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1155 if (info
->imm
.value
== 0)
1156 insert_field (self
->fields
[0], code
, 0, 0);
1158 insert_field (self
->fields
[0], code
, 1, 0);
1162 /* Miscellaneous encoding functions. */
1164 /* Encode size[0], i.e. bit 22, for
1165 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1168 encode_asimd_fcvt (aarch64_inst
*inst
)
1171 aarch64_field field
= {0, 0};
1172 enum aarch64_opnd_qualifier qualifier
;
1174 switch (inst
->opcode
->op
)
1178 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1179 qualifier
= inst
->operands
[1].qualifier
;
1183 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1184 qualifier
= inst
->operands
[0].qualifier
;
1189 assert (qualifier
== AARCH64_OPND_QLF_V_4S
1190 || qualifier
== AARCH64_OPND_QLF_V_2D
);
1191 value
= (qualifier
== AARCH64_OPND_QLF_V_4S
) ? 0 : 1;
1192 gen_sub_field (FLD_size
, 0, 1, &field
);
1193 insert_field_2 (&field
, &inst
->value
, value
, 0);
1196 /* Encode size[0], i.e. bit 22, for
1197 e.g. FCVTXN <Vb><d>, <Va><n>. */
1200 encode_asisd_fcvtxn (aarch64_inst
*inst
)
1202 aarch64_insn val
= 1;
1203 aarch64_field field
= {0, 0};
1204 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_S
);
1205 gen_sub_field (FLD_size
, 0, 1, &field
);
1206 insert_field_2 (&field
, &inst
->value
, val
, 0);
1209 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1211 encode_fcvt (aarch64_inst
*inst
)
1214 const aarch64_field field
= {15, 2};
1217 switch (inst
->operands
[0].qualifier
)
1219 case AARCH64_OPND_QLF_S_S
: val
= 0; break;
1220 case AARCH64_OPND_QLF_S_D
: val
= 1; break;
1221 case AARCH64_OPND_QLF_S_H
: val
= 3; break;
1224 insert_field_2 (&field
, &inst
->value
, val
, 0);
1229 /* Return the index in qualifiers_list that INST is using. Should only
1230 be called once the qualifiers are known to be valid. */
1233 aarch64_get_variant (struct aarch64_inst
*inst
)
1235 int i
, nops
, variant
;
1237 nops
= aarch64_num_of_operands (inst
->opcode
);
1238 for (variant
= 0; variant
< AARCH64_MAX_QLF_SEQ_NUM
; ++variant
)
1240 for (i
= 0; i
< nops
; ++i
)
1241 if (inst
->opcode
->qualifiers_list
[variant
][i
]
1242 != inst
->operands
[i
].qualifier
)
1250 /* Do miscellaneous encodings that are not common enough to be driven by
1254 do_misc_encoding (aarch64_inst
*inst
)
1258 switch (inst
->opcode
->op
)
1267 encode_asimd_fcvt (inst
);
1270 encode_asisd_fcvtxn (inst
);
1274 /* Copy Pn to Pm and Pg. */
1275 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1276 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1277 insert_field (FLD_SVE_Pg4_10
, &inst
->value
, value
, 0);
1280 /* Copy Zd to Zm. */
1281 value
= extract_field (FLD_SVE_Zd
, inst
->value
, 0);
1282 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1285 /* Fill in the zero immediate. */
1286 insert_fields (&inst
->value
, 1 << aarch64_get_variant (inst
), 0,
1287 2, FLD_imm5
, FLD_SVE_tszh
);
1290 /* Copy Zn to Zm. */
1291 value
= extract_field (FLD_SVE_Zn
, inst
->value
, 0);
1292 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1297 /* Copy Pd to Pm. */
1298 value
= extract_field (FLD_SVE_Pd
, inst
->value
, 0);
1299 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1301 case OP_MOVZS_P_P_P
:
1303 /* Copy Pn to Pm. */
1304 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1305 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1307 case OP_NOTS_P_P_P_Z
:
1308 case OP_NOT_P_P_P_Z
:
1309 /* Copy Pg to Pm. */
1310 value
= extract_field (FLD_SVE_Pg4_10
, inst
->value
, 0);
1311 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1317 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1319 encode_sizeq (aarch64_inst
*inst
)
1322 enum aarch64_field_kind kind
;
1325 /* Get the index of the operand whose information we are going to use
1326 to encode the size and Q fields.
1327 This is deduced from the possible valid qualifier lists. */
1328 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1329 DEBUG_TRACE ("idx: %d; qualifier: %s", idx
,
1330 aarch64_get_qualifier_name (inst
->operands
[idx
].qualifier
));
1331 sizeq
= aarch64_get_qualifier_standard_value (inst
->operands
[idx
].qualifier
);
1333 insert_field (FLD_Q
, &inst
->value
, sizeq
& 0x1, inst
->opcode
->mask
);
1335 if (inst
->opcode
->iclass
== asisdlse
1336 || inst
->opcode
->iclass
== asisdlsep
1337 || inst
->opcode
->iclass
== asisdlso
1338 || inst
->opcode
->iclass
== asisdlsop
)
1339 kind
= FLD_vldst_size
;
1342 insert_field (kind
, &inst
->value
, (sizeq
>> 1) & 0x3, inst
->opcode
->mask
);
1345 /* Opcodes that have fields shared by multiple operands are usually flagged
1346 with flags. In this function, we detect such flags and use the
1347 information in one of the related operands to do the encoding. The 'one'
1348 operand is not any operand but one of the operands that has the enough
1349 information for such an encoding. */
1352 do_special_encoding (struct aarch64_inst
*inst
)
1355 aarch64_insn value
= 0;
1357 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst
->value
);
1359 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1360 if (inst
->opcode
->flags
& F_COND
)
1362 insert_field (FLD_cond2
, &inst
->value
, inst
->cond
->value
, 0);
1364 if (inst
->opcode
->flags
& F_SF
)
1366 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1367 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1368 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1370 insert_field (FLD_sf
, &inst
->value
, value
, 0);
1371 if (inst
->opcode
->flags
& F_N
)
1372 insert_field (FLD_N
, &inst
->value
, value
, inst
->opcode
->mask
);
1374 if (inst
->opcode
->flags
& F_LSE_SZ
)
1376 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1377 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1378 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1380 insert_field (FLD_lse_sz
, &inst
->value
, value
, 0);
1382 if (inst
->opcode
->flags
& F_SIZEQ
)
1383 encode_sizeq (inst
);
1384 if (inst
->opcode
->flags
& F_FPTYPE
)
1386 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
1387 switch (inst
->operands
[idx
].qualifier
)
1389 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
1390 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
1391 case AARCH64_OPND_QLF_S_H
: value
= 3; break;
1392 default: assert (0);
1394 insert_field (FLD_type
, &inst
->value
, value
, 0);
1396 if (inst
->opcode
->flags
& F_SSIZE
)
1398 enum aarch64_opnd_qualifier qualifier
;
1399 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
1400 qualifier
= inst
->operands
[idx
].qualifier
;
1401 assert (qualifier
>= AARCH64_OPND_QLF_S_B
1402 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
1403 value
= aarch64_get_qualifier_standard_value (qualifier
);
1404 insert_field (FLD_size
, &inst
->value
, value
, inst
->opcode
->mask
);
1406 if (inst
->opcode
->flags
& F_T
)
1408 int num
; /* num of consecutive '0's on the right side of imm5<3:0>. */
1409 aarch64_field field
= {0, 0};
1410 enum aarch64_opnd_qualifier qualifier
;
1413 qualifier
= inst
->operands
[idx
].qualifier
;
1414 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1415 == AARCH64_OPND_CLASS_SIMD_REG
1416 && qualifier
>= AARCH64_OPND_QLF_V_8B
1417 && qualifier
<= AARCH64_OPND_QLF_V_2D
);
1428 value
= aarch64_get_qualifier_standard_value (qualifier
);
1429 insert_field (FLD_Q
, &inst
->value
, value
& 0x1, inst
->opcode
->mask
);
1430 num
= (int) value
>> 1;
1431 assert (num
>= 0 && num
<= 3);
1432 gen_sub_field (FLD_imm5
, 0, num
+ 1, &field
);
1433 insert_field_2 (&field
, &inst
->value
, 1 << num
, inst
->opcode
->mask
);
1435 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
1437 /* Use Rt to encode in the case of e.g.
1438 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1439 enum aarch64_opnd_qualifier qualifier
;
1440 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
1442 /* Otherwise use the result operand, which has to be a integer
1445 assert (idx
== 0 || idx
== 1);
1446 assert (aarch64_get_operand_class (inst
->opcode
->operands
[idx
])
1447 == AARCH64_OPND_CLASS_INT_REG
);
1448 qualifier
= inst
->operands
[idx
].qualifier
;
1449 insert_field (FLD_Q
, &inst
->value
,
1450 aarch64_get_qualifier_standard_value (qualifier
), 0);
1452 if (inst
->opcode
->flags
& F_LDS_SIZE
)
1454 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1455 enum aarch64_opnd_qualifier qualifier
;
1456 aarch64_field field
= {0, 0};
1457 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1458 == AARCH64_OPND_CLASS_INT_REG
);
1459 gen_sub_field (FLD_opc
, 0, 1, &field
);
1460 qualifier
= inst
->operands
[0].qualifier
;
1461 insert_field_2 (&field
, &inst
->value
,
1462 1 - aarch64_get_qualifier_standard_value (qualifier
), 0);
1464 /* Miscellaneous encoding as the last step. */
1465 if (inst
->opcode
->flags
& F_MISC
)
1466 do_misc_encoding (inst
);
1468 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst
->value
);
1471 /* Some instructions (including all SVE ones) use the instruction class
1472 to describe how a qualifiers_list index is represented in the instruction
1473 encoding. If INST is such an instruction, encode the chosen qualifier
1477 aarch64_encode_variant_using_iclass (struct aarch64_inst
*inst
)
1479 switch (inst
->opcode
->iclass
)
1482 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1483 0, 2, FLD_SVE_M_14
, FLD_size
);
1487 case sve_shift_pred
:
1488 case sve_shift_unpred
:
1489 /* For indices and shift amounts, the variant is encoded as
1490 part of the immediate. */
1494 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1495 and depend on the immediate. They don't have a separate
1500 /* sve_misc instructions have only a single variant. */
1504 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1505 0, 2, FLD_SVE_M_16
, FLD_size
);
1509 insert_field (FLD_SVE_M_4
, &inst
->value
, aarch64_get_variant (inst
), 0);
1514 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
), 0);
1518 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
) + 1, 0);
1522 insert_field (FLD_SVE_sz
, &inst
->value
, aarch64_get_variant (inst
), 0);
1530 /* Converters converting an alias opcode instruction to its real form. */
1532 /* ROR <Wd>, <Ws>, #<shift>
1534 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1536 convert_ror_to_extr (aarch64_inst
*inst
)
1538 copy_operand_info (inst
, 3, 2);
1539 copy_operand_info (inst
, 2, 1);
1542 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1544 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1546 convert_xtl_to_shll (aarch64_inst
*inst
)
1548 inst
->operands
[2].qualifier
= inst
->operands
[1].qualifier
;
1549 inst
->operands
[2].imm
.value
= 0;
1553 LSR <Xd>, <Xn>, #<shift>
1555 UBFM <Xd>, <Xn>, #<shift>, #63. */
1557 convert_sr_to_bfm (aarch64_inst
*inst
)
1559 inst
->operands
[3].imm
.value
=
1560 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
1563 /* Convert MOV to ORR. */
1565 convert_mov_to_orr (aarch64_inst
*inst
)
1567 /* MOV <Vd>.<T>, <Vn>.<T>
1569 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1570 copy_operand_info (inst
, 2, 1);
1573 /* When <imms> >= <immr>, the instruction written:
1574 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1576 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1579 convert_bfx_to_bfm (aarch64_inst
*inst
)
1583 /* Convert the operand. */
1584 lsb
= inst
->operands
[2].imm
.value
;
1585 width
= inst
->operands
[3].imm
.value
;
1586 inst
->operands
[2].imm
.value
= lsb
;
1587 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
1590 /* When <imms> < <immr>, the instruction written:
1591 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1593 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1596 convert_bfi_to_bfm (aarch64_inst
*inst
)
1600 /* Convert the operand. */
1601 lsb
= inst
->operands
[2].imm
.value
;
1602 width
= inst
->operands
[3].imm
.value
;
1603 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1605 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1606 inst
->operands
[3].imm
.value
= width
- 1;
1610 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1611 inst
->operands
[3].imm
.value
= width
- 1;
1615 /* The instruction written:
1616 BFC <Xd>, #<lsb>, #<width>
1618 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1621 convert_bfc_to_bfm (aarch64_inst
*inst
)
1626 copy_operand_info (inst
, 3, 2);
1627 copy_operand_info (inst
, 2, 1);
1628 copy_operand_info (inst
, 1, 0);
1629 inst
->operands
[1].reg
.regno
= 0x1f;
1631 /* Convert the immediate operand. */
1632 lsb
= inst
->operands
[2].imm
.value
;
1633 width
= inst
->operands
[3].imm
.value
;
1634 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1636 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1637 inst
->operands
[3].imm
.value
= width
- 1;
1641 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1642 inst
->operands
[3].imm
.value
= width
- 1;
1646 /* The instruction written:
1647 LSL <Xd>, <Xn>, #<shift>
1649 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1652 convert_lsl_to_ubfm (aarch64_inst
*inst
)
1654 int64_t shift
= inst
->operands
[2].imm
.value
;
1656 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1658 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
1659 inst
->operands
[3].imm
.value
= 31 - shift
;
1663 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
1664 inst
->operands
[3].imm
.value
= 63 - shift
;
1668 /* CINC <Wd>, <Wn>, <cond>
1670 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1673 convert_to_csel (aarch64_inst
*inst
)
1675 copy_operand_info (inst
, 3, 2);
1676 copy_operand_info (inst
, 2, 1);
1677 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1680 /* CSET <Wd>, <cond>
1682 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1685 convert_cset_to_csinc (aarch64_inst
*inst
)
1687 copy_operand_info (inst
, 3, 1);
1688 copy_operand_info (inst
, 2, 0);
1689 copy_operand_info (inst
, 1, 0);
1690 inst
->operands
[1].reg
.regno
= 0x1f;
1691 inst
->operands
[2].reg
.regno
= 0x1f;
1692 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1697 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1700 convert_mov_to_movewide (aarch64_inst
*inst
)
1703 uint32_t shift_amount
;
1706 switch (inst
->opcode
->op
)
1708 case OP_MOV_IMM_WIDE
:
1709 value
= inst
->operands
[1].imm
.value
;
1711 case OP_MOV_IMM_WIDEN
:
1712 value
= ~inst
->operands
[1].imm
.value
;
1717 inst
->operands
[1].type
= AARCH64_OPND_HALF
;
1718 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
1719 if (! aarch64_wide_constant_p (value
, is32
, &shift_amount
))
1720 /* The constraint check should have guaranteed this wouldn't happen. */
1722 value
>>= shift_amount
;
1724 inst
->operands
[1].imm
.value
= value
;
1725 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_LSL
;
1726 inst
->operands
[1].shifter
.amount
= shift_amount
;
1731 ORR <Wd>, WZR, #<imm>. */
1734 convert_mov_to_movebitmask (aarch64_inst
*inst
)
1736 copy_operand_info (inst
, 2, 1);
1737 inst
->operands
[1].reg
.regno
= 0x1f;
1738 inst
->operands
[1].skip
= 0;
1741 /* Some alias opcodes are assembled by being converted to their real-form. */
1744 convert_to_real (aarch64_inst
*inst
, const aarch64_opcode
*real
)
1746 const aarch64_opcode
*alias
= inst
->opcode
;
1748 if ((alias
->flags
& F_CONV
) == 0)
1749 goto convert_to_real_return
;
1755 convert_sr_to_bfm (inst
);
1758 convert_lsl_to_ubfm (inst
);
1763 convert_to_csel (inst
);
1767 convert_cset_to_csinc (inst
);
1772 convert_bfx_to_bfm (inst
);
1777 convert_bfi_to_bfm (inst
);
1780 convert_bfc_to_bfm (inst
);
1783 convert_mov_to_orr (inst
);
1785 case OP_MOV_IMM_WIDE
:
1786 case OP_MOV_IMM_WIDEN
:
1787 convert_mov_to_movewide (inst
);
1789 case OP_MOV_IMM_LOG
:
1790 convert_mov_to_movebitmask (inst
);
1793 convert_ror_to_extr (inst
);
1799 convert_xtl_to_shll (inst
);
1805 convert_to_real_return
:
1806 aarch64_replace_opcode (inst
, real
);
1809 /* Encode *INST_ORI of the opcode code OPCODE.
1810 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1811 matched operand qualifier sequence in *QLF_SEQ. */
1814 aarch64_opcode_encode (const aarch64_opcode
*opcode
,
1815 const aarch64_inst
*inst_ori
, aarch64_insn
*code
,
1816 aarch64_opnd_qualifier_t
*qlf_seq
,
1817 aarch64_operand_error
*mismatch_detail
)
1820 const aarch64_opcode
*aliased
;
1821 aarch64_inst copy
, *inst
;
1823 DEBUG_TRACE ("enter with %s", opcode
->name
);
1825 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1829 assert (inst
->opcode
== NULL
|| inst
->opcode
== opcode
);
1830 if (inst
->opcode
== NULL
)
1831 inst
->opcode
= opcode
;
1833 /* Constrain the operands.
1834 After passing this, the encoding is guaranteed to succeed. */
1835 if (aarch64_match_operands_constraint (inst
, mismatch_detail
) == 0)
1837 DEBUG_TRACE ("FAIL since operand constraint not met");
1841 /* Get the base value.
1842 Note: this has to be before the aliasing handling below in order to
1843 get the base value from the alias opcode before we move on to the
1844 aliased opcode for encoding. */
1845 inst
->value
= opcode
->opcode
;
1847 /* No need to do anything else if the opcode does not have any operand. */
1848 if (aarch64_num_of_operands (opcode
) == 0)
1851 /* Assign operand indexes and check types. Also put the matched
1852 operand qualifiers in *QLF_SEQ to return. */
1853 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1855 assert (opcode
->operands
[i
] == inst
->operands
[i
].type
);
1856 inst
->operands
[i
].idx
= i
;
1857 if (qlf_seq
!= NULL
)
1858 *qlf_seq
= inst
->operands
[i
].qualifier
;
1861 aliased
= aarch64_find_real_opcode (opcode
);
1862 /* If the opcode is an alias and it does not ask for direct encoding by
1863 itself, the instruction will be transformed to the form of real opcode
1864 and the encoding will be carried out using the rules for the aliased
1866 if (aliased
!= NULL
&& (opcode
->flags
& F_CONV
))
1868 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1869 aliased
->name
, opcode
->name
);
1870 /* Convert the operands to the form of the real opcode. */
1871 convert_to_real (inst
, aliased
);
1875 aarch64_opnd_info
*info
= inst
->operands
;
1877 /* Call the inserter of each operand. */
1878 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++info
)
1880 const aarch64_operand
*opnd
;
1881 enum aarch64_opnd type
= opcode
->operands
[i
];
1882 if (type
== AARCH64_OPND_NIL
)
1886 DEBUG_TRACE ("skip the incomplete operand %d", i
);
1889 opnd
= &aarch64_operands
[type
];
1890 if (operand_has_inserter (opnd
))
1891 aarch64_insert_operand (opnd
, info
, &inst
->value
, inst
);
1894 /* Call opcode encoders indicated by flags. */
1895 if (opcode_has_special_coder (opcode
))
1896 do_special_encoding (inst
);
1898 /* Possibly use the instruction class to encode the chosen qualifier
1900 aarch64_encode_variant_using_iclass (inst
);
1903 DEBUG_TRACE ("exit with %s", opcode
->name
);
1905 *code
= inst
->value
;