1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
#include <assert.h>
#include <stdarg.h>

#include "libiberty.h"
#include "aarch64-asm.h"
28 /* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
32 N.B. the fields are required to be in such an order than the least signficant
33 field for VALUE comes the first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
36 the order of M, L, H. */
39 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
42 const aarch64_field
*field
;
43 enum aarch64_field_kind kind
;
47 num
= va_arg (va
, uint32_t);
51 kind
= va_arg (va
, enum aarch64_field_kind
);
52 field
= &fields
[kind
];
53 insert_field (kind
, code
, value
, mask
);
54 value
>>= field
->width
;
59 /* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
63 insert_all_fields (const aarch64_operand
*self
, aarch64_insn
*code
,
67 enum aarch64_field_kind kind
;
69 for (i
= ARRAY_SIZE (self
->fields
); i
-- > 0; )
70 if (self
->fields
[i
] != FLD_NIL
)
72 kind
= self
->fields
[i
];
73 insert_field (kind
, code
, value
, 0);
74 value
>>= fields
[kind
].width
;
78 /* Operand inserters. */
80 /* Insert register number. */
82 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
84 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
86 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
90 /* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
94 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
95 aarch64_insn
*code
, const aarch64_inst
*inst
)
98 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
99 /* index and/or type */
100 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
102 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
103 if (info
->type
== AARCH64_OPND_En
104 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
106 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
107 assert (info
->idx
== 1); /* Vn */
108 aarch64_insn value
= info
->reglane
.index
<< pos
;
109 insert_field (FLD_imm4
, code
, value
, 0);
113 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
120 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
121 insert_field (FLD_imm5
, code
, value
, 0);
126 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
127 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
128 unsigned index
= info
->reglane
.index
;
130 if (inst
->opcode
->op
== OP_FCMLA_ELEM
)
131 /* Complex operand takes two elements. */
134 switch (info
->qualifier
)
136 case AARCH64_OPND_QLF_S_H
:
139 insert_fields (code
, index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
141 case AARCH64_OPND_QLF_S_S
:
144 insert_fields (code
, index
, 0, 2, FLD_L
, FLD_H
);
146 case AARCH64_OPND_QLF_S_D
:
149 insert_field (FLD_H
, code
, index
, 0);
158 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
160 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
162 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
165 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
167 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
171 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
172 in AdvSIMD load/store instructions. */
174 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
175 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
176 const aarch64_inst
*inst
)
178 aarch64_insn value
= 0;
179 /* Number of elements in each structure to be loaded/stored. */
180 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
183 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
188 switch (info
->reglist
.num_regs
)
190 case 1: value
= 0x7; break;
191 case 2: value
= 0xa; break;
192 case 3: value
= 0x6; break;
193 case 4: value
= 0x2; break;
198 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
209 insert_field (FLD_opcode
, code
, value
, 0);
214 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
215 single structure to all lanes instructions. */
217 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
218 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
219 const aarch64_inst
*inst
)
222 /* The opcode dependent area stores the number of elements in
223 each structure to be loaded/stored. */
224 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
227 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
229 value
= (aarch64_insn
) 0;
230 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
231 /* OP_LD1R does not have alternating variant, but have "two consecutive"
233 value
= (aarch64_insn
) 1;
234 insert_field (FLD_S
, code
, value
, 0);
239 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
240 operand e.g. Vt in AdvSIMD load/store single element instructions. */
242 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
243 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
244 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
246 aarch64_field field
= {0, 0};
247 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
248 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
250 assert (info
->reglist
.has_index
);
253 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
254 /* Encode the index, opcode<2:1> and size. */
255 switch (info
->qualifier
)
257 case AARCH64_OPND_QLF_S_B
:
258 /* Index encoded in "Q:S:size". */
259 QSsize
= info
->reglist
.index
;
262 case AARCH64_OPND_QLF_S_H
:
263 /* Index encoded in "Q:S:size<1>". */
264 QSsize
= info
->reglist
.index
<< 1;
267 case AARCH64_OPND_QLF_S_S
:
268 /* Index encoded in "Q:S". */
269 QSsize
= info
->reglist
.index
<< 2;
272 case AARCH64_OPND_QLF_S_D
:
273 /* Index encoded in "Q". */
274 QSsize
= info
->reglist
.index
<< 3 | 0x1;
280 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
281 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
282 insert_field_2 (&field
, code
, opcodeh2
, 0);
287 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
288 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
289 or SSHR <V><d>, <V><n>, #<shift>. */
291 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
292 const aarch64_opnd_info
*info
,
293 aarch64_insn
*code
, const aarch64_inst
*inst
)
295 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
298 if (inst
->opcode
->iclass
== asimdshf
)
302 0000 x SEE AdvSIMD modified immediate
311 Q
= (val
& 0x1) ? 1 : 0;
312 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
316 assert (info
->type
== AARCH64_OPND_IMM_VLSR
317 || info
->type
== AARCH64_OPND_IMM_VLSL
);
319 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
322 0000 SEE AdvSIMD modified immediate
323 0001 (16-UInt(immh:immb))
324 001x (32-UInt(immh:immb))
325 01xx (64-UInt(immh:immb))
326 1xxx (128-UInt(immh:immb)) */
327 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
331 0000 SEE AdvSIMD modified immediate
332 0001 (UInt(immh:immb)-8)
333 001x (UInt(immh:immb)-16)
334 01xx (UInt(immh:immb)-32)
335 1xxx (UInt(immh:immb)-64) */
336 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
337 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
342 /* Insert fields for e.g. the immediate operands in
343 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
345 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
347 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
351 imm
= info
->imm
.value
;
352 if (operand_need_shift_by_two (self
))
354 insert_all_fields (self
, code
, imm
);
358 /* Insert immediate and its shift amount for e.g. the last operand in
359 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
361 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
362 aarch64_insn
*code
, const aarch64_inst
*inst
)
365 aarch64_ins_imm (self
, info
, code
, inst
);
367 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
371 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
372 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
374 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
375 const aarch64_opnd_info
*info
,
377 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
379 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
380 uint64_t imm
= info
->imm
.value
;
381 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
382 int amount
= info
->shifter
.amount
;
383 aarch64_field field
= {0, 0};
385 /* a:b:c:d:e:f:g:h */
386 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
388 /* Either MOVI <Dd>, #<imm>
389 or MOVI <Vd>.2D, #<imm>.
390 <imm> is a 64-bit immediate
391 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
392 encoded in "a:b:c:d:e:f:g:h". */
393 imm
= aarch64_shrink_expanded_imm8 (imm
);
394 assert ((int)imm
>= 0);
396 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
398 if (kind
== AARCH64_MOD_NONE
)
401 /* shift amount partially in cmode */
402 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
403 if (kind
== AARCH64_MOD_LSL
)
405 /* AARCH64_MOD_LSL: shift zeros. */
406 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
407 assert (esize
== 4 || esize
== 2 || esize
== 1);
408 /* For 8-bit move immediate, the optional LSL #0 does not require
414 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
416 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
420 /* AARCH64_MOD_MSL: shift ones. */
422 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
424 insert_field_2 (&field
, code
, amount
, 0);
429 /* Insert fields for an 8-bit floating-point immediate. */
431 aarch64_ins_fpimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
433 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
435 insert_all_fields (self
, code
, info
->imm
.value
);
439 /* Insert field rot for the rotate immediate in
440 FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<T>, #<rotate>. */
442 aarch64_ins_imm_rotate (const aarch64_operand
*self
,
443 const aarch64_opnd_info
*info
,
444 aarch64_insn
*code
, const aarch64_inst
*inst
)
446 uint64_t rot
= info
->imm
.value
/ 90;
450 case AARCH64_OPND_IMM_ROT1
:
451 case AARCH64_OPND_IMM_ROT2
:
459 case AARCH64_OPND_IMM_ROT3
:
469 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
474 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
475 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
477 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
479 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
481 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
485 /* Insert arithmetic immediate for e.g. the last operand in
486 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
488 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
489 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
492 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
493 insert_field (self
->fields
[0], code
, value
, 0);
494 /* imm12 (unsigned) */
495 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
499 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
500 the operand should be inverted before encoding. */
502 aarch64_ins_limm_1 (const aarch64_operand
*self
,
503 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
504 const aarch64_inst
*inst
, bfd_boolean invert_p
)
507 uint64_t imm
= info
->imm
.value
;
508 int esize
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
512 if (aarch64_logical_immediate_p (imm
, esize
, &value
) == FALSE
)
513 /* The constraint check should have guaranteed this wouldn't happen. */
516 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
521 /* Insert logical/bitmask immediate for e.g. the last operand in
522 ORR <Wd|WSP>, <Wn>, #<imm>. */
524 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
525 aarch64_insn
*code
, const aarch64_inst
*inst
)
527 return aarch64_ins_limm_1 (self
, info
, code
, inst
,
528 inst
->opcode
->op
== OP_BIC
);
531 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
533 aarch64_ins_inv_limm (const aarch64_operand
*self
,
534 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
535 const aarch64_inst
*inst
)
537 return aarch64_ins_limm_1 (self
, info
, code
, inst
, TRUE
);
540 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
541 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
543 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
544 aarch64_insn
*code
, const aarch64_inst
*inst
)
546 aarch64_insn value
= 0;
548 assert (info
->idx
== 0);
551 aarch64_ins_regno (self
, info
, code
, inst
);
552 if (inst
->opcode
->iclass
== ldstpair_indexed
553 || inst
->opcode
->iclass
== ldstnapair_offs
554 || inst
->opcode
->iclass
== ldstpair_off
555 || inst
->opcode
->iclass
== loadlit
)
558 switch (info
->qualifier
)
560 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
561 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
562 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
565 insert_field (FLD_ldst_size
, code
, value
, 0);
570 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
571 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
577 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
579 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
580 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
581 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
584 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
588 /* Encode the address operand for e.g.
589 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
591 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
592 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
593 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
596 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
599 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
601 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
603 if (kind
== AARCH64_MOD_LSL
)
604 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
605 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
607 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
608 S
= info
->shifter
.amount
!= 0;
610 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
614 Must be #0 if <extend> is explicitly LSL. */
615 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
616 insert_field (FLD_S
, code
, S
, 0);
621 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
623 aarch64_ins_addr_simm (const aarch64_operand
*self
,
624 const aarch64_opnd_info
*info
,
626 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
631 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
632 /* simm (imm9 or imm7) */
633 imm
= info
->addr
.offset
.imm
;
634 if (self
->fields
[0] == FLD_imm7
)
635 /* scaled immediate in ld/st pair instructions.. */
636 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
637 insert_field (self
->fields
[0], code
, imm
, 0);
638 /* pre/post- index */
639 if (info
->addr
.writeback
)
641 assert (inst
->opcode
->iclass
!= ldst_unscaled
642 && inst
->opcode
->iclass
!= ldstnapair_offs
643 && inst
->opcode
->iclass
!= ldstpair_off
644 && inst
->opcode
->iclass
!= ldst_unpriv
);
645 assert (info
->addr
.preind
!= info
->addr
.postind
);
646 if (info
->addr
.preind
)
647 insert_field (self
->fields
[1], code
, 1, 0);
653 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
655 aarch64_ins_addr_simm10 (const aarch64_operand
*self
,
656 const aarch64_opnd_info
*info
,
658 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
663 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
665 imm
= info
->addr
.offset
.imm
>> 3;
666 insert_field (self
->fields
[1], code
, imm
>> 9, 0);
667 insert_field (self
->fields
[2], code
, imm
, 0);
669 if (info
->addr
.writeback
)
671 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
672 insert_field (self
->fields
[3], code
, 1, 0);
677 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
679 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
680 const aarch64_opnd_info
*info
,
682 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
684 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
687 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
689 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
693 /* Encode the address operand for e.g.
694 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
696 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
697 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
698 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
701 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
703 if (info
->addr
.offset
.is_reg
)
704 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
706 insert_field (FLD_Rm
, code
, 0x1f, 0);
710 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
712 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
713 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
714 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
717 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
721 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
723 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
724 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
725 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
727 /* op0:op1:CRn:CRm:op2 */
728 insert_fields (code
, info
->sysreg
, inst
->opcode
->mask
, 5,
729 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
733 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
735 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
736 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
737 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
740 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
745 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
747 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
748 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
749 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
751 /* op1:CRn:CRm:op2 */
752 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
753 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
757 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
760 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
761 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
762 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
765 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
769 /* Encode the prefetch operation option operand for e.g.
770 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
773 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
774 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
775 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
778 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
782 /* Encode the hint number for instructions that alias HINT but take an
786 aarch64_ins_hint (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
787 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
788 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
791 insert_fields (code
, info
->hint_option
->value
, 0, 2, FLD_op2
, FLD_CRm
);
795 /* Encode the extended register operand for e.g.
796 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
798 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
799 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
800 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
802 enum aarch64_modifier_kind kind
;
805 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
807 kind
= info
->shifter
.kind
;
808 if (kind
== AARCH64_MOD_LSL
)
809 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
810 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
811 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
813 insert_field (FLD_imm3
, code
, info
->shifter
.amount
, 0);
818 /* Encode the shifted register operand for e.g.
819 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
821 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
822 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
823 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
826 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
828 insert_field (FLD_shift
, code
,
829 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
831 insert_field (FLD_imm6
, code
, info
->shifter
.amount
, 0);
836 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
837 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
838 SELF's operand-dependent value. fields[0] specifies the field that
839 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
841 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand
*self
,
842 const aarch64_opnd_info
*info
,
844 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
846 int factor
= 1 + get_operand_specific_data (self
);
847 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
848 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
852 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
853 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
854 SELF's operand-dependent value. fields[0] specifies the field that
855 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
857 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand
*self
,
858 const aarch64_opnd_info
*info
,
860 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
862 int factor
= 1 + get_operand_specific_data (self
);
863 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
864 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
868 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
869 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
870 SELF's operand-dependent value. fields[0] specifies the field that
871 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
872 and imm3 fields, with imm3 being the less-significant part. */
874 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand
*self
,
875 const aarch64_opnd_info
*info
,
877 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
879 int factor
= 1 + get_operand_specific_data (self
);
880 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
881 insert_fields (code
, info
->addr
.offset
.imm
/ factor
, 0,
882 2, FLD_imm3
, FLD_SVE_imm6
);
886 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
887 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
888 value. fields[0] specifies the base register field. */
890 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand
*self
,
891 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
892 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
894 int factor
= 1 << get_operand_specific_data (self
);
895 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
896 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
900 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
901 is SELF's operand-dependent value. fields[0] specifies the base
902 register field and fields[1] specifies the offset register field. */
904 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand
*self
,
905 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
906 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
908 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
909 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
913 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
914 <shift> is SELF's operand-dependent value. fields[0] specifies the
915 base register field, fields[1] specifies the offset register field and
916 fields[2] is a single-bit field that selects SXTW over UXTW. */
918 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand
*self
,
919 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
920 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
922 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
923 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
924 if (info
->shifter
.kind
== AARCH64_MOD_UXTW
)
925 insert_field (self
->fields
[2], code
, 0, 0);
927 insert_field (self
->fields
[2], code
, 1, 0);
931 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
932 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
933 fields[0] specifies the base register field. */
935 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand
*self
,
936 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
937 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
939 int factor
= 1 << get_operand_specific_data (self
);
940 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
941 insert_field (FLD_imm5
, code
, info
->addr
.offset
.imm
/ factor
, 0);
945 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
946 where <modifier> is fixed by the instruction and where <msz> is a
947 2-bit unsigned number. fields[0] specifies the base register field
948 and fields[1] specifies the offset register field. */
950 aarch64_ext_sve_addr_zz (const aarch64_operand
*self
,
951 const aarch64_opnd_info
*info
, aarch64_insn
*code
)
953 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
954 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
955 insert_field (FLD_SVE_msz
, code
, info
->shifter
.amount
, 0);
959 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
960 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
961 field and fields[1] specifies the offset register field. */
963 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand
*self
,
964 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
965 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
967 return aarch64_ext_sve_addr_zz (self
, info
, code
);
970 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
971 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
972 field and fields[1] specifies the offset register field. */
974 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand
*self
,
975 const aarch64_opnd_info
*info
,
977 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
979 return aarch64_ext_sve_addr_zz (self
, info
, code
);
982 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
983 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
984 field and fields[1] specifies the offset register field. */
986 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand
*self
,
987 const aarch64_opnd_info
*info
,
989 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
991 return aarch64_ext_sve_addr_zz (self
, info
, code
);
994 /* Encode an SVE ADD/SUB immediate. */
996 aarch64_ins_sve_aimm (const aarch64_operand
*self
,
997 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
998 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1000 if (info
->shifter
.amount
== 8)
1001 insert_all_fields (self
, code
, (info
->imm
.value
& 0xff) | 256);
1002 else if (info
->imm
.value
!= 0 && (info
->imm
.value
& 0xff) == 0)
1003 insert_all_fields (self
, code
, ((info
->imm
.value
/ 256) & 0xff) | 256);
1005 insert_all_fields (self
, code
, info
->imm
.value
& 0xff);
1009 /* Encode an SVE CPY/DUP immediate. */
1011 aarch64_ins_sve_asimm (const aarch64_operand
*self
,
1012 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1013 const aarch64_inst
*inst
)
1015 return aarch64_ins_sve_aimm (self
, info
, code
, inst
);
1018 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1019 array specifies which field to use for Zn. MM is encoded in the
1020 concatenation of imm5 and SVE_tszh, with imm5 being the less
1021 significant part. */
1023 aarch64_ins_sve_index (const aarch64_operand
*self
,
1024 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1025 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1027 unsigned int esize
= aarch64_get_qualifier_esize (info
->qualifier
);
1028 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, 0);
1029 insert_fields (code
, (info
->reglane
.index
* 2 + 1) * esize
, 0,
1030 2, FLD_imm5
, FLD_SVE_tszh
);
1034 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1036 aarch64_ins_sve_limm_mov (const aarch64_operand
*self
,
1037 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1038 const aarch64_inst
*inst
)
1040 return aarch64_ins_limm (self
, info
, code
, inst
);
1043 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1046 aarch64_ins_sve_reglist (const aarch64_operand
*self
,
1047 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1048 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1050 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
1054 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1055 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1058 aarch64_ins_sve_scale (const aarch64_operand
*self
,
1059 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1060 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1062 insert_all_fields (self
, code
, info
->imm
.value
);
1063 insert_field (FLD_SVE_imm4
, code
, info
->shifter
.amount
- 1, 0);
1067 /* Encode an SVE shift left immediate. */
1069 aarch64_ins_sve_shlimm (const aarch64_operand
*self
,
1070 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1071 const aarch64_inst
*inst
)
1073 const aarch64_opnd_info
*prev_operand
;
1076 assert (info
->idx
> 0);
1077 prev_operand
= &inst
->operands
[info
->idx
- 1];
1078 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1079 insert_all_fields (self
, code
, 8 * esize
+ info
->imm
.value
);
1083 /* Encode an SVE shift right immediate. */
1085 aarch64_ins_sve_shrimm (const aarch64_operand
*self
,
1086 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1087 const aarch64_inst
*inst
)
1089 const aarch64_opnd_info
*prev_operand
;
1092 assert (info
->idx
> 0);
1093 prev_operand
= &inst
->operands
[info
->idx
- 1];
1094 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1095 insert_all_fields (self
, code
, 16 * esize
- info
->imm
.value
);
1099 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1100 The fields array specifies which field to use. */
1102 aarch64_ins_sve_float_half_one (const aarch64_operand
*self
,
1103 const aarch64_opnd_info
*info
,
1105 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1107 if (info
->imm
.value
== 0x3f000000)
1108 insert_field (self
->fields
[0], code
, 0, 0);
1110 insert_field (self
->fields
[0], code
, 1, 0);
1114 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1115 The fields array specifies which field to use. */
1117 aarch64_ins_sve_float_half_two (const aarch64_operand
*self
,
1118 const aarch64_opnd_info
*info
,
1120 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1122 if (info
->imm
.value
== 0x3f000000)
1123 insert_field (self
->fields
[0], code
, 0, 0);
1125 insert_field (self
->fields
[0], code
, 1, 0);
1129 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1130 The fields array specifies which field to use. */
1132 aarch64_ins_sve_float_zero_one (const aarch64_operand
*self
,
1133 const aarch64_opnd_info
*info
,
1135 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
1137 if (info
->imm
.value
== 0)
1138 insert_field (self
->fields
[0], code
, 0, 0);
1140 insert_field (self
->fields
[0], code
, 1, 0);
1144 /* Miscellaneous encoding functions. */
1146 /* Encode size[0], i.e. bit 22, for
1147 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1150 encode_asimd_fcvt (aarch64_inst
*inst
)
1153 aarch64_field field
= {0, 0};
1154 enum aarch64_opnd_qualifier qualifier
;
1156 switch (inst
->opcode
->op
)
1160 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1161 qualifier
= inst
->operands
[1].qualifier
;
1165 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1166 qualifier
= inst
->operands
[0].qualifier
;
1171 assert (qualifier
== AARCH64_OPND_QLF_V_4S
1172 || qualifier
== AARCH64_OPND_QLF_V_2D
);
1173 value
= (qualifier
== AARCH64_OPND_QLF_V_4S
) ? 0 : 1;
1174 gen_sub_field (FLD_size
, 0, 1, &field
);
1175 insert_field_2 (&field
, &inst
->value
, value
, 0);
1178 /* Encode size[0], i.e. bit 22, for
1179 e.g. FCVTXN <Vb><d>, <Va><n>. */
1182 encode_asisd_fcvtxn (aarch64_inst
*inst
)
1184 aarch64_insn val
= 1;
1185 aarch64_field field
= {0, 0};
1186 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_S
);
1187 gen_sub_field (FLD_size
, 0, 1, &field
);
1188 insert_field_2 (&field
, &inst
->value
, val
, 0);
1191 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1193 encode_fcvt (aarch64_inst
*inst
)
1196 const aarch64_field field
= {15, 2};
1199 switch (inst
->operands
[0].qualifier
)
1201 case AARCH64_OPND_QLF_S_S
: val
= 0; break;
1202 case AARCH64_OPND_QLF_S_D
: val
= 1; break;
1203 case AARCH64_OPND_QLF_S_H
: val
= 3; break;
1206 insert_field_2 (&field
, &inst
->value
, val
, 0);
1211 /* Return the index in qualifiers_list that INST is using. Should only
1212 be called once the qualifiers are known to be valid. */
1215 aarch64_get_variant (struct aarch64_inst
*inst
)
1217 int i
, nops
, variant
;
1219 nops
= aarch64_num_of_operands (inst
->opcode
);
1220 for (variant
= 0; variant
< AARCH64_MAX_QLF_SEQ_NUM
; ++variant
)
1222 for (i
= 0; i
< nops
; ++i
)
1223 if (inst
->opcode
->qualifiers_list
[variant
][i
]
1224 != inst
->operands
[i
].qualifier
)
1232 /* Do miscellaneous encodings that are not common enough to be driven by
1236 do_misc_encoding (aarch64_inst
*inst
)
1240 switch (inst
->opcode
->op
)
1249 encode_asimd_fcvt (inst
);
1252 encode_asisd_fcvtxn (inst
);
1256 /* Copy Pn to Pm and Pg. */
1257 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1258 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1259 insert_field (FLD_SVE_Pg4_10
, &inst
->value
, value
, 0);
1262 /* Copy Zd to Zm. */
1263 value
= extract_field (FLD_SVE_Zd
, inst
->value
, 0);
1264 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1267 /* Fill in the zero immediate. */
1268 insert_field (FLD_SVE_tsz
, &inst
->value
,
1269 1 << aarch64_get_variant (inst
), 0);
1272 /* Copy Zn to Zm. */
1273 value
= extract_field (FLD_SVE_Zn
, inst
->value
, 0);
1274 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1279 /* Copy Pd to Pm. */
1280 value
= extract_field (FLD_SVE_Pd
, inst
->value
, 0);
1281 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1283 case OP_MOVZS_P_P_P
:
1285 /* Copy Pn to Pm. */
1286 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1287 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1289 case OP_NOTS_P_P_P_Z
:
1290 case OP_NOT_P_P_P_Z
:
1291 /* Copy Pg to Pm. */
1292 value
= extract_field (FLD_SVE_Pg4_10
, inst
->value
, 0);
1293 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1299 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1301 encode_sizeq (aarch64_inst
*inst
)
1304 enum aarch64_field_kind kind
;
1307 /* Get the index of the operand whose information we are going to use
1308 to encode the size and Q fields.
1309 This is deduced from the possible valid qualifier lists. */
1310 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1311 DEBUG_TRACE ("idx: %d; qualifier: %s", idx
,
1312 aarch64_get_qualifier_name (inst
->operands
[idx
].qualifier
));
1313 sizeq
= aarch64_get_qualifier_standard_value (inst
->operands
[idx
].qualifier
);
1315 insert_field (FLD_Q
, &inst
->value
, sizeq
& 0x1, inst
->opcode
->mask
);
1317 if (inst
->opcode
->iclass
== asisdlse
1318 || inst
->opcode
->iclass
== asisdlsep
1319 || inst
->opcode
->iclass
== asisdlso
1320 || inst
->opcode
->iclass
== asisdlsop
)
1321 kind
= FLD_vldst_size
;
1324 insert_field (kind
, &inst
->value
, (sizeq
>> 1) & 0x3, inst
->opcode
->mask
);
1327 /* Opcodes that have fields shared by multiple operands are usually flagged
1328 with flags. In this function, we detect such flags and use the
1329 information in one of the related operands to do the encoding. The 'one'
1330 operand is not any operand but one of the operands that has the enough
1331 information for such an encoding. */
1334 do_special_encoding (struct aarch64_inst
*inst
)
1337 aarch64_insn value
= 0;
1339 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst
->value
);
1341 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1342 if (inst
->opcode
->flags
& F_COND
)
1344 insert_field (FLD_cond2
, &inst
->value
, inst
->cond
->value
, 0);
1346 if (inst
->opcode
->flags
& F_SF
)
1348 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1349 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1350 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1352 insert_field (FLD_sf
, &inst
->value
, value
, 0);
1353 if (inst
->opcode
->flags
& F_N
)
1354 insert_field (FLD_N
, &inst
->value
, value
, inst
->opcode
->mask
);
1356 if (inst
->opcode
->flags
& F_LSE_SZ
)
1358 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1359 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1360 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1362 insert_field (FLD_lse_sz
, &inst
->value
, value
, 0);
1364 if (inst
->opcode
->flags
& F_SIZEQ
)
1365 encode_sizeq (inst
);
1366 if (inst
->opcode
->flags
& F_FPTYPE
)
1368 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
1369 switch (inst
->operands
[idx
].qualifier
)
1371 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
1372 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
1373 case AARCH64_OPND_QLF_S_H
: value
= 3; break;
1374 default: assert (0);
1376 insert_field (FLD_type
, &inst
->value
, value
, 0);
1378 if (inst
->opcode
->flags
& F_SSIZE
)
1380 enum aarch64_opnd_qualifier qualifier
;
1381 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
1382 qualifier
= inst
->operands
[idx
].qualifier
;
1383 assert (qualifier
>= AARCH64_OPND_QLF_S_B
1384 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
1385 value
= aarch64_get_qualifier_standard_value (qualifier
);
1386 insert_field (FLD_size
, &inst
->value
, value
, inst
->opcode
->mask
);
1388 if (inst
->opcode
->flags
& F_T
)
1390 int num
; /* num of consecutive '0's on the right side of imm5<3:0>. */
1391 aarch64_field field
= {0, 0};
1392 enum aarch64_opnd_qualifier qualifier
;
1395 qualifier
= inst
->operands
[idx
].qualifier
;
1396 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1397 == AARCH64_OPND_CLASS_SIMD_REG
1398 && qualifier
>= AARCH64_OPND_QLF_V_8B
1399 && qualifier
<= AARCH64_OPND_QLF_V_2D
);
1410 value
= aarch64_get_qualifier_standard_value (qualifier
);
1411 insert_field (FLD_Q
, &inst
->value
, value
& 0x1, inst
->opcode
->mask
);
1412 num
= (int) value
>> 1;
1413 assert (num
>= 0 && num
<= 3);
1414 gen_sub_field (FLD_imm5
, 0, num
+ 1, &field
);
1415 insert_field_2 (&field
, &inst
->value
, 1 << num
, inst
->opcode
->mask
);
1417 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
1419 /* Use Rt to encode in the case of e.g.
1420 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1421 enum aarch64_opnd_qualifier qualifier
;
1422 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
1424 /* Otherwise use the result operand, which has to be a integer
1427 assert (idx
== 0 || idx
== 1);
1428 assert (aarch64_get_operand_class (inst
->opcode
->operands
[idx
])
1429 == AARCH64_OPND_CLASS_INT_REG
);
1430 qualifier
= inst
->operands
[idx
].qualifier
;
1431 insert_field (FLD_Q
, &inst
->value
,
1432 aarch64_get_qualifier_standard_value (qualifier
), 0);
1434 if (inst
->opcode
->flags
& F_LDS_SIZE
)
1436 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1437 enum aarch64_opnd_qualifier qualifier
;
1438 aarch64_field field
= {0, 0};
1439 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1440 == AARCH64_OPND_CLASS_INT_REG
);
1441 gen_sub_field (FLD_opc
, 0, 1, &field
);
1442 qualifier
= inst
->operands
[0].qualifier
;
1443 insert_field_2 (&field
, &inst
->value
,
1444 1 - aarch64_get_qualifier_standard_value (qualifier
), 0);
1446 /* Miscellaneous encoding as the last step. */
1447 if (inst
->opcode
->flags
& F_MISC
)
1448 do_misc_encoding (inst
);
1450 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst
->value
);
1453 /* Some instructions (including all SVE ones) use the instruction class
1454 to describe how a qualifiers_list index is represented in the instruction
1455 encoding. If INST is such an instruction, encode the chosen qualifier
1459 aarch64_encode_variant_using_iclass (struct aarch64_inst
*inst
)
1461 switch (inst
->opcode
->iclass
)
1464 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1465 0, 2, FLD_SVE_M_14
, FLD_size
);
1469 case sve_shift_pred
:
1470 case sve_shift_unpred
:
1471 /* For indices and shift amounts, the variant is encoded as
1472 part of the immediate. */
1476 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1477 and depend on the immediate. They don't have a separate
1482 /* sve_misc instructions have only a single variant. */
1486 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1487 0, 2, FLD_SVE_M_16
, FLD_size
);
1491 insert_field (FLD_SVE_M_4
, &inst
->value
, aarch64_get_variant (inst
), 0);
1496 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
), 0);
1500 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
) + 1, 0);
1504 insert_field (FLD_SVE_sz
, &inst
->value
, aarch64_get_variant (inst
), 0);
1512 /* Converters converting an alias opcode instruction to its real form. */
1514 /* ROR <Wd>, <Ws>, #<shift>
1516 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1518 convert_ror_to_extr (aarch64_inst
*inst
)
1520 copy_operand_info (inst
, 3, 2);
1521 copy_operand_info (inst
, 2, 1);
1524 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1526 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1528 convert_xtl_to_shll (aarch64_inst
*inst
)
1530 inst
->operands
[2].qualifier
= inst
->operands
[1].qualifier
;
1531 inst
->operands
[2].imm
.value
= 0;
1535 LSR <Xd>, <Xn>, #<shift>
1537 UBFM <Xd>, <Xn>, #<shift>, #63. */
1539 convert_sr_to_bfm (aarch64_inst
*inst
)
1541 inst
->operands
[3].imm
.value
=
1542 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
1545 /* Convert MOV to ORR. */
1547 convert_mov_to_orr (aarch64_inst
*inst
)
1549 /* MOV <Vd>.<T>, <Vn>.<T>
1551 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1552 copy_operand_info (inst
, 2, 1);
1555 /* When <imms> >= <immr>, the instruction written:
1556 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1558 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1561 convert_bfx_to_bfm (aarch64_inst
*inst
)
1565 /* Convert the operand. */
1566 lsb
= inst
->operands
[2].imm
.value
;
1567 width
= inst
->operands
[3].imm
.value
;
1568 inst
->operands
[2].imm
.value
= lsb
;
1569 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
1572 /* When <imms> < <immr>, the instruction written:
1573 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1575 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1578 convert_bfi_to_bfm (aarch64_inst
*inst
)
1582 /* Convert the operand. */
1583 lsb
= inst
->operands
[2].imm
.value
;
1584 width
= inst
->operands
[3].imm
.value
;
1585 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1587 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1588 inst
->operands
[3].imm
.value
= width
- 1;
1592 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1593 inst
->operands
[3].imm
.value
= width
- 1;
1597 /* The instruction written:
1598 BFC <Xd>, #<lsb>, #<width>
1600 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1603 convert_bfc_to_bfm (aarch64_inst
*inst
)
1608 copy_operand_info (inst
, 3, 2);
1609 copy_operand_info (inst
, 2, 1);
1610 copy_operand_info (inst
, 0, 0);
1611 inst
->operands
[1].reg
.regno
= 0x1f;
1613 /* Convert the immedate operand. */
1614 lsb
= inst
->operands
[2].imm
.value
;
1615 width
= inst
->operands
[3].imm
.value
;
1616 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1618 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1619 inst
->operands
[3].imm
.value
= width
- 1;
1623 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1624 inst
->operands
[3].imm
.value
= width
- 1;
1628 /* The instruction written:
1629 LSL <Xd>, <Xn>, #<shift>
1631 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1634 convert_lsl_to_ubfm (aarch64_inst
*inst
)
1636 int64_t shift
= inst
->operands
[2].imm
.value
;
1638 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1640 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
1641 inst
->operands
[3].imm
.value
= 31 - shift
;
1645 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
1646 inst
->operands
[3].imm
.value
= 63 - shift
;
1650 /* CINC <Wd>, <Wn>, <cond>
1652 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1655 convert_to_csel (aarch64_inst
*inst
)
1657 copy_operand_info (inst
, 3, 2);
1658 copy_operand_info (inst
, 2, 1);
1659 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1662 /* CSET <Wd>, <cond>
1664 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1667 convert_cset_to_csinc (aarch64_inst
*inst
)
1669 copy_operand_info (inst
, 3, 1);
1670 copy_operand_info (inst
, 2, 0);
1671 copy_operand_info (inst
, 1, 0);
1672 inst
->operands
[1].reg
.regno
= 0x1f;
1673 inst
->operands
[2].reg
.regno
= 0x1f;
1674 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1679 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1682 convert_mov_to_movewide (aarch64_inst
*inst
)
1685 uint32_t shift_amount
;
1688 switch (inst
->opcode
->op
)
1690 case OP_MOV_IMM_WIDE
:
1691 value
= inst
->operands
[1].imm
.value
;
1693 case OP_MOV_IMM_WIDEN
:
1694 value
= ~inst
->operands
[1].imm
.value
;
1699 inst
->operands
[1].type
= AARCH64_OPND_HALF
;
1700 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
1701 if (! aarch64_wide_constant_p (value
, is32
, &shift_amount
))
1702 /* The constraint check should have guaranteed this wouldn't happen. */
1704 value
>>= shift_amount
;
1706 inst
->operands
[1].imm
.value
= value
;
1707 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_LSL
;
1708 inst
->operands
[1].shifter
.amount
= shift_amount
;
1713 ORR <Wd>, WZR, #<imm>. */
1716 convert_mov_to_movebitmask (aarch64_inst
*inst
)
1718 copy_operand_info (inst
, 2, 1);
1719 inst
->operands
[1].reg
.regno
= 0x1f;
1720 inst
->operands
[1].skip
= 0;
1723 /* Some alias opcodes are assembled by being converted to their real-form. */
1726 convert_to_real (aarch64_inst
*inst
, const aarch64_opcode
*real
)
1728 const aarch64_opcode
*alias
= inst
->opcode
;
1730 if ((alias
->flags
& F_CONV
) == 0)
1731 goto convert_to_real_return
;
1737 convert_sr_to_bfm (inst
);
1740 convert_lsl_to_ubfm (inst
);
1745 convert_to_csel (inst
);
1749 convert_cset_to_csinc (inst
);
1754 convert_bfx_to_bfm (inst
);
1759 convert_bfi_to_bfm (inst
);
1762 convert_bfc_to_bfm (inst
);
1765 convert_mov_to_orr (inst
);
1767 case OP_MOV_IMM_WIDE
:
1768 case OP_MOV_IMM_WIDEN
:
1769 convert_mov_to_movewide (inst
);
1771 case OP_MOV_IMM_LOG
:
1772 convert_mov_to_movebitmask (inst
);
1775 convert_ror_to_extr (inst
);
1781 convert_xtl_to_shll (inst
);
1787 convert_to_real_return
:
1788 aarch64_replace_opcode (inst
, real
);
1791 /* Encode *INST_ORI of the opcode code OPCODE.
1792 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1793 matched operand qualifier sequence in *QLF_SEQ. */
1796 aarch64_opcode_encode (const aarch64_opcode
*opcode
,
1797 const aarch64_inst
*inst_ori
, aarch64_insn
*code
,
1798 aarch64_opnd_qualifier_t
*qlf_seq
,
1799 aarch64_operand_error
*mismatch_detail
)
1802 const aarch64_opcode
*aliased
;
1803 aarch64_inst copy
, *inst
;
1805 DEBUG_TRACE ("enter with %s", opcode
->name
);
1807 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1811 assert (inst
->opcode
== NULL
|| inst
->opcode
== opcode
);
1812 if (inst
->opcode
== NULL
)
1813 inst
->opcode
= opcode
;
1815 /* Constrain the operands.
1816 After passing this, the encoding is guaranteed to succeed. */
1817 if (aarch64_match_operands_constraint (inst
, mismatch_detail
) == 0)
1819 DEBUG_TRACE ("FAIL since operand constraint not met");
1823 /* Get the base value.
1824 Note: this has to be before the aliasing handling below in order to
1825 get the base value from the alias opcode before we move on to the
1826 aliased opcode for encoding. */
1827 inst
->value
= opcode
->opcode
;
1829 /* No need to do anything else if the opcode does not have any operand. */
1830 if (aarch64_num_of_operands (opcode
) == 0)
1833 /* Assign operand indexes and check types. Also put the matched
1834 operand qualifiers in *QLF_SEQ to return. */
1835 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1837 assert (opcode
->operands
[i
] == inst
->operands
[i
].type
);
1838 inst
->operands
[i
].idx
= i
;
1839 if (qlf_seq
!= NULL
)
1840 *qlf_seq
= inst
->operands
[i
].qualifier
;
1843 aliased
= aarch64_find_real_opcode (opcode
);
1844 /* If the opcode is an alias and it does not ask for direct encoding by
1845 itself, the instruction will be transformed to the form of real opcode
1846 and the encoding will be carried out using the rules for the aliased
1848 if (aliased
!= NULL
&& (opcode
->flags
& F_CONV
))
1850 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1851 aliased
->name
, opcode
->name
);
1852 /* Convert the operands to the form of the real opcode. */
1853 convert_to_real (inst
, aliased
);
1857 aarch64_opnd_info
*info
= inst
->operands
;
1859 /* Call the inserter of each operand. */
1860 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++info
)
1862 const aarch64_operand
*opnd
;
1863 enum aarch64_opnd type
= opcode
->operands
[i
];
1864 if (type
== AARCH64_OPND_NIL
)
1868 DEBUG_TRACE ("skip the incomplete operand %d", i
);
1871 opnd
= &aarch64_operands
[type
];
1872 if (operand_has_inserter (opnd
))
1873 aarch64_insert_operand (opnd
, info
, &inst
->value
, inst
);
1876 /* Call opcode encoders indicated by flags. */
1877 if (opcode_has_special_coder (opcode
))
1878 do_special_encoding (inst
);
1880 /* Possibly use the instruction class to encode the chosen qualifier
1882 aarch64_encode_variant_using_iclass (inst
);
1885 DEBUG_TRACE ("exit with %s", opcode
->name
);
1887 *code
= inst
->value
;