1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright 2012 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option) any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "aarch64-asm.h"
27 /* The unnamed arguments consist of the number of fields and information about
28 these fields where the VALUE will be inserted into CODE. MASK can be zero or
29 the base mask of the opcode.
31 N.B. the fields are required to be in such an order than the least signficant
32 field for VALUE comes the first, e.g. the <index> in
33 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
34 is encoded in H:L:M in some cases, the the fields H:L:M should be passed in
35 the order of M, L, H. */
38 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
41 const aarch64_field
*field
;
42 enum aarch64_field_kind kind
;
46 num
= va_arg (va
, uint32_t);
50 kind
= va_arg (va
, enum aarch64_field_kind
);
51 field
= &fields
[kind
];
52 insert_field (kind
, code
, value
, mask
);
53 value
>>= field
->width
;
58 /* Operand inserters. */
60 /* Insert register number. */
62 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
64 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
66 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
70 /* Insert register number, index and/or other data for SIMD register element
71 operand, e.g. the last source operand in
72 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
74 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
75 aarch64_insn
*code
, const aarch64_inst
*inst
)
78 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
79 /* index and/or type */
80 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
82 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
83 if (info
->type
== AARCH64_OPND_En
84 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
86 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
87 assert (info
->idx
== 1); /* Vn */
88 aarch64_insn value
= info
->reglane
.index
<< pos
;
89 insert_field (FLD_imm4
, code
, value
, 0);
93 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
100 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
101 insert_field (FLD_imm5
, code
, value
, 0);
106 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
107 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
108 switch (info
->qualifier
)
110 case AARCH64_OPND_QLF_S_H
:
112 insert_fields (code
, info
->reglane
.index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
114 case AARCH64_OPND_QLF_S_S
:
116 insert_fields (code
, info
->reglane
.index
, 0, 2, FLD_L
, FLD_H
);
118 case AARCH64_OPND_QLF_S_D
:
120 insert_field (FLD_H
, code
, info
->reglane
.index
, 0);
129 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
131 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
133 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
136 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
138 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
142 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
143 in AdvSIMD load/store instructions. */
145 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
146 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
147 const aarch64_inst
*inst
)
149 aarch64_insn value
= 0;
150 /* Number of elements in each structure to be loaded/stored. */
151 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
154 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
159 switch (info
->reglist
.num_regs
)
161 case 1: value
= 0x7; break;
162 case 2: value
= 0xa; break;
163 case 3: value
= 0x6; break;
164 case 4: value
= 0x2; break;
169 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
180 insert_field (FLD_opcode
, code
, value
, 0);
185 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
186 single structure to all lanes instructions. */
188 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
189 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
190 const aarch64_inst
*inst
)
193 /* The opcode dependent area stores the number of elements in
194 each structure to be loaded/stored. */
195 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
198 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
200 value
= (aarch64_insn
) 0;
201 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
202 /* OP_LD1R does not have alternating variant, but have "two consecutive"
204 value
= (aarch64_insn
) 1;
205 insert_field (FLD_S
, code
, value
, 0);
210 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
211 operand e.g. Vt in AdvSIMD load/store single element instructions. */
213 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
214 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
215 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
217 aarch64_field field
= {0, 0};
218 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
219 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
221 assert (info
->reglist
.has_index
);
224 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
225 /* Encode the index, opcode<2:1> and size. */
226 switch (info
->qualifier
)
228 case AARCH64_OPND_QLF_S_B
:
229 /* Index encoded in "Q:S:size". */
230 QSsize
= info
->reglist
.index
;
233 case AARCH64_OPND_QLF_S_H
:
234 /* Index encoded in "Q:S:size<1>". */
235 QSsize
= info
->reglist
.index
<< 1;
238 case AARCH64_OPND_QLF_S_S
:
239 /* Index encoded in "Q:S". */
240 QSsize
= info
->reglist
.index
<< 2;
243 case AARCH64_OPND_QLF_S_D
:
244 /* Index encoded in "Q". */
245 QSsize
= info
->reglist
.index
<< 3 | 0x1;
251 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
252 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
253 insert_field_2 (&field
, code
, opcodeh2
, 0);
258 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
259 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
260 or SSHR <V><d>, <V><n>, #<shift>. */
262 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
263 const aarch64_opnd_info
*info
,
264 aarch64_insn
*code
, const aarch64_inst
*inst
)
266 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
269 if (inst
->opcode
->iclass
== asimdshf
)
273 0000 x SEE AdvSIMD modified immediate
282 Q
= (val
& 0x1) ? 1 : 0;
283 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
287 assert (info
->type
== AARCH64_OPND_IMM_VLSR
288 || info
->type
== AARCH64_OPND_IMM_VLSL
);
290 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
293 0000 SEE AdvSIMD modified immediate
294 0001 (16-UInt(immh:immb))
295 001x (32-UInt(immh:immb))
296 01xx (64-UInt(immh:immb))
297 1xxx (128-UInt(immh:immb)) */
298 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
302 0000 SEE AdvSIMD modified immediate
303 0001 (UInt(immh:immb)-8)
304 001x (UInt(immh:immb)-16)
305 01xx (UInt(immh:immb)-32)
306 1xxx (UInt(immh:immb)-64) */
307 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
308 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
313 /* Insert fields for e.g. the immediate operands in
314 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
316 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
318 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
321 /* Maximum of two fields to insert. */
322 assert (self
->fields
[2] == FLD_NIL
);
324 imm
= info
->imm
.value
;
325 if (operand_need_shift_by_two (self
))
327 if (self
->fields
[1] == FLD_NIL
)
328 insert_field (self
->fields
[0], code
, imm
, 0);
330 /* e.g. TBZ b5:b40. */
331 insert_fields (code
, imm
, 0, 2, self
->fields
[1], self
->fields
[0]);
335 /* Insert immediate and its shift amount for e.g. the last operand in
336 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
338 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
339 aarch64_insn
*code
, const aarch64_inst
*inst
)
342 aarch64_ins_imm (self
, info
, code
, inst
);
344 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
348 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
349 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
351 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
352 const aarch64_opnd_info
*info
,
354 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
356 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
357 uint64_t imm
= info
->imm
.value
;
358 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
359 int amount
= info
->shifter
.amount
;
360 aarch64_field field
= {0, 0};
362 /* a:b:c:d:e:f:g:h */
363 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
365 /* Either MOVI <Dd>, #<imm>
366 or MOVI <Vd>.2D, #<imm>.
367 <imm> is a 64-bit immediate
368 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
369 encoded in "a:b:c:d:e:f:g:h". */
370 imm
= aarch64_shrink_expanded_imm8 (imm
);
371 assert ((int)imm
>= 0);
374 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
376 if (kind
== AARCH64_MOD_NONE
)
379 /* shift amount partially in cmode */
380 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
381 if (kind
== AARCH64_MOD_LSL
)
383 /* AARCH64_MOD_LSL: shift zeros. */
384 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
385 assert (esize
== 4 || esize
== 2 || esize
== 1);
386 /* For 8-bit move immediate, the optional LSL #0 does not require
392 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
394 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
398 /* AARCH64_MOD_MSL: shift ones. */
400 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
402 insert_field_2 (&field
, code
, amount
, 0);
407 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
408 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
410 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
412 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
414 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
418 /* Insert arithmetic immediate for e.g. the last operand in
419 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
421 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
422 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
425 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
426 insert_field (self
->fields
[0], code
, value
, 0);
427 /* imm12 (unsigned) */
428 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
432 /* Insert logical/bitmask immediate for e.g. the last operand in
433 ORR <Wd|WSP>, <Wn>, #<imm>. */
435 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
436 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
439 uint64_t imm
= info
->imm
.value
;
440 int is32
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
) == 4;
442 if (inst
->opcode
->op
== OP_BIC
)
444 if (aarch64_logical_immediate_p (imm
, is32
, &value
) == FALSE
)
445 /* The constraint check should have guaranteed this wouldn't happen. */
448 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
453 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
454 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
456 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
457 aarch64_insn
*code
, const aarch64_inst
*inst
)
459 aarch64_insn value
= 0;
461 assert (info
->idx
== 0);
464 aarch64_ins_regno (self
, info
, code
, inst
);
465 if (inst
->opcode
->iclass
== ldstpair_indexed
466 || inst
->opcode
->iclass
== ldstnapair_offs
467 || inst
->opcode
->iclass
== ldstpair_off
468 || inst
->opcode
->iclass
== loadlit
)
471 switch (info
->qualifier
)
473 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
474 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
475 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
478 insert_field (FLD_ldst_size
, code
, value
, 0);
483 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
484 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
490 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
492 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
493 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
494 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
497 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
501 /* Encode the address operand for e.g.
502 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
504 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
505 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
506 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
509 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
512 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
514 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
516 if (kind
== AARCH64_MOD_LSL
)
517 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
518 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
520 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
521 S
= info
->shifter
.amount
!= 0;
523 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
527 Must be #0 if <extend> is explicitly LSL. */
528 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
529 insert_field (FLD_S
, code
, S
, 0);
534 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
536 aarch64_ins_addr_simm (const aarch64_operand
*self
,
537 const aarch64_opnd_info
*info
,
539 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
544 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
545 /* simm (imm9 or imm7) */
546 imm
= info
->addr
.offset
.imm
;
547 if (self
->fields
[0] == FLD_imm7
)
548 /* scaled immediate in ld/st pair instructions.. */
549 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
550 insert_field (self
->fields
[0], code
, imm
, 0);
551 /* pre/post- index */
552 if (info
->addr
.writeback
)
554 assert (inst
->opcode
->iclass
!= ldst_unscaled
555 && inst
->opcode
->iclass
!= ldstnapair_offs
556 && inst
->opcode
->iclass
!= ldstpair_off
557 && inst
->opcode
->iclass
!= ldst_unpriv
);
558 assert (info
->addr
.preind
!= info
->addr
.postind
);
559 if (info
->addr
.preind
)
560 insert_field (self
->fields
[1], code
, 1, 0);
566 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
568 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
569 const aarch64_opnd_info
*info
,
571 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
573 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
576 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
578 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
582 /* Encode the address operand for e.g.
583 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
585 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
586 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
587 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
590 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
592 if (info
->addr
.offset
.is_reg
)
593 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
595 insert_field (FLD_Rm
, code
, 0x1f, 0);
599 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
601 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
602 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
603 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
606 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
610 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
612 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
613 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
614 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
616 /* op0:op1:CRn:CRm:op2 */
617 insert_fields (code
, info
->sysreg
, inst
->opcode
->mask
, 5,
618 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
622 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
624 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
625 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
626 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
629 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
634 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
636 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
637 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
638 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
640 /* op1:CRn:CRm:op2 */
641 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
642 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
646 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
649 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
650 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
651 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
654 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
658 /* Encode the prefetch operation option operand for e.g.
659 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
662 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
663 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
664 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
667 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
671 /* Encode the extended register operand for e.g.
672 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
674 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
675 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
676 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
678 enum aarch64_modifier_kind kind
;
681 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
683 kind
= info
->shifter
.kind
;
684 if (kind
== AARCH64_MOD_LSL
)
685 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
686 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
687 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
689 insert_field (FLD_imm3
, code
, info
->shifter
.amount
, 0);
694 /* Encode the shifted register operand for e.g.
695 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
697 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
698 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
699 const aarch64_inst
*inst ATTRIBUTE_UNUSED
)
702 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
704 insert_field (FLD_shift
, code
,
705 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
707 insert_field (FLD_imm6
, code
, info
->shifter
.amount
, 0);
712 /* Miscellaneous encoding functions. */
714 /* Encode size[0], i.e. bit 22, for
715 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
718 encode_asimd_fcvt (aarch64_inst
*inst
)
721 aarch64_field field
= {0, 0};
722 enum aarch64_opnd_qualifier qualifier
;
724 switch (inst
->opcode
->op
)
728 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
729 qualifier
= inst
->operands
[1].qualifier
;
733 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
734 qualifier
= inst
->operands
[0].qualifier
;
739 assert (qualifier
== AARCH64_OPND_QLF_V_4S
740 || qualifier
== AARCH64_OPND_QLF_V_2D
);
741 value
= (qualifier
== AARCH64_OPND_QLF_V_4S
) ? 0 : 1;
742 gen_sub_field (FLD_size
, 0, 1, &field
);
743 insert_field_2 (&field
, &inst
->value
, value
, 0);
746 /* Encode size[0], i.e. bit 22, for
747 e.g. FCVTXN <Vb><d>, <Va><n>. */
750 encode_asisd_fcvtxn (aarch64_inst
*inst
)
752 aarch64_insn val
= 1;
753 aarch64_field field
= {0, 0};
754 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_S
);
755 gen_sub_field (FLD_size
, 0, 1, &field
);
756 insert_field_2 (&field
, &inst
->value
, val
, 0);
759 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
761 encode_fcvt (aarch64_inst
*inst
)
764 const aarch64_field field
= {15, 2};
767 switch (inst
->operands
[0].qualifier
)
769 case AARCH64_OPND_QLF_S_S
: val
= 0; break;
770 case AARCH64_OPND_QLF_S_D
: val
= 1; break;
771 case AARCH64_OPND_QLF_S_H
: val
= 3; break;
774 insert_field_2 (&field
, &inst
->value
, val
, 0);
779 /* Do miscellaneous encodings that are not common enough to be driven by
783 do_misc_encoding (aarch64_inst
*inst
)
785 switch (inst
->opcode
->op
)
794 encode_asimd_fcvt (inst
);
797 encode_asisd_fcvtxn (inst
);
803 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
805 encode_sizeq (aarch64_inst
*inst
)
808 enum aarch64_field_kind kind
;
811 /* Get the index of the operand whose information we are going to use
812 to encode the size and Q fields.
813 This is deduced from the possible valid qualifier lists. */
814 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
815 DEBUG_TRACE ("idx: %d; qualifier: %s", idx
,
816 aarch64_get_qualifier_name (inst
->operands
[idx
].qualifier
));
817 sizeq
= aarch64_get_qualifier_standard_value (inst
->operands
[idx
].qualifier
);
819 insert_field (FLD_Q
, &inst
->value
, sizeq
& 0x1, inst
->opcode
->mask
);
821 if (inst
->opcode
->iclass
== asisdlse
822 || inst
->opcode
->iclass
== asisdlsep
823 || inst
->opcode
->iclass
== asisdlso
824 || inst
->opcode
->iclass
== asisdlsop
)
825 kind
= FLD_vldst_size
;
828 insert_field (kind
, &inst
->value
, (sizeq
>> 1) & 0x3, inst
->opcode
->mask
);
831 /* Opcodes that have fields shared by multiple operands are usually flagged
832 with flags. In this function, we detect such flags and use the
833 information in one of the related operands to do the encoding. The 'one'
834 operand is not any operand but one of the operands that has the enough
835 information for such an encoding. */
838 do_special_encoding (struct aarch64_inst
*inst
)
841 aarch64_insn value
= 0;
843 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst
->value
);
845 /* Condition for truly conditional executed instructions, e.g. b.cond. */
846 if (inst
->opcode
->flags
& F_COND
)
848 insert_field (FLD_cond2
, &inst
->value
, inst
->cond
->value
, 0);
850 if (inst
->opcode
->flags
& F_SF
)
852 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
853 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
854 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
856 insert_field (FLD_sf
, &inst
->value
, value
, 0);
857 if (inst
->opcode
->flags
& F_N
)
858 insert_field (FLD_N
, &inst
->value
, value
, inst
->opcode
->mask
);
860 if (inst
->opcode
->flags
& F_SIZEQ
)
862 if (inst
->opcode
->flags
& F_FPTYPE
)
864 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
865 switch (inst
->operands
[idx
].qualifier
)
867 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
868 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
869 case AARCH64_OPND_QLF_S_H
: value
= 3; break;
872 insert_field (FLD_type
, &inst
->value
, value
, 0);
874 if (inst
->opcode
->flags
& F_SSIZE
)
876 enum aarch64_opnd_qualifier qualifier
;
877 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
878 qualifier
= inst
->operands
[idx
].qualifier
;
879 assert (qualifier
>= AARCH64_OPND_QLF_S_B
880 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
881 value
= aarch64_get_qualifier_standard_value (qualifier
);
882 insert_field (FLD_size
, &inst
->value
, value
, inst
->opcode
->mask
);
884 if (inst
->opcode
->flags
& F_T
)
886 int num
; /* num of consecutive '0's on the right side of imm5<3:0>. */
887 aarch64_field field
= {0, 0};
888 enum aarch64_opnd_qualifier qualifier
;
891 qualifier
= inst
->operands
[idx
].qualifier
;
892 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
893 == AARCH64_OPND_CLASS_SIMD_REG
894 && qualifier
>= AARCH64_OPND_QLF_V_8B
895 && qualifier
<= AARCH64_OPND_QLF_V_2D
);
906 value
= aarch64_get_qualifier_standard_value (qualifier
);
907 insert_field (FLD_Q
, &inst
->value
, value
& 0x1, inst
->opcode
->mask
);
908 num
= (int) value
>> 1;
909 assert (num
>= 0 && num
<= 3);
910 gen_sub_field (FLD_imm5
, 0, num
+ 1, &field
);
911 insert_field_2 (&field
, &inst
->value
, 1 << num
, inst
->opcode
->mask
);
913 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
915 /* Use Rt to encode in the case of e.g.
916 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
917 enum aarch64_opnd_qualifier qualifier
;
918 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
920 /* Otherwise use the result operand, which has to be a integer
923 assert (idx
== 0 || idx
== 1);
924 assert (aarch64_get_operand_class (inst
->opcode
->operands
[idx
])
925 == AARCH64_OPND_CLASS_INT_REG
);
926 qualifier
= inst
->operands
[idx
].qualifier
;
927 insert_field (FLD_Q
, &inst
->value
,
928 aarch64_get_qualifier_standard_value (qualifier
), 0);
930 if (inst
->opcode
->flags
& F_LDS_SIZE
)
932 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
933 enum aarch64_opnd_qualifier qualifier
;
934 aarch64_field field
= {0, 0};
935 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
936 == AARCH64_OPND_CLASS_INT_REG
);
937 gen_sub_field (FLD_opc
, 0, 1, &field
);
938 qualifier
= inst
->operands
[0].qualifier
;
939 insert_field_2 (&field
, &inst
->value
,
940 1 - aarch64_get_qualifier_standard_value (qualifier
), 0);
942 /* Miscellaneous encoding as the last step. */
943 if (inst
->opcode
->flags
& F_MISC
)
944 do_misc_encoding (inst
);
946 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst
->value
);
949 /* Converters converting an alias opcode instruction to its real form. */
951 /* ROR <Wd>, <Ws>, #<shift>
953 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
955 convert_ror_to_extr (aarch64_inst
*inst
)
957 copy_operand_info (inst
, 3, 2);
958 copy_operand_info (inst
, 2, 1);
962 LSR <Xd>, <Xn>, #<shift>
964 UBFM <Xd>, <Xn>, #<shift>, #63. */
966 convert_sr_to_bfm (aarch64_inst
*inst
)
968 inst
->operands
[3].imm
.value
=
969 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
972 /* Convert MOV to ORR. */
974 convert_mov_to_orr (aarch64_inst
*inst
)
976 /* MOV <Vd>.<T>, <Vn>.<T>
978 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
979 copy_operand_info (inst
, 2, 1);
982 /* When <imms> >= <immr>, the instruction written:
983 SBFX <Xd>, <Xn>, #<lsb>, #<width>
985 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
988 convert_bfx_to_bfm (aarch64_inst
*inst
)
992 /* Convert the operand. */
993 lsb
= inst
->operands
[2].imm
.value
;
994 width
= inst
->operands
[3].imm
.value
;
995 inst
->operands
[2].imm
.value
= lsb
;
996 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
999 /* When <imms> < <immr>, the instruction written:
1000 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1002 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1005 convert_bfi_to_bfm (aarch64_inst
*inst
)
1009 /* Convert the operand. */
1010 lsb
= inst
->operands
[2].imm
.value
;
1011 width
= inst
->operands
[3].imm
.value
;
1012 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1014 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1015 inst
->operands
[3].imm
.value
= width
- 1;
1019 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1020 inst
->operands
[3].imm
.value
= width
- 1;
1024 /* The instruction written:
1025 LSL <Xd>, <Xn>, #<shift>
1027 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1030 convert_lsl_to_ubfm (aarch64_inst
*inst
)
1032 int64_t shift
= inst
->operands
[2].imm
.value
;
1034 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1036 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
1037 inst
->operands
[3].imm
.value
= 31 - shift
;
1041 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
1042 inst
->operands
[3].imm
.value
= 63 - shift
;
1046 /* CINC <Wd>, <Wn>, <cond>
1048 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1051 convert_to_csel (aarch64_inst
*inst
)
1053 copy_operand_info (inst
, 3, 2);
1054 copy_operand_info (inst
, 2, 1);
1055 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1058 /* CSET <Wd>, <cond>
1060 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1063 convert_cset_to_csinc (aarch64_inst
*inst
)
1065 copy_operand_info (inst
, 3, 1);
1066 copy_operand_info (inst
, 2, 0);
1067 copy_operand_info (inst
, 1, 0);
1068 inst
->operands
[1].reg
.regno
= 0x1f;
1069 inst
->operands
[2].reg
.regno
= 0x1f;
1070 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1075 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1078 convert_mov_to_movewide (aarch64_inst
*inst
)
1081 uint32_t shift_amount
;
1084 switch (inst
->opcode
->op
)
1086 case OP_MOV_IMM_WIDE
:
1087 value
= inst
->operands
[1].imm
.value
;
1089 case OP_MOV_IMM_WIDEN
:
1090 value
= ~inst
->operands
[1].imm
.value
;
1095 inst
->operands
[1].type
= AARCH64_OPND_HALF
;
1096 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
1097 if (! aarch64_wide_constant_p (value
, is32
, &shift_amount
))
1098 /* The constraint check should have guaranteed this wouldn't happen. */
1100 value
>>= shift_amount
;
1102 inst
->operands
[1].imm
.value
= value
;
1103 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_LSL
;
1104 inst
->operands
[1].shifter
.amount
= shift_amount
;
1109 ORR <Wd>, WZR, #<imm>. */
1112 convert_mov_to_movebitmask (aarch64_inst
*inst
)
1114 copy_operand_info (inst
, 2, 1);
1115 inst
->operands
[1].reg
.regno
= 0x1f;
1116 inst
->operands
[1].skip
= 0;
1119 /* Some alias opcodes are assembled by being converted to their real-form. */
1122 convert_to_real (aarch64_inst
*inst
, const aarch64_opcode
*real
)
1124 const aarch64_opcode
*alias
= inst
->opcode
;
1126 if ((alias
->flags
& F_CONV
) == 0)
1127 goto convert_to_real_return
;
1133 convert_sr_to_bfm (inst
);
1136 convert_lsl_to_ubfm (inst
);
1141 convert_to_csel (inst
);
1145 convert_cset_to_csinc (inst
);
1150 convert_bfx_to_bfm (inst
);
1155 convert_bfi_to_bfm (inst
);
1158 convert_mov_to_orr (inst
);
1160 case OP_MOV_IMM_WIDE
:
1161 case OP_MOV_IMM_WIDEN
:
1162 convert_mov_to_movewide (inst
);
1164 case OP_MOV_IMM_LOG
:
1165 convert_mov_to_movebitmask (inst
);
1168 convert_ror_to_extr (inst
);
1174 convert_to_real_return
:
1175 aarch64_replace_opcode (inst
, real
);
1178 /* Encode *INST_ORI of the opcode code OPCODE.
1179 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1180 matched operand qualifier sequence in *QLF_SEQ. */
1183 aarch64_opcode_encode (const aarch64_opcode
*opcode
,
1184 const aarch64_inst
*inst_ori
, aarch64_insn
*code
,
1185 aarch64_opnd_qualifier_t
*qlf_seq
,
1186 aarch64_operand_error
*mismatch_detail
)
1189 const aarch64_opcode
*aliased
;
1190 aarch64_inst copy
, *inst
;
1192 DEBUG_TRACE ("enter with %s", opcode
->name
);
1194 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1198 assert (inst
->opcode
== NULL
|| inst
->opcode
== opcode
);
1199 if (inst
->opcode
== NULL
)
1200 inst
->opcode
= opcode
;
1202 /* Constrain the operands.
1203 After passing this, the encoding is guaranteed to succeed. */
1204 if (aarch64_match_operands_constraint (inst
, mismatch_detail
) == 0)
1206 DEBUG_TRACE ("FAIL since operand constraint not met");
1210 /* Get the base value.
1211 Note: this has to be before the aliasing handling below in order to
1212 get the base value from the alias opcode before we move on to the
1213 aliased opcode for encoding. */
1214 inst
->value
= opcode
->opcode
;
1216 /* No need to do anything else if the opcode does not have any operand. */
1217 if (aarch64_num_of_operands (opcode
) == 0)
1220 /* Assign operand indexes and check types. Also put the matched
1221 operand qualifiers in *QLF_SEQ to return. */
1222 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1224 assert (opcode
->operands
[i
] == inst
->operands
[i
].type
);
1225 inst
->operands
[i
].idx
= i
;
1226 if (qlf_seq
!= NULL
)
1227 *qlf_seq
= inst
->operands
[i
].qualifier
;
1230 aliased
= aarch64_find_real_opcode (opcode
);
1231 /* If the opcode is an alias and it does not ask for direct encoding by
1232 itself, the instruction will be transformed to the form of real opcode
1233 and the encoding will be carried out using the rules for the aliased
1235 if (aliased
!= NULL
&& (opcode
->flags
& F_CONV
))
1237 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1238 aliased
->name
, opcode
->name
);
1239 /* Convert the operands to the form of the real opcode. */
1240 convert_to_real (inst
, aliased
);
1244 aarch64_opnd_info
*info
= inst
->operands
;
1246 /* Call the inserter of each operand. */
1247 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++info
)
1249 const aarch64_operand
*opnd
;
1250 enum aarch64_opnd type
= opcode
->operands
[i
];
1251 if (type
== AARCH64_OPND_NIL
)
1255 DEBUG_TRACE ("skip the incomplete operand %d", i
);
1258 opnd
= &aarch64_operands
[type
];
1259 if (operand_has_inserter (opnd
))
1260 aarch64_insert_operand (opnd
, info
, &inst
->value
, inst
);
1263 /* Call opcode encoders indicated by flags. */
1264 if (opcode_has_special_coder (opcode
))
1265 do_special_encoding (inst
);
1268 DEBUG_TRACE ("exit with %s", opcode
->name
);
1270 *code
= inst
->value
;