/* aarch64-asm.c -- AArch64 assembler support.
   Copyright 2012 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "aarch64-asm.h"

/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero or
   the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M in some cases; the fields H:L:M should then be passed in
   the order of M, L, H.  */
static void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  num = va_arg (va, uint32_t);
  while (num--)
    {
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}

/* Operand inserters.  */

/* Insert register number.  */
aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (self->fields[0], code, info->reg.regno, 0);

/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code, const aarch64_inst *inst)
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
          && inst->opcode->operands[0] == AARCH64_OPND_Ed)
          /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
          assert (info->idx == 1);  /* Vn */
          aarch64_insn value = info->reglane.index << pos;
          insert_field (FLD_imm4, code, value, 0);
          /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].  */
          aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
          insert_field (FLD_imm5, code, value, 0);
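          /* Worked example (illustrative): for DUP S0, V1.S[2] the qualifier
             is S_S, so pos = 2 and value = ((2 << 1) | 1) << 2 = 0b10100,
             i.e. imm5 = x100 marks the S size and imm5<4:3> holds the
             index.  */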
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
         or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      switch (info->qualifier)
        case AARCH64_OPND_QLF_S_H:
          insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
        case AARCH64_OPND_QLF_S_S:
          insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
        case AARCH64_OPND_QLF_S_D:
          insert_field (FLD_H, code, info->reglane.index, 0);

/* Insert regno and len field of a register list operand, e.g. Vn in TBL.  */
aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
                     aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);

/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst)
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  switch (info->reglist.num_regs)
    case 1: value = 0x7; break;
    case 2: value = 0xa; break;
    case 3: value = 0x6; break;
    case 4: value = 0x2; break;
  value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
  insert_field (FLD_opcode, code, value, 0);

/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
   single structure to all lanes instructions.  */
aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst)
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  value = (aarch64_insn) 0;
  if (is_ld1r && info->reglist.num_regs == 2)
    /* OP_LD1R does not have an alternating variant, but has a "two
       consecutive" form.  */
    value = (aarch64_insn) 1;
  insert_field (FLD_S, code, value, 0);

/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.  */
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
                           const aarch64_opnd_info *info, aarch64_insn *code,
                           const aarch64_inst *inst ATTRIBUTE_UNUSED)
  aarch64_field field = {0, 0};
  aarch64_insn QSsize;		/* fields Q:S:size.  */
  aarch64_insn opcodeh2;	/* opcode<2:1> */

  assert (info->reglist.has_index);
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q".  */
      QSsize = info->reglist.index << 3 | 0x1;
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);
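  /* Illustrative example: for LD1 {V1.B}[5], [X0] the qualifier is S_B, so
     QSsize = 5 = 0b0101 and the index ends up split as Q = 0, S = 1,
     size = 01 by the insert_fields call above.  */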

/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
                               const aarch64_opnd_info *info,
                               aarch64_insn *code, const aarch64_inst *inst)
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);

  if (inst->opcode->iclass == asimdshf)
      /* 0000	x	SEE AdvSIMD modified immediate  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);

  assert (info->type == AARCH64_OPND_IMM_VLSR
          || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* 0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned) val) - info->imm.value;
  else
    /* 0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned) val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
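  /* Illustrative example: for SSHR D0, D1, #8 the qualifier is S_D (standard
     value 3), so imm = (16 << 3) - 8 = 120 = 0b1111000, giving immh = 1111
     and immb = 000; the shift is recovered as 128 - UInt(immh:immb) = 8.  */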

/* Insert fields for e.g. the immediate operands in
   BFM <Wd>, <Wn>, #<immr>, #<imms>.  */
aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
                 aarch64_insn *code,
                 const aarch64_inst *inst ATTRIBUTE_UNUSED)
  /* Maximum of two fields to insert.  */
  assert (self->fields[2] == FLD_NIL);

  imm = info->imm.value;
  if (operand_need_shift_by_two (self))
    imm >>= 2;
  if (self->fields[1] == FLD_NIL)
    insert_field (self->fields[0], code, imm, 0);
  else
    /* e.g. TBZ b5:b40.  */
    insert_fields (code, imm, 0, 2, self->fields[1], self->fields[0]);

/* Insert immediate and its shift amount for e.g. the last operand in
   MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
                      aarch64_insn *code,
                      const aarch64_inst *inst ATTRIBUTE_UNUSED)
  aarch64_ins_imm (self, info, code, inst);
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
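  /* Illustrative example: for MOVZ W0, #0x1234, LSL #16, aarch64_ins_imm
     encodes imm16 = 0x1234 and FLD_hw receives 16 >> 4 = 1.  */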

/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
   MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
                                  const aarch64_opnd_info *info,
                                  aarch64_insn *code,
                                  const aarch64_inst *inst ATTRIBUTE_UNUSED)
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
      /* Either MOVI <Dd>, #<imm>
         or     MOVI <Vd>.2D, #<imm>.
         <imm> is a 64-bit immediate
         "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
         encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      assert ((int) imm >= 0);
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2);
      gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
      /* AARCH64_MOD_MSL: shift ones.  */
      gen_sub_field (FLD_cmode, 0, 1, &field);	/* per word */
  insert_field_2 (&field, code, amount, 0);

/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
   e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
                   aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (self->fields[0], code, 64 - info->imm.value, 0);

/* Insert arithmetic immediate for e.g. the last operand in
   SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
  aarch64_insn value = info->shifter.amount ? 1 : 0;
  insert_field (self->fields[0], code, value, 0);
  /* imm12 (unsigned) */
  insert_field (self->fields[1], code, info->imm.value, 0);

/* Insert logical/bitmask immediate for e.g. the last operand in
   ORR <Wd|WSP>, <Wn>, #<imm>.  */
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
                  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
  uint64_t imm = info->imm.value;
  int is32 = aarch64_get_qualifier_esize (inst->operands[0].qualifier) == 4;

  if (inst->opcode->op == OP_BIC)
    imm = ~imm;
  if (aarch64_logical_immediate_p (imm, is32, &value) == FALSE)
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);

  insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
                 self->fields[0]);

/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
                aarch64_insn *code, const aarch64_inst *inst)
  assert (info->idx == 0);

  aarch64_ins_regno (self, info, code, inst);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
      switch (info->qualifier)
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_Q: value = 2; break;
      insert_field (FLD_ldst_size, code, value, 0);
  else
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);

/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);

/* Encode the address operand for e.g.
   STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
  enum aarch64_modifier_kind kind = info->shifter.kind;

  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven encoding.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       <amount> must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.  */
aarch64_ins_addr_simm (const aarch64_operand *self,
                       const aarch64_opnd_info *info,
                       aarch64_insn *code, const aarch64_inst *inst)
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* Scaled immediate in ld/st pair instructions.  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post-index */
  if (info->addr.writeback)
      assert (inst->opcode->iclass != ldst_unscaled
              && inst->opcode->iclass != ldstnapair_offs
              && inst->opcode->iclass != ldstpair_off
              && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
        insert_field (self->fields[1], code, 1, 0);
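  /* Illustrative example: for STP X1, X2, [SP, #16] the offset is scaled by
     the 8-byte element size (get_logsz (8) = 3), so imm7 holds 16 >> 3 = 2.  */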

/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}].  */
aarch64_ins_addr_uimm12 (const aarch64_operand *self,
                         const aarch64_opnd_info *info,
                         aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
  int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));

  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);

/* Encode the address operand for e.g.
   LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
                            const aarch64_opnd_info *info, aarch64_insn *code,
                            const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  if (info->addr.offset.is_reg)
    insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  else
    insert_field (FLD_Rm, code, 0x1f, 0);

/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
                  const aarch64_opnd_info *info, aarch64_insn *code,
                  const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_cond, code, info->cond->value, 0);

/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
                    const aarch64_opnd_info *info, aarch64_insn *code,
                    const aarch64_inst *inst ATTRIBUTE_UNUSED)
  /* op0:op1:CRn:CRm:op2 */
  insert_fields (code, info->sysreg, inst->opcode->mask, 5,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);

/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
                 FLD_op2, FLD_op1);

/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
                       const aarch64_opnd_info *info, aarch64_insn *code,
                       const aarch64_inst *inst ATTRIBUTE_UNUSED)
  /* op1:CRn:CRm:op2 */
  insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
                 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);

/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */
aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
                     const aarch64_opnd_info *info, aarch64_insn *code,
                     const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_CRm, code, info->barrier->value, 0);

/* Encode the prefetch operation option operand for e.g.
   PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */
aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
                   const aarch64_opnd_info *info, aarch64_insn *code,
                   const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_Rt, code, info->prfop->value, 0);

/* Encode the extended register operand for e.g.
   STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
                          const aarch64_opnd_info *info, aarch64_insn *code,
                          const aarch64_inst *inst ATTRIBUTE_UNUSED)
  enum aarch64_modifier_kind kind;

  insert_field (FLD_Rm, code, info->reg.regno, 0);
  kind = info->shifter.kind;
  if (kind == AARCH64_MOD_LSL)
    kind = info->qualifier == AARCH64_OPND_QLF_W
           ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  insert_field (FLD_imm3, code, info->shifter.amount, 0);

/* Encode the shifted register operand for e.g.
   SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
                         const aarch64_opnd_info *info, aarch64_insn *code,
                         const aarch64_inst *inst ATTRIBUTE_UNUSED)
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  insert_field (FLD_shift, code,
                aarch64_get_operand_modifier_value (info->shifter.kind), 0);
  insert_field (FLD_imm6, code, info->shifter.amount, 0);

/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
encode_asimd_fcvt (aarch64_inst *inst)
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  switch (inst->opcode->op)
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
  assert (qualifier == AARCH64_OPND_QLF_V_4S
          || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTXN <Vb><d>, <Va><n>.  */
encode_asisd_fcvtxn (aarch64_inst *inst)
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);

/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
encode_fcvt (aarch64_inst *inst)
  const aarch64_field field = {15, 2};

  switch (inst->operands[0].qualifier)
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
  insert_field_2 (&field, &inst->value, val, 0);

/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */
do_misc_encoding (aarch64_inst *inst)
  switch (inst->opcode->op)
      encode_asimd_fcvt (inst);
      encode_asisd_fcvtxn (inst);

/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
encode_sizeq (aarch64_inst *inst)
  enum aarch64_field_kind kind;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
               aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);

/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has enough
   information for such an encoding.  */
do_special_encoding (struct aarch64_inst *inst)
  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
  if (inst->opcode->flags & F_SF)
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
               || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
              ? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
        insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
        case AARCH64_OPND_QLF_S_S: value = 0; break;
        case AARCH64_OPND_QLF_S_D: value = 1; break;
        case AARCH64_OPND_QLF_S_H: value = 3; break;
      insert_field (FLD_type, &inst->value, value, 0);
  if (inst->opcode->flags & F_SSIZE)
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
              && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
  if (inst->opcode->flags & F_T)
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_SIMD_REG
              && qualifier >= AARCH64_OPND_QLF_V_8B
              && qualifier <= AARCH64_OPND_QLF_V_2D);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
      /* Use Rt to encode in the case of e.g.
         STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      /* Otherwise use the result operand, which has to be an integer
         register.  */
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
              == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
                    aarch64_get_qualifier_standard_value (qualifier), 0);
  if (inst->opcode->flags & F_LDS_SIZE)
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
              == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
                      1 - aarch64_get_qualifier_standard_value (qualifier), 0);

  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);

/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
     is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
convert_ror_to_extr (aarch64_inst *inst)
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
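  /* For example (illustrative): ROR W0, W1, #3 becomes EXTR W0, W1, W1, #3,
     i.e. the single source register is duplicated into the second source
     slot.  */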

/* LSR <Xd>, <Xn>, #<shift>
     is equivalent to:
   UBFM <Xd>, <Xn>, #<shift>, #63.  */
convert_sr_to_bfm (aarch64_inst *inst)
  inst->operands[3].imm.value =
    inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;

/* Convert MOV to ORR.  */
convert_mov_to_orr (aarch64_inst *inst)
  /* MOV <Vd>.<T>, <Vn>.<T>
       is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  copy_operand_info (inst, 2, 1);

/* When <imms> >= <immr>, the instruction written:
     SBFX <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
convert_bfx_to_bfm (aarch64_inst *inst)
  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  inst->operands[2].imm.value = lsb;
  inst->operands[3].imm.value = lsb + width - 1;
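  /* Worked example (illustrative): SBFX X0, X1, #4, #8 is assembled as
     SBFM X0, X1, #4, #11, since <lsb> + <width> - 1 = 4 + 8 - 1 = 11.  */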

/* When <imms> < <immr>, the instruction written:
     SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
   is equivalent to:
     SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
convert_bfi_to_bfm (aarch64_inst *inst)
  /* Convert the operand.  */
  lsb = inst->operands[2].imm.value;
  width = inst->operands[3].imm.value;
  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
      inst->operands[2].imm.value = (32 - lsb) & 0x1f;
      inst->operands[3].imm.value = width - 1;
  else
      inst->operands[2].imm.value = (64 - lsb) & 0x3f;
      inst->operands[3].imm.value = width - 1;
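  /* Worked example (illustrative): SBFIZ W0, W1, #4, #8 is assembled as
     SBFM W0, W1, #28, #7, since (32 - 4) & 0x1f = 28 and 8 - 1 = 7.  */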

/* The instruction written:
     LSL <Xd>, <Xn>, #<shift>
   is equivalent to:
     UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
convert_lsl_to_ubfm (aarch64_inst *inst)
  int64_t shift = inst->operands[2].imm.value;

  if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
      inst->operands[2].imm.value = (32 - shift) & 0x1f;
      inst->operands[3].imm.value = 31 - shift;
  else
      inst->operands[2].imm.value = (64 - shift) & 0x3f;
      inst->operands[3].imm.value = 63 - shift;
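  /* Worked example (illustrative): LSL X0, X1, #8 is assembled as
     UBFM X0, X1, #56, #55, since (64 - 8) & 0x3f = 56 and 63 - 8 = 55.  */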

/* CINC <Wd>, <Wn>, <cond>
     is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */
convert_to_csel (aarch64_inst *inst)
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);

/* CSET <Wd>, <cond>
     is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */
convert_cset_to_csinc (aarch64_inst *inst)
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
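  /* For example (illustrative): CSET W0, EQ is assembled as
     CSINC W0, WZR, WZR, NE, with WZR encoded as register number 0x1f.  */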

/* MOVZ <Wd>, #<imm16>, LSL #<shift>.  */
convert_mov_to_movewide (aarch64_inst *inst)
  uint32_t shift_amount;

  switch (inst->opcode->op)
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
    case OP_MOV_IMM_WIDEN:
      value = ~inst->operands[1].imm.value;
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  /* This should have been guaranteed by the constraint check.  */
  assert (aarch64_wide_constant_p (value, is32, &shift_amount) == TRUE);
  value >>= shift_amount;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
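  /* Illustrative example: MOV X0, #0x120000 is assembled as
     MOVZ X0, #0x12, LSL #16, since aarch64_wide_constant_p finds
     shift_amount = 16 and the value is reduced to 0x12 before encoding.  */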

/* ORR <Wd>, WZR, #<imm>.  */
convert_mov_to_movebitmask (aarch64_inst *inst)
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;

/* Some alias opcodes are assembled by being converted to their real form.  */
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
  const aarch64_opcode *alias = inst->opcode;

  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
      convert_sr_to_bfm (inst);
      convert_lsl_to_ubfm (inst);
      convert_to_csel (inst);
      convert_cset_to_csinc (inst);
      convert_bfx_to_bfm (inst);
      convert_bfi_to_bfm (inst);
      convert_mov_to_orr (inst);
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      convert_ror_to_extr (inst);

convert_to_real_return:
  aarch64_replace_opcode (inst, real);

/* Encode *INST_ORI of the opcode OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.  */
aarch64_opcode_encode (const aarch64_opcode *opcode,
                       const aarch64_inst *inst_ori, aarch64_insn *code,
                       aarch64_opnd_qualifier_t *qlf_seq,
                       aarch64_operand_error *mismatch_detail)
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
      DEBUG_TRACE ("FAIL since operand constraint not met");

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
        *qlf_seq = inst->operands[i].qualifier;

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of the real
     opcode and the encoding will be carried out using the rules for the
     aliased opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
                   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
        break;
      DEBUG_TRACE ("skip the incomplete operand %d", i);
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd))
        aarch64_insert_operand (opnd, info, &inst->value, inst);

  /* Call opcode encoders indicated by flags.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;