1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2019 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
33 N.B. the fields are required to be in such an order that the least significant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
40 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
43 const aarch64_field
*field
;
44 enum aarch64_field_kind kind
;
48 num
= va_arg (va
, uint32_t);
52 kind
= va_arg (va
, enum aarch64_field_kind
);
53 field
= &fields
[kind
];
54 insert_field (kind
, code
, value
, mask
);
55 value
>>= field
->width
;
60 /* Insert a raw field value VALUE into all fields in SELF->fields.
61 The least significant bit goes in the final field. */
64 insert_all_fields (const aarch64_operand
*self
, aarch64_insn
*code
,
68 enum aarch64_field_kind kind
;
70 for (i
= ARRAY_SIZE (self
->fields
); i
-- > 0; )
71 if (self
->fields
[i
] != FLD_NIL
)
73 kind
= self
->fields
[i
];
74 insert_field (kind
, code
, value
, 0);
75 value
>>= fields
[kind
].width
;
79 /* Operand inserters. */
81 /* Insert register number. */
83 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
85 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
86 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
88 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
92 /* Insert register number, index and/or other data for SIMD register element
93 operand, e.g. the last source operand in
94 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
96 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
97 aarch64_insn
*code
, const aarch64_inst
*inst
,
98 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
101 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
102 /* index and/or type */
103 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
105 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
106 if (info
->type
== AARCH64_OPND_En
107 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
109 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
110 assert (info
->idx
== 1); /* Vn */
111 aarch64_insn value
= info
->reglane
.index
<< pos
;
112 insert_field (FLD_imm4
, code
, value
, 0);
116 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
123 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
124 insert_field (FLD_imm5
, code
, value
, 0);
127 else if (inst
->opcode
->iclass
== dotproduct
)
129 unsigned reglane_index
= info
->reglane
.index
;
130 switch (info
->qualifier
)
132 case AARCH64_OPND_QLF_S_4B
:
133 case AARCH64_OPND_QLF_S_2H
:
135 assert (reglane_index
< 4);
136 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
142 else if (inst
->opcode
->iclass
== cryptosm3
)
144 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
145 unsigned reglane_index
= info
->reglane
.index
;
146 assert (reglane_index
< 4);
147 insert_field (FLD_SM3_imm2
, code
, reglane_index
, 0);
151 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
152 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
153 unsigned reglane_index
= info
->reglane
.index
;
155 if (inst
->opcode
->op
== OP_FCMLA_ELEM
)
156 /* Complex operand takes two elements. */
159 switch (info
->qualifier
)
161 case AARCH64_OPND_QLF_S_H
:
163 assert (reglane_index
< 8);
164 insert_fields (code
, reglane_index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
166 case AARCH64_OPND_QLF_S_S
:
168 assert (reglane_index
< 4);
169 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
171 case AARCH64_OPND_QLF_S_D
:
173 assert (reglane_index
< 2);
174 insert_field (FLD_H
, code
, reglane_index
, 0);
183 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
185 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
187 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
188 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
191 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
193 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
197 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
198 in AdvSIMD load/store instructions. */
200 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
201 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
202 const aarch64_inst
*inst
,
203 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
205 aarch64_insn value
= 0;
206 /* Number of elements in each structure to be loaded/stored. */
207 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
210 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
215 switch (info
->reglist
.num_regs
)
217 case 1: value
= 0x7; break;
218 case 2: value
= 0xa; break;
219 case 3: value
= 0x6; break;
220 case 4: value
= 0x2; break;
225 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
236 insert_field (FLD_opcode
, code
, value
, 0);
241 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
242 single structure to all lanes instructions. */
244 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
245 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
246 const aarch64_inst
*inst
,
247 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
250 /* The opcode dependent area stores the number of elements in
251 each structure to be loaded/stored. */
252 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
255 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
257 value
= (aarch64_insn
) 0;
258 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
259 /* OP_LD1R does not have alternating variant, but have "two consecutive"
261 value
= (aarch64_insn
) 1;
262 insert_field (FLD_S
, code
, value
, 0);
267 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
268 operand e.g. Vt in AdvSIMD load/store single element instructions. */
270 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
271 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
272 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
273 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
275 aarch64_field field
= {0, 0};
276 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
277 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
279 assert (info
->reglist
.has_index
);
282 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
283 /* Encode the index, opcode<2:1> and size. */
284 switch (info
->qualifier
)
286 case AARCH64_OPND_QLF_S_B
:
287 /* Index encoded in "Q:S:size". */
288 QSsize
= info
->reglist
.index
;
291 case AARCH64_OPND_QLF_S_H
:
292 /* Index encoded in "Q:S:size<1>". */
293 QSsize
= info
->reglist
.index
<< 1;
296 case AARCH64_OPND_QLF_S_S
:
297 /* Index encoded in "Q:S". */
298 QSsize
= info
->reglist
.index
<< 2;
301 case AARCH64_OPND_QLF_S_D
:
302 /* Index encoded in "Q". */
303 QSsize
= info
->reglist
.index
<< 3 | 0x1;
309 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
310 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
311 insert_field_2 (&field
, code
, opcodeh2
, 0);
316 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
317 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
318 or SSHR <V><d>, <V><n>, #<shift>. */
320 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
321 const aarch64_opnd_info
*info
,
322 aarch64_insn
*code
, const aarch64_inst
*inst
,
323 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
325 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
328 if (inst
->opcode
->iclass
== asimdshf
)
332 0000 x SEE AdvSIMD modified immediate
341 Q
= (val
& 0x1) ? 1 : 0;
342 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
346 assert (info
->type
== AARCH64_OPND_IMM_VLSR
347 || info
->type
== AARCH64_OPND_IMM_VLSL
);
349 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
352 0000 SEE AdvSIMD modified immediate
353 0001 (16-UInt(immh:immb))
354 001x (32-UInt(immh:immb))
355 01xx (64-UInt(immh:immb))
356 1xxx (128-UInt(immh:immb)) */
357 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
361 0000 SEE AdvSIMD modified immediate
362 0001 (UInt(immh:immb)-8)
363 001x (UInt(immh:immb)-16)
364 01xx (UInt(immh:immb)-32)
365 1xxx (UInt(immh:immb)-64) */
366 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
367 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
372 /* Insert fields for e.g. the immediate operands in
373 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
375 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
377 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
378 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
382 imm
= info
->imm
.value
;
383 if (operand_need_shift_by_two (self
))
385 if (operand_need_shift_by_four (self
))
387 insert_all_fields (self
, code
, imm
);
391 /* Insert immediate and its shift amount for e.g. the last operand in
392 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
394 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
395 aarch64_insn
*code
, const aarch64_inst
*inst
,
396 aarch64_operand_error
*errors
)
399 aarch64_ins_imm (self
, info
, code
, inst
, errors
);
401 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
405 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
406 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
408 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
409 const aarch64_opnd_info
*info
,
411 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
412 aarch64_operand_error
*errors
415 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
416 uint64_t imm
= info
->imm
.value
;
417 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
418 int amount
= info
->shifter
.amount
;
419 aarch64_field field
= {0, 0};
421 /* a:b:c:d:e:f:g:h */
422 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
424 /* Either MOVI <Dd>, #<imm>
425 or MOVI <Vd>.2D, #<imm>.
426 <imm> is a 64-bit immediate
427 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
428 encoded in "a:b:c:d:e:f:g:h". */
429 imm
= aarch64_shrink_expanded_imm8 (imm
);
430 assert ((int)imm
>= 0);
432 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
434 if (kind
== AARCH64_MOD_NONE
)
437 /* shift amount partially in cmode */
438 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
439 if (kind
== AARCH64_MOD_LSL
)
441 /* AARCH64_MOD_LSL: shift zeros. */
442 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
443 assert (esize
== 4 || esize
== 2 || esize
== 1);
444 /* For 8-bit move immediate, the optional LSL #0 does not require
450 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
452 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
456 /* AARCH64_MOD_MSL: shift ones. */
458 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
460 insert_field_2 (&field
, code
, amount
, 0);
465 /* Insert fields for an 8-bit floating-point immediate. */
467 aarch64_ins_fpimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
469 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
470 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
472 insert_all_fields (self
, code
, info
->imm
.value
);
476 /* Insert 1-bit rotation immediate (#90 or #270). */
478 aarch64_ins_imm_rotate1 (const aarch64_operand
*self
,
479 const aarch64_opnd_info
*info
,
480 aarch64_insn
*code
, const aarch64_inst
*inst
,
481 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
483 uint64_t rot
= (info
->imm
.value
- 90) / 180;
485 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
489 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
491 aarch64_ins_imm_rotate2 (const aarch64_operand
*self
,
492 const aarch64_opnd_info
*info
,
493 aarch64_insn
*code
, const aarch64_inst
*inst
,
494 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
496 uint64_t rot
= info
->imm
.value
/ 90;
498 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
502 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
503 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
505 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
507 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
508 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
510 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
514 /* Insert arithmetic immediate for e.g. the last operand in
515 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
517 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
518 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
519 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
522 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
523 insert_field (self
->fields
[0], code
, value
, 0);
524 /* imm12 (unsigned) */
525 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
529 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
530 the operand should be inverted before encoding. */
532 aarch64_ins_limm_1 (const aarch64_operand
*self
,
533 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
534 const aarch64_inst
*inst
, bfd_boolean invert_p
,
535 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
538 uint64_t imm
= info
->imm
.value
;
539 int esize
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
543 /* The constraint check should have guaranteed this wouldn't happen. */
544 assert (aarch64_logical_immediate_p (imm
, esize
, &value
));
546 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
551 /* Insert logical/bitmask immediate for e.g. the last operand in
552 ORR <Wd|WSP>, <Wn>, #<imm>. */
554 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
555 aarch64_insn
*code
, const aarch64_inst
*inst
,
556 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
558 return aarch64_ins_limm_1 (self
, info
, code
, inst
,
559 inst
->opcode
->op
== OP_BIC
, errors
);
562 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
564 aarch64_ins_inv_limm (const aarch64_operand
*self
,
565 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
566 const aarch64_inst
*inst
,
567 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
569 return aarch64_ins_limm_1 (self
, info
, code
, inst
, TRUE
, errors
);
572 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
573 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
575 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
576 aarch64_insn
*code
, const aarch64_inst
*inst
,
577 aarch64_operand_error
*errors
)
579 aarch64_insn value
= 0;
581 assert (info
->idx
== 0);
584 aarch64_ins_regno (self
, info
, code
, inst
, errors
);
585 if (inst
->opcode
->iclass
== ldstpair_indexed
586 || inst
->opcode
->iclass
== ldstnapair_offs
587 || inst
->opcode
->iclass
== ldstpair_off
588 || inst
->opcode
->iclass
== loadlit
)
591 switch (info
->qualifier
)
593 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
594 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
595 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
598 insert_field (FLD_ldst_size
, code
, value
, 0);
603 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
604 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
610 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
612 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
613 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
614 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
615 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
618 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
622 /* Encode the address operand for e.g.
623 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
625 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
626 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
627 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
628 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
631 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
634 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
636 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
638 if (kind
== AARCH64_MOD_LSL
)
639 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
640 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
642 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
643 S
= info
->shifter
.amount
!= 0;
645 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
649 Must be #0 if <extend> is explicitly LSL. */
650 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
651 insert_field (FLD_S
, code
, S
, 0);
656 /* Encode the address operand for e.g.
657 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
659 aarch64_ins_addr_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
660 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
661 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
662 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
665 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
668 int imm
= info
->addr
.offset
.imm
;
669 insert_field (self
->fields
[1], code
, imm
, 0);
672 if (info
->addr
.writeback
)
674 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
675 insert_field (self
->fields
[2], code
, 1, 0);
680 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
682 aarch64_ins_addr_simm (const aarch64_operand
*self
,
683 const aarch64_opnd_info
*info
,
685 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
686 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
691 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
692 /* simm (imm9 or imm7) */
693 imm
= info
->addr
.offset
.imm
;
694 if (self
->fields
[0] == FLD_imm7
695 || info
->qualifier
== AARCH64_OPND_QLF_imm_tag
)
696 /* scaled immediate in ld/st pair instructions.. */
697 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
698 insert_field (self
->fields
[0], code
, imm
, 0);
699 /* pre/post- index */
700 if (info
->addr
.writeback
)
702 assert (inst
->opcode
->iclass
!= ldst_unscaled
703 && inst
->opcode
->iclass
!= ldstnapair_offs
704 && inst
->opcode
->iclass
!= ldstpair_off
705 && inst
->opcode
->iclass
!= ldst_unpriv
);
706 assert (info
->addr
.preind
!= info
->addr
.postind
);
707 if (info
->addr
.preind
)
708 insert_field (self
->fields
[1], code
, 1, 0);
714 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
716 aarch64_ins_addr_simm10 (const aarch64_operand
*self
,
717 const aarch64_opnd_info
*info
,
719 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
720 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
725 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
727 imm
= info
->addr
.offset
.imm
>> 3;
728 insert_field (self
->fields
[1], code
, imm
>> 9, 0);
729 insert_field (self
->fields
[2], code
, imm
, 0);
731 if (info
->addr
.writeback
)
733 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
734 insert_field (self
->fields
[3], code
, 1, 0);
739 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
741 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
742 const aarch64_opnd_info
*info
,
744 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
745 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
747 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
750 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
752 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
756 /* Encode the address operand for e.g.
757 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
759 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
760 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
761 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
762 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
765 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
767 if (info
->addr
.offset
.is_reg
)
768 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
770 insert_field (FLD_Rm
, code
, 0x1f, 0);
774 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
776 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
777 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
778 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
779 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
782 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
786 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
788 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
789 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
790 const aarch64_inst
*inst
,
791 aarch64_operand_error
*detail ATTRIBUTE_UNUSED
)
793 /* If a system instruction check if we have any restrictions on which
794 registers it can use. */
795 if (inst
->opcode
->iclass
== ic_system
)
797 uint64_t opcode_flags
798 = inst
->opcode
->flags
& (F_SYS_READ
| F_SYS_WRITE
);
799 uint32_t sysreg_flags
800 = info
->sysreg
.flags
& (F_REG_READ
| F_REG_WRITE
);
802 /* Check to see if it's read-only, else check if it's write only.
803 if it's both or unspecified don't care. */
804 if (opcode_flags
== F_SYS_READ
806 && sysreg_flags
!= F_REG_READ
)
808 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
809 detail
->error
= _("specified register cannot be read from");
810 detail
->index
= info
->idx
;
811 detail
->non_fatal
= TRUE
;
813 else if (opcode_flags
== F_SYS_WRITE
815 && sysreg_flags
!= F_REG_WRITE
)
817 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
818 detail
->error
= _("specified register cannot be written to");
819 detail
->index
= info
->idx
;
820 detail
->non_fatal
= TRUE
;
823 /* op0:op1:CRn:CRm:op2 */
824 insert_fields (code
, info
->sysreg
.value
, inst
->opcode
->mask
, 5,
825 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
829 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
831 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
832 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
833 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
834 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
837 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
842 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
844 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
845 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
846 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
847 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
849 /* op1:CRn:CRm:op2 */
850 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
851 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
855 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
858 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
859 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
860 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
861 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
864 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
868 /* Encode the prefetch operation option operand for e.g.
869 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
872 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
873 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
874 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
875 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
878 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
882 /* Encode the hint number for instructions that alias HINT but take an
886 aarch64_ins_hint (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
887 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
888 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
889 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
892 insert_fields (code
, info
->hint_option
->value
, 0, 2, FLD_op2
, FLD_CRm
);
896 /* Encode the extended register operand for e.g.
897 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
899 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
900 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
901 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
902 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
904 enum aarch64_modifier_kind kind
;
907 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
909 kind
= info
->shifter
.kind
;
910 if (kind
== AARCH64_MOD_LSL
)
911 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
912 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
913 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
915 insert_field (FLD_imm3
, code
, info
->shifter
.amount
, 0);
920 /* Encode the shifted register operand for e.g.
921 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
923 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
924 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
925 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
926 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
929 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
931 insert_field (FLD_shift
, code
,
932 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
934 insert_field (FLD_imm6
, code
, info
->shifter
.amount
, 0);
939 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
940 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
941 SELF's operand-dependent value. fields[0] specifies the field that
942 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
944 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand
*self
,
945 const aarch64_opnd_info
*info
,
947 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
948 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
950 int factor
= 1 + get_operand_specific_data (self
);
951 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
952 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
956 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
957 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
958 SELF's operand-dependent value. fields[0] specifies the field that
959 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
961 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand
*self
,
962 const aarch64_opnd_info
*info
,
964 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
965 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
967 int factor
= 1 + get_operand_specific_data (self
);
968 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
969 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
973 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
974 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
975 SELF's operand-dependent value. fields[0] specifies the field that
976 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
977 and imm3 fields, with imm3 being the less-significant part. */
979 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand
*self
,
980 const aarch64_opnd_info
*info
,
982 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
983 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
985 int factor
= 1 + get_operand_specific_data (self
);
986 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
987 insert_fields (code
, info
->addr
.offset
.imm
/ factor
, 0,
988 2, FLD_imm3
, FLD_SVE_imm6
);
992 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
993 is a 4-bit signed number and where <shift> is SELF's operand-dependent
994 value. fields[0] specifies the base register field. */
996 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand
*self
,
997 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
998 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
999 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1001 int factor
= 1 << get_operand_specific_data (self
);
1002 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1003 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1007 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1008 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1009 value. fields[0] specifies the base register field. */
1011 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand
*self
,
1012 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1013 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1014 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1016 int factor
= 1 << get_operand_specific_data (self
);
1017 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1018 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1022 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1023 is SELF's operand-dependent value. fields[0] specifies the base
1024 register field and fields[1] specifies the offset register field. */
1026 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand
*self
,
1027 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1028 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1029 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1031 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1032 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1036 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1037 <shift> is SELF's operand-dependent value. fields[0] specifies the
1038 base register field, fields[1] specifies the offset register field and
1039 fields[2] is a single-bit field that selects SXTW over UXTW. */
1041 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand
*self
,
1042 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1043 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1044 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1046 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1047 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1048 if (info
->shifter
.kind
== AARCH64_MOD_UXTW
)
1049 insert_field (self
->fields
[2], code
, 0, 0);
1051 insert_field (self
->fields
[2], code
, 1, 0);
1055 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1056 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1057 fields[0] specifies the base register field. */
1059 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand
*self
,
1060 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1061 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1062 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1064 int factor
= 1 << get_operand_specific_data (self
);
1065 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1066 insert_field (FLD_imm5
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1070 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1071 where <modifier> is fixed by the instruction and where <msz> is a
1072 2-bit unsigned number. fields[0] specifies the base register field
1073 and fields[1] specifies the offset register field. */
1075 aarch64_ext_sve_addr_zz (const aarch64_operand
*self
,
1076 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1077 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1079 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1080 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1081 insert_field (FLD_SVE_msz
, code
, info
->shifter
.amount
, 0);
1085 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1086 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1087 field and fields[1] specifies the offset register field. */
1089 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand
*self
,
1090 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1091 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1092 aarch64_operand_error
*errors
)
1094 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1097 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1098 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1099 field and fields[1] specifies the offset register field. */
1101 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand
*self
,
1102 const aarch64_opnd_info
*info
,
1104 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1105 aarch64_operand_error
*errors
)
1107 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1110 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1111 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1112 field and fields[1] specifies the offset register field. */
1114 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand
*self
,
1115 const aarch64_opnd_info
*info
,
1117 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1118 aarch64_operand_error
*errors
)
1120 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1123 /* Encode an SVE ADD/SUB immediate. */
1125 aarch64_ins_sve_aimm (const aarch64_operand
*self
,
1126 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1127 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1128 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1130 if (info
->shifter
.amount
== 8)
1131 insert_all_fields (self
, code
, (info
->imm
.value
& 0xff) | 256);
1132 else if (info
->imm
.value
!= 0 && (info
->imm
.value
& 0xff) == 0)
1133 insert_all_fields (self
, code
, ((info
->imm
.value
/ 256) & 0xff) | 256);
1135 insert_all_fields (self
, code
, info
->imm
.value
& 0xff);
1139 /* Encode an SVE CPY/DUP immediate. */
1141 aarch64_ins_sve_asimm (const aarch64_operand
*self
,
1142 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1143 const aarch64_inst
*inst
,
1144 aarch64_operand_error
*errors
)
1146 return aarch64_ins_sve_aimm (self
, info
, code
, inst
, errors
);
1149 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1150 array specifies which field to use for Zn. MM is encoded in the
1151 concatenation of imm5 and SVE_tszh, with imm5 being the less
1152 significant part. */
1154 aarch64_ins_sve_index (const aarch64_operand
*self
,
1155 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1156 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1157 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1159 unsigned int esize
= aarch64_get_qualifier_esize (info
->qualifier
);
1160 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, 0);
1161 insert_fields (code
, (info
->reglane
.index
* 2 + 1) * esize
, 0,
1162 2, FLD_imm5
, FLD_SVE_tszh
);
1166 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1168 aarch64_ins_sve_limm_mov (const aarch64_operand
*self
,
1169 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1170 const aarch64_inst
*inst
,
1171 aarch64_operand_error
*errors
)
1173 return aarch64_ins_limm (self
, info
, code
, inst
, errors
);
1176 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1177 and where MM occupies the most-significant part. The operand-dependent
1178 value specifies the number of bits in Zn. */
1180 aarch64_ins_sve_quad_index (const aarch64_operand
*self
,
1181 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1182 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1183 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1185 unsigned int reg_bits
= get_operand_specific_data (self
);
1186 assert (info
->reglane
.regno
< (1U << reg_bits
));
1187 unsigned int val
= (info
->reglane
.index
<< reg_bits
) + info
->reglane
.regno
;
1188 insert_all_fields (self
, code
, val
);
1192 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1195 aarch64_ins_sve_reglist (const aarch64_operand
*self
,
1196 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1197 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1198 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1200 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
1204 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1205 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1208 aarch64_ins_sve_scale (const aarch64_operand
*self
,
1209 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1210 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1211 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1213 insert_all_fields (self
, code
, info
->imm
.value
);
1214 insert_field (FLD_SVE_imm4
, code
, info
->shifter
.amount
- 1, 0);
1218 /* Encode an SVE shift left immediate. */
1220 aarch64_ins_sve_shlimm (const aarch64_operand
*self
,
1221 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1222 const aarch64_inst
*inst
,
1223 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1225 const aarch64_opnd_info
*prev_operand
;
1228 assert (info
->idx
> 0);
1229 prev_operand
= &inst
->operands
[info
->idx
- 1];
1230 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1231 insert_all_fields (self
, code
, 8 * esize
+ info
->imm
.value
);
1235 /* Encode an SVE shift right immediate. */
1237 aarch64_ins_sve_shrimm (const aarch64_operand
*self
,
1238 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1239 const aarch64_inst
*inst
,
1240 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1242 const aarch64_opnd_info
*prev_operand
;
1245 unsigned int opnd_backshift
= get_operand_specific_data (self
);
1246 assert (info
->idx
>= (int)opnd_backshift
);
1247 prev_operand
= &inst
->operands
[info
->idx
- opnd_backshift
];
1248 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1249 insert_all_fields (self
, code
, 16 * esize
- info
->imm
.value
);
1253 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1254 The fields array specifies which field to use. */
1256 aarch64_ins_sve_float_half_one (const aarch64_operand
*self
,
1257 const aarch64_opnd_info
*info
,
1259 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1260 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1262 if (info
->imm
.value
== 0x3f000000)
1263 insert_field (self
->fields
[0], code
, 0, 0);
1265 insert_field (self
->fields
[0], code
, 1, 0);
1269 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1270 The fields array specifies which field to use. */
1272 aarch64_ins_sve_float_half_two (const aarch64_operand
*self
,
1273 const aarch64_opnd_info
*info
,
1275 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1276 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1278 if (info
->imm
.value
== 0x3f000000)
1279 insert_field (self
->fields
[0], code
, 0, 0);
1281 insert_field (self
->fields
[0], code
, 1, 0);
1285 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1286 The fields array specifies which field to use. */
1288 aarch64_ins_sve_float_zero_one (const aarch64_operand
*self
,
1289 const aarch64_opnd_info
*info
,
1291 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1292 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1294 if (info
->imm
.value
== 0)
1295 insert_field (self
->fields
[0], code
, 0, 0);
1297 insert_field (self
->fields
[0], code
, 1, 0);
1301 /* Miscellaneous encoding functions. */
1303 /* Encode size[0], i.e. bit 22, for
1304 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1307 encode_asimd_fcvt (aarch64_inst
*inst
)
1310 aarch64_field field
= {0, 0};
1311 enum aarch64_opnd_qualifier qualifier
;
1313 switch (inst
->opcode
->op
)
1317 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1318 qualifier
= inst
->operands
[1].qualifier
;
1322 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1323 qualifier
= inst
->operands
[0].qualifier
;
1328 assert (qualifier
== AARCH64_OPND_QLF_V_4S
1329 || qualifier
== AARCH64_OPND_QLF_V_2D
);
1330 value
= (qualifier
== AARCH64_OPND_QLF_V_4S
) ? 0 : 1;
1331 gen_sub_field (FLD_size
, 0, 1, &field
);
1332 insert_field_2 (&field
, &inst
->value
, value
, 0);
1335 /* Encode size[0], i.e. bit 22, for
1336 e.g. FCVTXN <Vb><d>, <Va><n>. */
1339 encode_asisd_fcvtxn (aarch64_inst
*inst
)
1341 aarch64_insn val
= 1;
1342 aarch64_field field
= {0, 0};
1343 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_S
);
1344 gen_sub_field (FLD_size
, 0, 1, &field
);
1345 insert_field_2 (&field
, &inst
->value
, val
, 0);
1348 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1350 encode_fcvt (aarch64_inst
*inst
)
1353 const aarch64_field field
= {15, 2};
1356 switch (inst
->operands
[0].qualifier
)
1358 case AARCH64_OPND_QLF_S_S
: val
= 0; break;
1359 case AARCH64_OPND_QLF_S_D
: val
= 1; break;
1360 case AARCH64_OPND_QLF_S_H
: val
= 3; break;
1363 insert_field_2 (&field
, &inst
->value
, val
, 0);
1368 /* Return the index in qualifiers_list that INST is using. Should only
1369 be called once the qualifiers are known to be valid. */
1372 aarch64_get_variant (struct aarch64_inst
*inst
)
1374 int i
, nops
, variant
;
1376 nops
= aarch64_num_of_operands (inst
->opcode
);
1377 for (variant
= 0; variant
< AARCH64_MAX_QLF_SEQ_NUM
; ++variant
)
1379 for (i
= 0; i
< nops
; ++i
)
1380 if (inst
->opcode
->qualifiers_list
[variant
][i
]
1381 != inst
->operands
[i
].qualifier
)
1389 /* Do miscellaneous encodings that are not common enough to be driven by
1393 do_misc_encoding (aarch64_inst
*inst
)
1397 switch (inst
->opcode
->op
)
1406 encode_asimd_fcvt (inst
);
1409 encode_asisd_fcvtxn (inst
);
1413 /* Copy Pn to Pm and Pg. */
1414 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1415 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1416 insert_field (FLD_SVE_Pg4_10
, &inst
->value
, value
, 0);
1419 /* Copy Zd to Zm. */
1420 value
= extract_field (FLD_SVE_Zd
, inst
->value
, 0);
1421 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1424 /* Fill in the zero immediate. */
1425 insert_fields (&inst
->value
, 1 << aarch64_get_variant (inst
), 0,
1426 2, FLD_imm5
, FLD_SVE_tszh
);
1429 /* Copy Zn to Zm. */
1430 value
= extract_field (FLD_SVE_Zn
, inst
->value
, 0);
1431 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1436 /* Copy Pd to Pm. */
1437 value
= extract_field (FLD_SVE_Pd
, inst
->value
, 0);
1438 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1440 case OP_MOVZS_P_P_P
:
1442 /* Copy Pn to Pm. */
1443 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1444 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1446 case OP_NOTS_P_P_P_Z
:
1447 case OP_NOT_P_P_P_Z
:
1448 /* Copy Pg to Pm. */
1449 value
= extract_field (FLD_SVE_Pg4_10
, inst
->value
, 0);
1450 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1456 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1458 encode_sizeq (aarch64_inst
*inst
)
1461 enum aarch64_field_kind kind
;
1464 /* Get the index of the operand whose information we are going to use
1465 to encode the size and Q fields.
1466 This is deduced from the possible valid qualifier lists. */
1467 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1468 DEBUG_TRACE ("idx: %d; qualifier: %s", idx
,
1469 aarch64_get_qualifier_name (inst
->operands
[idx
].qualifier
));
1470 sizeq
= aarch64_get_qualifier_standard_value (inst
->operands
[idx
].qualifier
);
1472 insert_field (FLD_Q
, &inst
->value
, sizeq
& 0x1, inst
->opcode
->mask
);
1474 if (inst
->opcode
->iclass
== asisdlse
1475 || inst
->opcode
->iclass
== asisdlsep
1476 || inst
->opcode
->iclass
== asisdlso
1477 || inst
->opcode
->iclass
== asisdlsop
)
1478 kind
= FLD_vldst_size
;
1481 insert_field (kind
, &inst
->value
, (sizeq
>> 1) & 0x3, inst
->opcode
->mask
);
1484 /* Opcodes that have fields shared by multiple operands are usually flagged
1485 with flags. In this function, we detect such flags and use the
1486 information in one of the related operands to do the encoding. The 'one'
1487 operand is not any operand but one of the operands that has the enough
1488 information for such an encoding. */
1491 do_special_encoding (struct aarch64_inst
*inst
)
1494 aarch64_insn value
= 0;
1496 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst
->value
);
1498 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1499 if (inst
->opcode
->flags
& F_COND
)
1501 insert_field (FLD_cond2
, &inst
->value
, inst
->cond
->value
, 0);
1503 if (inst
->opcode
->flags
& F_SF
)
1505 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1506 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1507 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1509 insert_field (FLD_sf
, &inst
->value
, value
, 0);
1510 if (inst
->opcode
->flags
& F_N
)
1511 insert_field (FLD_N
, &inst
->value
, value
, inst
->opcode
->mask
);
1513 if (inst
->opcode
->flags
& F_LSE_SZ
)
1515 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1516 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1517 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1519 insert_field (FLD_lse_sz
, &inst
->value
, value
, 0);
1521 if (inst
->opcode
->flags
& F_SIZEQ
)
1522 encode_sizeq (inst
);
1523 if (inst
->opcode
->flags
& F_FPTYPE
)
1525 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
1526 switch (inst
->operands
[idx
].qualifier
)
1528 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
1529 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
1530 case AARCH64_OPND_QLF_S_H
: value
= 3; break;
1531 default: assert (0);
1533 insert_field (FLD_type
, &inst
->value
, value
, 0);
1535 if (inst
->opcode
->flags
& F_SSIZE
)
1537 enum aarch64_opnd_qualifier qualifier
;
1538 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
1539 qualifier
= inst
->operands
[idx
].qualifier
;
1540 assert (qualifier
>= AARCH64_OPND_QLF_S_B
1541 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
1542 value
= aarch64_get_qualifier_standard_value (qualifier
);
1543 insert_field (FLD_size
, &inst
->value
, value
, inst
->opcode
->mask
);
1545 if (inst
->opcode
->flags
& F_T
)
1547 int num
; /* num of consecutive '0's on the right side of imm5<3:0>. */
1548 aarch64_field field
= {0, 0};
1549 enum aarch64_opnd_qualifier qualifier
;
1552 qualifier
= inst
->operands
[idx
].qualifier
;
1553 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1554 == AARCH64_OPND_CLASS_SIMD_REG
1555 && qualifier
>= AARCH64_OPND_QLF_V_8B
1556 && qualifier
<= AARCH64_OPND_QLF_V_2D
);
1567 value
= aarch64_get_qualifier_standard_value (qualifier
);
1568 insert_field (FLD_Q
, &inst
->value
, value
& 0x1, inst
->opcode
->mask
);
1569 num
= (int) value
>> 1;
1570 assert (num
>= 0 && num
<= 3);
1571 gen_sub_field (FLD_imm5
, 0, num
+ 1, &field
);
1572 insert_field_2 (&field
, &inst
->value
, 1 << num
, inst
->opcode
->mask
);
1574 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
1576 /* Use Rt to encode in the case of e.g.
1577 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1578 enum aarch64_opnd_qualifier qualifier
;
1579 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
1581 /* Otherwise use the result operand, which has to be a integer
1584 assert (idx
== 0 || idx
== 1);
1585 assert (aarch64_get_operand_class (inst
->opcode
->operands
[idx
])
1586 == AARCH64_OPND_CLASS_INT_REG
);
1587 qualifier
= inst
->operands
[idx
].qualifier
;
1588 insert_field (FLD_Q
, &inst
->value
,
1589 aarch64_get_qualifier_standard_value (qualifier
), 0);
1591 if (inst
->opcode
->flags
& F_LDS_SIZE
)
1593 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1594 enum aarch64_opnd_qualifier qualifier
;
1595 aarch64_field field
= {0, 0};
1596 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1597 == AARCH64_OPND_CLASS_INT_REG
);
1598 gen_sub_field (FLD_opc
, 0, 1, &field
);
1599 qualifier
= inst
->operands
[0].qualifier
;
1600 insert_field_2 (&field
, &inst
->value
,
1601 1 - aarch64_get_qualifier_standard_value (qualifier
), 0);
1603 /* Miscellaneous encoding as the last step. */
1604 if (inst
->opcode
->flags
& F_MISC
)
1605 do_misc_encoding (inst
);
1607 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst
->value
);
1610 /* Some instructions (including all SVE ones) use the instruction class
1611 to describe how a qualifiers_list index is represented in the instruction
1612 encoding. If INST is such an instruction, encode the chosen qualifier
1616 aarch64_encode_variant_using_iclass (struct aarch64_inst
*inst
)
1619 switch (inst
->opcode
->iclass
)
1622 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1623 0, 2, FLD_SVE_M_14
, FLD_size
);
1627 case sve_shift_pred
:
1628 case sve_shift_unpred
:
1629 case sve_shift_tsz_hsd
:
1630 case sve_shift_tsz_bhsd
:
1631 /* For indices and shift amounts, the variant is encoded as
1632 part of the immediate. */
1636 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1637 and depend on the immediate. They don't have a separate
1642 /* sve_misc instructions have only a single variant. */
1646 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1647 0, 2, FLD_SVE_M_16
, FLD_size
);
1651 insert_field (FLD_SVE_M_4
, &inst
->value
, aarch64_get_variant (inst
), 0);
1656 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
), 0);
1660 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
) + 1, 0);
1665 insert_field (FLD_SVE_sz
, &inst
->value
, aarch64_get_variant (inst
), 0);
1669 insert_field (FLD_SVE_sz2
, &inst
->value
, aarch64_get_variant (inst
), 0);
1673 insert_field (FLD_SVE_size
, &inst
->value
,
1674 aarch64_get_variant (inst
) + 1, 0);
1677 case sve_size_tsz_bhs
:
1678 insert_fields (&inst
->value
,
1679 (1 << aarch64_get_variant (inst
)),
1680 0, 2, FLD_SVE_tszl_19
, FLD_SVE_sz
);
1684 variant
= aarch64_get_variant (inst
) + 1;
1687 insert_field (FLD_size
, &inst
->value
, variant
, 0);
1695 /* Converters converting an alias opcode instruction to its real form. */
1697 /* ROR <Wd>, <Ws>, #<shift>
1699 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1701 convert_ror_to_extr (aarch64_inst
*inst
)
1703 copy_operand_info (inst
, 3, 2);
1704 copy_operand_info (inst
, 2, 1);
1707 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1709 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1711 convert_xtl_to_shll (aarch64_inst
*inst
)
1713 inst
->operands
[2].qualifier
= inst
->operands
[1].qualifier
;
1714 inst
->operands
[2].imm
.value
= 0;
1718 LSR <Xd>, <Xn>, #<shift>
1720 UBFM <Xd>, <Xn>, #<shift>, #63. */
1722 convert_sr_to_bfm (aarch64_inst
*inst
)
1724 inst
->operands
[3].imm
.value
=
1725 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
1728 /* Convert MOV to ORR. */
1730 convert_mov_to_orr (aarch64_inst
*inst
)
1732 /* MOV <Vd>.<T>, <Vn>.<T>
1734 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1735 copy_operand_info (inst
, 2, 1);
1738 /* When <imms> >= <immr>, the instruction written:
1739 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1741 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1744 convert_bfx_to_bfm (aarch64_inst
*inst
)
1748 /* Convert the operand. */
1749 lsb
= inst
->operands
[2].imm
.value
;
1750 width
= inst
->operands
[3].imm
.value
;
1751 inst
->operands
[2].imm
.value
= lsb
;
1752 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
1755 /* When <imms> < <immr>, the instruction written:
1756 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1758 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1761 convert_bfi_to_bfm (aarch64_inst
*inst
)
1765 /* Convert the operand. */
1766 lsb
= inst
->operands
[2].imm
.value
;
1767 width
= inst
->operands
[3].imm
.value
;
1768 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1770 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1771 inst
->operands
[3].imm
.value
= width
- 1;
1775 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1776 inst
->operands
[3].imm
.value
= width
- 1;
1780 /* The instruction written:
1781 BFC <Xd>, #<lsb>, #<width>
1783 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1786 convert_bfc_to_bfm (aarch64_inst
*inst
)
1791 copy_operand_info (inst
, 3, 2);
1792 copy_operand_info (inst
, 2, 1);
1793 copy_operand_info (inst
, 1, 0);
1794 inst
->operands
[1].reg
.regno
= 0x1f;
1796 /* Convert the immediate operand. */
1797 lsb
= inst
->operands
[2].imm
.value
;
1798 width
= inst
->operands
[3].imm
.value
;
1799 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1801 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1802 inst
->operands
[3].imm
.value
= width
- 1;
1806 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1807 inst
->operands
[3].imm
.value
= width
- 1;
1811 /* The instruction written:
1812 LSL <Xd>, <Xn>, #<shift>
1814 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1817 convert_lsl_to_ubfm (aarch64_inst
*inst
)
1819 int64_t shift
= inst
->operands
[2].imm
.value
;
1821 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1823 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
1824 inst
->operands
[3].imm
.value
= 31 - shift
;
1828 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
1829 inst
->operands
[3].imm
.value
= 63 - shift
;
1833 /* CINC <Wd>, <Wn>, <cond>
1835 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1838 convert_to_csel (aarch64_inst
*inst
)
1840 copy_operand_info (inst
, 3, 2);
1841 copy_operand_info (inst
, 2, 1);
1842 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1845 /* CSET <Wd>, <cond>
1847 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1850 convert_cset_to_csinc (aarch64_inst
*inst
)
1852 copy_operand_info (inst
, 3, 1);
1853 copy_operand_info (inst
, 2, 0);
1854 copy_operand_info (inst
, 1, 0);
1855 inst
->operands
[1].reg
.regno
= 0x1f;
1856 inst
->operands
[2].reg
.regno
= 0x1f;
1857 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1862 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1865 convert_mov_to_movewide (aarch64_inst
*inst
)
1868 uint32_t shift_amount
;
1871 switch (inst
->opcode
->op
)
1873 case OP_MOV_IMM_WIDE
:
1874 value
= inst
->operands
[1].imm
.value
;
1876 case OP_MOV_IMM_WIDEN
:
1877 value
= ~inst
->operands
[1].imm
.value
;
1882 inst
->operands
[1].type
= AARCH64_OPND_HALF
;
1883 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
1884 if (! aarch64_wide_constant_p (value
, is32
, &shift_amount
))
1885 /* The constraint check should have guaranteed this wouldn't happen. */
1887 value
>>= shift_amount
;
1889 inst
->operands
[1].imm
.value
= value
;
1890 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_LSL
;
1891 inst
->operands
[1].shifter
.amount
= shift_amount
;
1896 ORR <Wd>, WZR, #<imm>. */
1899 convert_mov_to_movebitmask (aarch64_inst
*inst
)
1901 copy_operand_info (inst
, 2, 1);
1902 inst
->operands
[1].reg
.regno
= 0x1f;
1903 inst
->operands
[1].skip
= 0;
1906 /* Some alias opcodes are assembled by being converted to their real-form. */
1909 convert_to_real (aarch64_inst
*inst
, const aarch64_opcode
*real
)
1911 const aarch64_opcode
*alias
= inst
->opcode
;
1913 if ((alias
->flags
& F_CONV
) == 0)
1914 goto convert_to_real_return
;
1920 convert_sr_to_bfm (inst
);
1923 convert_lsl_to_ubfm (inst
);
1928 convert_to_csel (inst
);
1932 convert_cset_to_csinc (inst
);
1937 convert_bfx_to_bfm (inst
);
1942 convert_bfi_to_bfm (inst
);
1945 convert_bfc_to_bfm (inst
);
1948 convert_mov_to_orr (inst
);
1950 case OP_MOV_IMM_WIDE
:
1951 case OP_MOV_IMM_WIDEN
:
1952 convert_mov_to_movewide (inst
);
1954 case OP_MOV_IMM_LOG
:
1955 convert_mov_to_movebitmask (inst
);
1958 convert_ror_to_extr (inst
);
1964 convert_xtl_to_shll (inst
);
1970 convert_to_real_return
:
1971 aarch64_replace_opcode (inst
, real
);
1974 /* Encode *INST_ORI of the opcode code OPCODE.
1975 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1976 matched operand qualifier sequence in *QLF_SEQ. */
1979 aarch64_opcode_encode (const aarch64_opcode
*opcode
,
1980 const aarch64_inst
*inst_ori
, aarch64_insn
*code
,
1981 aarch64_opnd_qualifier_t
*qlf_seq
,
1982 aarch64_operand_error
*mismatch_detail
,
1983 aarch64_instr_sequence
* insn_sequence
)
1986 const aarch64_opcode
*aliased
;
1987 aarch64_inst copy
, *inst
;
1989 DEBUG_TRACE ("enter with %s", opcode
->name
);
1991 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1995 assert (inst
->opcode
== NULL
|| inst
->opcode
== opcode
);
1996 if (inst
->opcode
== NULL
)
1997 inst
->opcode
= opcode
;
1999 /* Constrain the operands.
2000 After passing this, the encoding is guaranteed to succeed. */
2001 if (aarch64_match_operands_constraint (inst
, mismatch_detail
) == 0)
2003 DEBUG_TRACE ("FAIL since operand constraint not met");
2007 /* Get the base value.
2008 Note: this has to be before the aliasing handling below in order to
2009 get the base value from the alias opcode before we move on to the
2010 aliased opcode for encoding. */
2011 inst
->value
= opcode
->opcode
;
2013 /* No need to do anything else if the opcode does not have any operand. */
2014 if (aarch64_num_of_operands (opcode
) == 0)
2017 /* Assign operand indexes and check types. Also put the matched
2018 operand qualifiers in *QLF_SEQ to return. */
2019 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2021 assert (opcode
->operands
[i
] == inst
->operands
[i
].type
);
2022 inst
->operands
[i
].idx
= i
;
2023 if (qlf_seq
!= NULL
)
2024 *qlf_seq
= inst
->operands
[i
].qualifier
;
2027 aliased
= aarch64_find_real_opcode (opcode
);
2028 /* If the opcode is an alias and it does not ask for direct encoding by
2029 itself, the instruction will be transformed to the form of real opcode
2030 and the encoding will be carried out using the rules for the aliased
2032 if (aliased
!= NULL
&& (opcode
->flags
& F_CONV
))
2034 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2035 aliased
->name
, opcode
->name
);
2036 /* Convert the operands to the form of the real opcode. */
2037 convert_to_real (inst
, aliased
);
2041 aarch64_opnd_info
*info
= inst
->operands
;
2043 /* Call the inserter of each operand. */
2044 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++info
)
2046 const aarch64_operand
*opnd
;
2047 enum aarch64_opnd type
= opcode
->operands
[i
];
2048 if (type
== AARCH64_OPND_NIL
)
2052 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2055 opnd
= &aarch64_operands
[type
];
2056 if (operand_has_inserter (opnd
)
2057 && !aarch64_insert_operand (opnd
, info
, &inst
->value
, inst
,
2062 /* Call opcode encoders indicated by flags. */
2063 if (opcode_has_special_coder (opcode
))
2064 do_special_encoding (inst
);
2066 /* Possibly use the instruction class to encode the chosen qualifier
2068 aarch64_encode_variant_using_iclass (inst
);
2070 /* Run a verifier if the instruction has one set. */
2071 if (opcode
->verifier
)
2073 enum err_type result
= opcode
->verifier (inst
, *code
, 0, TRUE
,
2074 mismatch_detail
, insn_sequence
);
2086 /* Always run constrain verifiers, this is needed because constrains need to
2087 maintain a global state. Regardless if the instruction has the flag set
2089 enum err_type result
= verify_constraints (inst
, *code
, 0, TRUE
,
2090 mismatch_detail
, insn_sequence
);
2103 DEBUG_TRACE ("exit with %s", opcode
->name
);
2105 *code
= inst
->value
;