1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
33 N.B. the fields are required to be in such an order than the least signficant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
40 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
43 const aarch64_field
*field
;
44 enum aarch64_field_kind kind
;
48 num
= va_arg (va
, uint32_t);
52 kind
= va_arg (va
, enum aarch64_field_kind
);
53 field
= &fields
[kind
];
54 insert_field (kind
, code
, value
, mask
);
55 value
>>= field
->width
;
60 /* Insert a raw field value VALUE into all fields in SELF->fields.
61 The least significant bit goes in the final field. */
64 insert_all_fields (const aarch64_operand
*self
, aarch64_insn
*code
,
68 enum aarch64_field_kind kind
;
70 for (i
= ARRAY_SIZE (self
->fields
); i
-- > 0; )
71 if (self
->fields
[i
] != FLD_NIL
)
73 kind
= self
->fields
[i
];
74 insert_field (kind
, code
, value
, 0);
75 value
>>= fields
[kind
].width
;
79 /* Operand inserters. */
81 /* Insert register number. */
83 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
85 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
86 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
88 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
92 /* Insert register number, index and/or other data for SIMD register element
93 operand, e.g. the last source operand in
94 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
96 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
97 aarch64_insn
*code
, const aarch64_inst
*inst
,
98 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
101 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
102 /* index and/or type */
103 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
105 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
106 if (info
->type
== AARCH64_OPND_En
107 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
109 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
110 assert (info
->idx
== 1); /* Vn */
111 aarch64_insn value
= info
->reglane
.index
<< pos
;
112 insert_field (FLD_imm4
, code
, value
, 0);
116 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
123 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
124 insert_field (FLD_imm5
, code
, value
, 0);
127 else if (inst
->opcode
->iclass
== dotproduct
)
129 unsigned reglane_index
= info
->reglane
.index
;
130 switch (info
->qualifier
)
132 case AARCH64_OPND_QLF_S_4B
:
134 assert (reglane_index
< 4);
135 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
141 else if (inst
->opcode
->iclass
== cryptosm3
)
143 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
144 unsigned reglane_index
= info
->reglane
.index
;
145 assert (reglane_index
< 4);
146 insert_field (FLD_SM3_imm2
, code
, reglane_index
, 0);
150 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
151 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
152 unsigned reglane_index
= info
->reglane
.index
;
154 if (inst
->opcode
->op
== OP_FCMLA_ELEM
)
155 /* Complex operand takes two elements. */
158 switch (info
->qualifier
)
160 case AARCH64_OPND_QLF_S_H
:
162 assert (reglane_index
< 8);
163 insert_fields (code
, reglane_index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
165 case AARCH64_OPND_QLF_S_S
:
167 assert (reglane_index
< 4);
168 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
170 case AARCH64_OPND_QLF_S_D
:
172 assert (reglane_index
< 2);
173 insert_field (FLD_H
, code
, reglane_index
, 0);
182 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
184 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
186 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
187 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
190 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
192 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
196 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
197 in AdvSIMD load/store instructions. */
199 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
200 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
201 const aarch64_inst
*inst
,
202 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
204 aarch64_insn value
= 0;
205 /* Number of elements in each structure to be loaded/stored. */
206 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
209 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
214 switch (info
->reglist
.num_regs
)
216 case 1: value
= 0x7; break;
217 case 2: value
= 0xa; break;
218 case 3: value
= 0x6; break;
219 case 4: value
= 0x2; break;
224 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
235 insert_field (FLD_opcode
, code
, value
, 0);
240 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
241 single structure to all lanes instructions. */
243 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
244 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
245 const aarch64_inst
*inst
,
246 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
249 /* The opcode dependent area stores the number of elements in
250 each structure to be loaded/stored. */
251 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
254 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
256 value
= (aarch64_insn
) 0;
257 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
258 /* OP_LD1R does not have alternating variant, but have "two consecutive"
260 value
= (aarch64_insn
) 1;
261 insert_field (FLD_S
, code
, value
, 0);
266 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
267 operand e.g. Vt in AdvSIMD load/store single element instructions. */
269 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
270 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
271 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
272 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
274 aarch64_field field
= {0, 0};
275 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
276 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
278 assert (info
->reglist
.has_index
);
281 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
282 /* Encode the index, opcode<2:1> and size. */
283 switch (info
->qualifier
)
285 case AARCH64_OPND_QLF_S_B
:
286 /* Index encoded in "Q:S:size". */
287 QSsize
= info
->reglist
.index
;
290 case AARCH64_OPND_QLF_S_H
:
291 /* Index encoded in "Q:S:size<1>". */
292 QSsize
= info
->reglist
.index
<< 1;
295 case AARCH64_OPND_QLF_S_S
:
296 /* Index encoded in "Q:S". */
297 QSsize
= info
->reglist
.index
<< 2;
300 case AARCH64_OPND_QLF_S_D
:
301 /* Index encoded in "Q". */
302 QSsize
= info
->reglist
.index
<< 3 | 0x1;
308 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
309 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
310 insert_field_2 (&field
, code
, opcodeh2
, 0);
315 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
316 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
317 or SSHR <V><d>, <V><n>, #<shift>. */
319 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
320 const aarch64_opnd_info
*info
,
321 aarch64_insn
*code
, const aarch64_inst
*inst
,
322 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
324 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
327 if (inst
->opcode
->iclass
== asimdshf
)
331 0000 x SEE AdvSIMD modified immediate
340 Q
= (val
& 0x1) ? 1 : 0;
341 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
345 assert (info
->type
== AARCH64_OPND_IMM_VLSR
346 || info
->type
== AARCH64_OPND_IMM_VLSL
);
348 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
351 0000 SEE AdvSIMD modified immediate
352 0001 (16-UInt(immh:immb))
353 001x (32-UInt(immh:immb))
354 01xx (64-UInt(immh:immb))
355 1xxx (128-UInt(immh:immb)) */
356 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
360 0000 SEE AdvSIMD modified immediate
361 0001 (UInt(immh:immb)-8)
362 001x (UInt(immh:immb)-16)
363 01xx (UInt(immh:immb)-32)
364 1xxx (UInt(immh:immb)-64) */
365 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
366 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
371 /* Insert fields for e.g. the immediate operands in
372 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
374 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
376 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
377 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
381 imm
= info
->imm
.value
;
382 if (operand_need_shift_by_two (self
))
384 if (operand_need_shift_by_four (self
))
386 insert_all_fields (self
, code
, imm
);
390 /* Insert immediate and its shift amount for e.g. the last operand in
391 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
393 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
394 aarch64_insn
*code
, const aarch64_inst
*inst
,
395 aarch64_operand_error
*errors
)
398 aarch64_ins_imm (self
, info
, code
, inst
, errors
);
400 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
404 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
405 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
407 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
408 const aarch64_opnd_info
*info
,
410 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
411 aarch64_operand_error
*errors
414 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
415 uint64_t imm
= info
->imm
.value
;
416 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
417 int amount
= info
->shifter
.amount
;
418 aarch64_field field
= {0, 0};
420 /* a:b:c:d:e:f:g:h */
421 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
423 /* Either MOVI <Dd>, #<imm>
424 or MOVI <Vd>.2D, #<imm>.
425 <imm> is a 64-bit immediate
426 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
427 encoded in "a:b:c:d:e:f:g:h". */
428 imm
= aarch64_shrink_expanded_imm8 (imm
);
429 assert ((int)imm
>= 0);
431 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
433 if (kind
== AARCH64_MOD_NONE
)
436 /* shift amount partially in cmode */
437 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
438 if (kind
== AARCH64_MOD_LSL
)
440 /* AARCH64_MOD_LSL: shift zeros. */
441 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
442 assert (esize
== 4 || esize
== 2 || esize
== 1);
443 /* For 8-bit move immediate, the optional LSL #0 does not require
449 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
451 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
455 /* AARCH64_MOD_MSL: shift ones. */
457 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
459 insert_field_2 (&field
, code
, amount
, 0);
464 /* Insert fields for an 8-bit floating-point immediate. */
466 aarch64_ins_fpimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
468 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
469 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
471 insert_all_fields (self
, code
, info
->imm
.value
);
475 /* Insert 1-bit rotation immediate (#90 or #270). */
477 aarch64_ins_imm_rotate1 (const aarch64_operand
*self
,
478 const aarch64_opnd_info
*info
,
479 aarch64_insn
*code
, const aarch64_inst
*inst
,
480 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
482 uint64_t rot
= (info
->imm
.value
- 90) / 180;
484 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
488 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
490 aarch64_ins_imm_rotate2 (const aarch64_operand
*self
,
491 const aarch64_opnd_info
*info
,
492 aarch64_insn
*code
, const aarch64_inst
*inst
,
493 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
495 uint64_t rot
= info
->imm
.value
/ 90;
497 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
501 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
502 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
504 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
506 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
507 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
509 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
513 /* Insert arithmetic immediate for e.g. the last operand in
514 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
516 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
517 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
518 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
521 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
522 insert_field (self
->fields
[0], code
, value
, 0);
523 /* imm12 (unsigned) */
524 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
528 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
529 the operand should be inverted before encoding. */
531 aarch64_ins_limm_1 (const aarch64_operand
*self
,
532 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
533 const aarch64_inst
*inst
, bfd_boolean invert_p
,
534 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
537 uint64_t imm
= info
->imm
.value
;
538 int esize
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
542 /* The constraint check should have guaranteed this wouldn't happen. */
543 assert (aarch64_logical_immediate_p (imm
, esize
, &value
));
545 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
550 /* Insert logical/bitmask immediate for e.g. the last operand in
551 ORR <Wd|WSP>, <Wn>, #<imm>. */
553 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
554 aarch64_insn
*code
, const aarch64_inst
*inst
,
555 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
557 return aarch64_ins_limm_1 (self
, info
, code
, inst
,
558 inst
->opcode
->op
== OP_BIC
, errors
);
561 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
563 aarch64_ins_inv_limm (const aarch64_operand
*self
,
564 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
565 const aarch64_inst
*inst
,
566 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
568 return aarch64_ins_limm_1 (self
, info
, code
, inst
, TRUE
, errors
);
571 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
572 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
574 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
575 aarch64_insn
*code
, const aarch64_inst
*inst
,
576 aarch64_operand_error
*errors
)
578 aarch64_insn value
= 0;
580 assert (info
->idx
== 0);
583 aarch64_ins_regno (self
, info
, code
, inst
, errors
);
584 if (inst
->opcode
->iclass
== ldstpair_indexed
585 || inst
->opcode
->iclass
== ldstnapair_offs
586 || inst
->opcode
->iclass
== ldstpair_off
587 || inst
->opcode
->iclass
== loadlit
)
590 switch (info
->qualifier
)
592 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
593 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
594 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
597 insert_field (FLD_ldst_size
, code
, value
, 0);
602 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
603 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
609 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
611 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
612 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
613 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
614 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
617 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
621 /* Encode the address operand for e.g.
622 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
624 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
625 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
626 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
627 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
630 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
633 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
635 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
637 if (kind
== AARCH64_MOD_LSL
)
638 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
639 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
641 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
642 S
= info
->shifter
.amount
!= 0;
644 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
648 Must be #0 if <extend> is explicitly LSL. */
649 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
650 insert_field (FLD_S
, code
, S
, 0);
655 /* Encode the address operand for e.g.
656 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
658 aarch64_ins_addr_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
659 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
660 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
661 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
664 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
667 int imm
= info
->addr
.offset
.imm
;
668 insert_field (self
->fields
[1], code
, imm
, 0);
671 if (info
->addr
.writeback
)
673 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
674 insert_field (self
->fields
[2], code
, 1, 0);
679 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
681 aarch64_ins_addr_simm (const aarch64_operand
*self
,
682 const aarch64_opnd_info
*info
,
684 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
685 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
690 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
691 /* simm (imm9 or imm7) */
692 imm
= info
->addr
.offset
.imm
;
693 if (self
->fields
[0] == FLD_imm7
)
694 /* scaled immediate in ld/st pair instructions.. */
695 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
696 insert_field (self
->fields
[0], code
, imm
, 0);
697 /* pre/post- index */
698 if (info
->addr
.writeback
)
700 assert (inst
->opcode
->iclass
!= ldst_unscaled
701 && inst
->opcode
->iclass
!= ldstnapair_offs
702 && inst
->opcode
->iclass
!= ldstpair_off
703 && inst
->opcode
->iclass
!= ldst_unpriv
);
704 assert (info
->addr
.preind
!= info
->addr
.postind
);
705 if (info
->addr
.preind
)
706 insert_field (self
->fields
[1], code
, 1, 0);
712 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
714 aarch64_ins_addr_simm10 (const aarch64_operand
*self
,
715 const aarch64_opnd_info
*info
,
717 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
718 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
723 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
725 imm
= info
->addr
.offset
.imm
>> 3;
726 insert_field (self
->fields
[1], code
, imm
>> 9, 0);
727 insert_field (self
->fields
[2], code
, imm
, 0);
729 if (info
->addr
.writeback
)
731 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
732 insert_field (self
->fields
[3], code
, 1, 0);
737 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
739 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
740 const aarch64_opnd_info
*info
,
742 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
743 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
745 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
748 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
750 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
754 /* Encode the address operand for e.g.
755 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
757 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
758 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
759 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
760 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
763 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
765 if (info
->addr
.offset
.is_reg
)
766 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
768 insert_field (FLD_Rm
, code
, 0x1f, 0);
772 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
774 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
775 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
776 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
777 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
780 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
784 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
786 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
787 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
788 const aarch64_inst
*inst
,
789 aarch64_operand_error
*detail ATTRIBUTE_UNUSED
)
791 /* If a system instruction check if we have any restrictions on which
792 registers it can use. */
793 if (inst
->opcode
->iclass
== ic_system
)
795 uint64_t opcode_flags
796 = inst
->opcode
->flags
& (F_SYS_READ
| F_SYS_WRITE
);
797 uint32_t sysreg_flags
798 = info
->sysreg
.flags
& (F_REG_READ
| F_REG_WRITE
);
800 /* Check to see if it's read-only, else check if it's write only.
801 if it's both or unspecified don't care. */
802 if (opcode_flags
== F_SYS_READ
804 && sysreg_flags
!= F_REG_READ
)
806 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
807 detail
->error
= _("specified register cannot be read from");
808 detail
->index
= info
->idx
;
809 detail
->non_fatal
= TRUE
;
811 else if (opcode_flags
== F_SYS_WRITE
813 && sysreg_flags
!= F_REG_WRITE
)
815 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
816 detail
->error
= _("specified register cannot be written to");
817 detail
->index
= info
->idx
;
818 detail
->non_fatal
= TRUE
;
821 /* op0:op1:CRn:CRm:op2 */
822 insert_fields (code
, info
->sysreg
.value
, inst
->opcode
->mask
, 5,
823 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
827 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
829 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
830 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
831 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
832 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
835 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
840 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
842 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
843 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
844 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
845 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
847 /* op1:CRn:CRm:op2 */
848 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
849 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
853 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
856 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
857 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
858 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
859 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
862 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
866 /* Encode the prefetch operation option operand for e.g.
867 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
870 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
871 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
872 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
873 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
876 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
880 /* Encode the hint number for instructions that alias HINT but take an
884 aarch64_ins_hint (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
885 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
886 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
887 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
890 insert_fields (code
, info
->hint_option
->value
, 0, 2, FLD_op2
, FLD_CRm
);
894 /* Encode the extended register operand for e.g.
895 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
897 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
898 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
899 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
900 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
902 enum aarch64_modifier_kind kind
;
905 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
907 kind
= info
->shifter
.kind
;
908 if (kind
== AARCH64_MOD_LSL
)
909 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
910 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
911 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
913 insert_field (FLD_imm3
, code
, info
->shifter
.amount
, 0);
918 /* Encode the shifted register operand for e.g.
919 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
921 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
922 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
923 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
924 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
927 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
929 insert_field (FLD_shift
, code
,
930 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
932 insert_field (FLD_imm6
, code
, info
->shifter
.amount
, 0);
937 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
938 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
939 SELF's operand-dependent value. fields[0] specifies the field that
940 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
942 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand
*self
,
943 const aarch64_opnd_info
*info
,
945 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
946 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
948 int factor
= 1 + get_operand_specific_data (self
);
949 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
950 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
954 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
955 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
956 SELF's operand-dependent value. fields[0] specifies the field that
957 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
959 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand
*self
,
960 const aarch64_opnd_info
*info
,
962 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
963 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
965 int factor
= 1 + get_operand_specific_data (self
);
966 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
967 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
971 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
972 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
973 SELF's operand-dependent value. fields[0] specifies the field that
974 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
975 and imm3 fields, with imm3 being the less-significant part. */
977 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand
*self
,
978 const aarch64_opnd_info
*info
,
980 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
981 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
983 int factor
= 1 + get_operand_specific_data (self
);
984 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
985 insert_fields (code
, info
->addr
.offset
.imm
/ factor
, 0,
986 2, FLD_imm3
, FLD_SVE_imm6
);
990 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
991 is a 4-bit signed number and where <shift> is SELF's operand-dependent
992 value. fields[0] specifies the base register field. */
994 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand
*self
,
995 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
996 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
997 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
999 int factor
= 1 << get_operand_specific_data (self
);
1000 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1001 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1005 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1006 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1007 value. fields[0] specifies the base register field. */
1009 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand
*self
,
1010 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1011 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1012 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1014 int factor
= 1 << get_operand_specific_data (self
);
1015 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1016 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1020 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1021 is SELF's operand-dependent value. fields[0] specifies the base
1022 register field and fields[1] specifies the offset register field. */
1024 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand
*self
,
1025 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1026 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1027 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1029 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1030 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1034 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1035 <shift> is SELF's operand-dependent value. fields[0] specifies the
1036 base register field, fields[1] specifies the offset register field and
1037 fields[2] is a single-bit field that selects SXTW over UXTW. */
1039 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand
*self
,
1040 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1041 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1042 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1044 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1045 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1046 if (info
->shifter
.kind
== AARCH64_MOD_UXTW
)
1047 insert_field (self
->fields
[2], code
, 0, 0);
1049 insert_field (self
->fields
[2], code
, 1, 0);
1053 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1054 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1055 fields[0] specifies the base register field. */
1057 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand
*self
,
1058 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1059 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1060 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1062 int factor
= 1 << get_operand_specific_data (self
);
1063 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1064 insert_field (FLD_imm5
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1068 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1069 where <modifier> is fixed by the instruction and where <msz> is a
1070 2-bit unsigned number. fields[0] specifies the base register field
1071 and fields[1] specifies the offset register field. */
1073 aarch64_ext_sve_addr_zz (const aarch64_operand
*self
,
1074 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1075 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1077 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1078 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1079 insert_field (FLD_SVE_msz
, code
, info
->shifter
.amount
, 0);
1083 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1084 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1085 field and fields[1] specifies the offset register field. */
1087 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand
*self
,
1088 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1089 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1090 aarch64_operand_error
*errors
)
1092 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1095 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1096 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1097 field and fields[1] specifies the offset register field. */
1099 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand
*self
,
1100 const aarch64_opnd_info
*info
,
1102 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1103 aarch64_operand_error
*errors
)
1105 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1108 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1109 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1110 field and fields[1] specifies the offset register field. */
1112 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand
*self
,
1113 const aarch64_opnd_info
*info
,
1115 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1116 aarch64_operand_error
*errors
)
1118 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1121 /* Encode an SVE ADD/SUB immediate. */
1123 aarch64_ins_sve_aimm (const aarch64_operand
*self
,
1124 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1125 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1126 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1128 if (info
->shifter
.amount
== 8)
1129 insert_all_fields (self
, code
, (info
->imm
.value
& 0xff) | 256);
1130 else if (info
->imm
.value
!= 0 && (info
->imm
.value
& 0xff) == 0)
1131 insert_all_fields (self
, code
, ((info
->imm
.value
/ 256) & 0xff) | 256);
1133 insert_all_fields (self
, code
, info
->imm
.value
& 0xff);
1137 /* Encode an SVE CPY/DUP immediate. */
1139 aarch64_ins_sve_asimm (const aarch64_operand
*self
,
1140 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1141 const aarch64_inst
*inst
,
1142 aarch64_operand_error
*errors
)
1144 return aarch64_ins_sve_aimm (self
, info
, code
, inst
, errors
);
1147 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1148 array specifies which field to use for Zn. MM is encoded in the
1149 concatenation of imm5 and SVE_tszh, with imm5 being the less
1150 significant part. */
1152 aarch64_ins_sve_index (const aarch64_operand
*self
,
1153 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1154 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1155 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1157 unsigned int esize
= aarch64_get_qualifier_esize (info
->qualifier
);
1158 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, 0);
1159 insert_fields (code
, (info
->reglane
.index
* 2 + 1) * esize
, 0,
1160 2, FLD_imm5
, FLD_SVE_tszh
);
1164 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1166 aarch64_ins_sve_limm_mov (const aarch64_operand
*self
,
1167 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1168 const aarch64_inst
*inst
,
1169 aarch64_operand_error
*errors
)
1171 return aarch64_ins_limm (self
, info
, code
, inst
, errors
);
1174 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1175 and where MM occupies the most-significant part. The operand-dependent
1176 value specifies the number of bits in Zn. */
1178 aarch64_ins_sve_quad_index (const aarch64_operand
*self
,
1179 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1180 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1181 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1183 unsigned int reg_bits
= get_operand_specific_data (self
);
1184 assert (info
->reglane
.regno
< (1U << reg_bits
));
1185 unsigned int val
= (info
->reglane
.index
<< reg_bits
) + info
->reglane
.regno
;
1186 insert_all_fields (self
, code
, val
);
1190 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1193 aarch64_ins_sve_reglist (const aarch64_operand
*self
,
1194 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1195 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1196 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1198 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
1202 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1203 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1206 aarch64_ins_sve_scale (const aarch64_operand
*self
,
1207 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1208 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1209 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1211 insert_all_fields (self
, code
, info
->imm
.value
);
1212 insert_field (FLD_SVE_imm4
, code
, info
->shifter
.amount
- 1, 0);
1216 /* Encode an SVE shift left immediate. */
1218 aarch64_ins_sve_shlimm (const aarch64_operand
*self
,
1219 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1220 const aarch64_inst
*inst
,
1221 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1223 const aarch64_opnd_info
*prev_operand
;
1226 assert (info
->idx
> 0);
1227 prev_operand
= &inst
->operands
[info
->idx
- 1];
1228 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1229 insert_all_fields (self
, code
, 8 * esize
+ info
->imm
.value
);
1233 /* Encode an SVE shift right immediate. */
1235 aarch64_ins_sve_shrimm (const aarch64_operand
*self
,
1236 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1237 const aarch64_inst
*inst
,
1238 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1240 const aarch64_opnd_info
*prev_operand
;
1243 assert (info
->idx
> 0);
1244 prev_operand
= &inst
->operands
[info
->idx
- 1];
1245 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1246 insert_all_fields (self
, code
, 16 * esize
- info
->imm
.value
);
1250 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1251 The fields array specifies which field to use. */
1253 aarch64_ins_sve_float_half_one (const aarch64_operand
*self
,
1254 const aarch64_opnd_info
*info
,
1256 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1257 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1259 if (info
->imm
.value
== 0x3f000000)
1260 insert_field (self
->fields
[0], code
, 0, 0);
1262 insert_field (self
->fields
[0], code
, 1, 0);
1266 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1267 The fields array specifies which field to use. */
1269 aarch64_ins_sve_float_half_two (const aarch64_operand
*self
,
1270 const aarch64_opnd_info
*info
,
1272 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1273 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1275 if (info
->imm
.value
== 0x3f000000)
1276 insert_field (self
->fields
[0], code
, 0, 0);
1278 insert_field (self
->fields
[0], code
, 1, 0);
1282 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1283 The fields array specifies which field to use. */
1285 aarch64_ins_sve_float_zero_one (const aarch64_operand
*self
,
1286 const aarch64_opnd_info
*info
,
1288 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1289 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1291 if (info
->imm
.value
== 0)
1292 insert_field (self
->fields
[0], code
, 0, 0);
1294 insert_field (self
->fields
[0], code
, 1, 0);
1298 /* Miscellaneous encoding functions. */
1300 /* Encode size[0], i.e. bit 22, for
1301 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1304 encode_asimd_fcvt (aarch64_inst
*inst
)
1307 aarch64_field field
= {0, 0};
1308 enum aarch64_opnd_qualifier qualifier
;
1310 switch (inst
->opcode
->op
)
1314 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1315 qualifier
= inst
->operands
[1].qualifier
;
1319 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1320 qualifier
= inst
->operands
[0].qualifier
;
1325 assert (qualifier
== AARCH64_OPND_QLF_V_4S
1326 || qualifier
== AARCH64_OPND_QLF_V_2D
);
1327 value
= (qualifier
== AARCH64_OPND_QLF_V_4S
) ? 0 : 1;
1328 gen_sub_field (FLD_size
, 0, 1, &field
);
1329 insert_field_2 (&field
, &inst
->value
, value
, 0);
1332 /* Encode size[0], i.e. bit 22, for
1333 e.g. FCVTXN <Vb><d>, <Va><n>. */
1336 encode_asisd_fcvtxn (aarch64_inst
*inst
)
1338 aarch64_insn val
= 1;
1339 aarch64_field field
= {0, 0};
1340 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_S
);
1341 gen_sub_field (FLD_size
, 0, 1, &field
);
1342 insert_field_2 (&field
, &inst
->value
, val
, 0);
1345 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1347 encode_fcvt (aarch64_inst
*inst
)
1350 const aarch64_field field
= {15, 2};
1353 switch (inst
->operands
[0].qualifier
)
1355 case AARCH64_OPND_QLF_S_S
: val
= 0; break;
1356 case AARCH64_OPND_QLF_S_D
: val
= 1; break;
1357 case AARCH64_OPND_QLF_S_H
: val
= 3; break;
1360 insert_field_2 (&field
, &inst
->value
, val
, 0);
1365 /* Return the index in qualifiers_list that INST is using. Should only
1366 be called once the qualifiers are known to be valid. */
1369 aarch64_get_variant (struct aarch64_inst
*inst
)
1371 int i
, nops
, variant
;
1373 nops
= aarch64_num_of_operands (inst
->opcode
);
1374 for (variant
= 0; variant
< AARCH64_MAX_QLF_SEQ_NUM
; ++variant
)
1376 for (i
= 0; i
< nops
; ++i
)
1377 if (inst
->opcode
->qualifiers_list
[variant
][i
]
1378 != inst
->operands
[i
].qualifier
)
1386 /* Do miscellaneous encodings that are not common enough to be driven by
1390 do_misc_encoding (aarch64_inst
*inst
)
1394 switch (inst
->opcode
->op
)
1403 encode_asimd_fcvt (inst
);
1406 encode_asisd_fcvtxn (inst
);
1410 /* Copy Pn to Pm and Pg. */
1411 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1412 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1413 insert_field (FLD_SVE_Pg4_10
, &inst
->value
, value
, 0);
1416 /* Copy Zd to Zm. */
1417 value
= extract_field (FLD_SVE_Zd
, inst
->value
, 0);
1418 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1421 /* Fill in the zero immediate. */
1422 insert_fields (&inst
->value
, 1 << aarch64_get_variant (inst
), 0,
1423 2, FLD_imm5
, FLD_SVE_tszh
);
1426 /* Copy Zn to Zm. */
1427 value
= extract_field (FLD_SVE_Zn
, inst
->value
, 0);
1428 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1433 /* Copy Pd to Pm. */
1434 value
= extract_field (FLD_SVE_Pd
, inst
->value
, 0);
1435 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1437 case OP_MOVZS_P_P_P
:
1439 /* Copy Pn to Pm. */
1440 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1441 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1443 case OP_NOTS_P_P_P_Z
:
1444 case OP_NOT_P_P_P_Z
:
1445 /* Copy Pg to Pm. */
1446 value
= extract_field (FLD_SVE_Pg4_10
, inst
->value
, 0);
1447 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1453 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1455 encode_sizeq (aarch64_inst
*inst
)
1458 enum aarch64_field_kind kind
;
1461 /* Get the index of the operand whose information we are going to use
1462 to encode the size and Q fields.
1463 This is deduced from the possible valid qualifier lists. */
1464 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1465 DEBUG_TRACE ("idx: %d; qualifier: %s", idx
,
1466 aarch64_get_qualifier_name (inst
->operands
[idx
].qualifier
));
1467 sizeq
= aarch64_get_qualifier_standard_value (inst
->operands
[idx
].qualifier
);
1469 insert_field (FLD_Q
, &inst
->value
, sizeq
& 0x1, inst
->opcode
->mask
);
1471 if (inst
->opcode
->iclass
== asisdlse
1472 || inst
->opcode
->iclass
== asisdlsep
1473 || inst
->opcode
->iclass
== asisdlso
1474 || inst
->opcode
->iclass
== asisdlsop
)
1475 kind
= FLD_vldst_size
;
1478 insert_field (kind
, &inst
->value
, (sizeq
>> 1) & 0x3, inst
->opcode
->mask
);
1481 /* Opcodes that have fields shared by multiple operands are usually flagged
1482 with flags. In this function, we detect such flags and use the
1483 information in one of the related operands to do the encoding. The 'one'
1484 operand is not any operand but one of the operands that has the enough
1485 information for such an encoding. */
1488 do_special_encoding (struct aarch64_inst
*inst
)
1491 aarch64_insn value
= 0;
1493 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst
->value
);
1495 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1496 if (inst
->opcode
->flags
& F_COND
)
1498 insert_field (FLD_cond2
, &inst
->value
, inst
->cond
->value
, 0);
1500 if (inst
->opcode
->flags
& F_SF
)
1502 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1503 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1504 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1506 insert_field (FLD_sf
, &inst
->value
, value
, 0);
1507 if (inst
->opcode
->flags
& F_N
)
1508 insert_field (FLD_N
, &inst
->value
, value
, inst
->opcode
->mask
);
1510 if (inst
->opcode
->flags
& F_LSE_SZ
)
1512 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1513 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1514 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1516 insert_field (FLD_lse_sz
, &inst
->value
, value
, 0);
1518 if (inst
->opcode
->flags
& F_SIZEQ
)
1519 encode_sizeq (inst
);
1520 if (inst
->opcode
->flags
& F_FPTYPE
)
1522 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
1523 switch (inst
->operands
[idx
].qualifier
)
1525 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
1526 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
1527 case AARCH64_OPND_QLF_S_H
: value
= 3; break;
1528 default: assert (0);
1530 insert_field (FLD_type
, &inst
->value
, value
, 0);
1532 if (inst
->opcode
->flags
& F_SSIZE
)
1534 enum aarch64_opnd_qualifier qualifier
;
1535 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
1536 qualifier
= inst
->operands
[idx
].qualifier
;
1537 assert (qualifier
>= AARCH64_OPND_QLF_S_B
1538 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
1539 value
= aarch64_get_qualifier_standard_value (qualifier
);
1540 insert_field (FLD_size
, &inst
->value
, value
, inst
->opcode
->mask
);
1542 if (inst
->opcode
->flags
& F_T
)
1544 int num
; /* num of consecutive '0's on the right side of imm5<3:0>. */
1545 aarch64_field field
= {0, 0};
1546 enum aarch64_opnd_qualifier qualifier
;
1549 qualifier
= inst
->operands
[idx
].qualifier
;
1550 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1551 == AARCH64_OPND_CLASS_SIMD_REG
1552 && qualifier
>= AARCH64_OPND_QLF_V_8B
1553 && qualifier
<= AARCH64_OPND_QLF_V_2D
);
1564 value
= aarch64_get_qualifier_standard_value (qualifier
);
1565 insert_field (FLD_Q
, &inst
->value
, value
& 0x1, inst
->opcode
->mask
);
1566 num
= (int) value
>> 1;
1567 assert (num
>= 0 && num
<= 3);
1568 gen_sub_field (FLD_imm5
, 0, num
+ 1, &field
);
1569 insert_field_2 (&field
, &inst
->value
, 1 << num
, inst
->opcode
->mask
);
1571 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
1573 /* Use Rt to encode in the case of e.g.
1574 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1575 enum aarch64_opnd_qualifier qualifier
;
1576 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
1578 /* Otherwise use the result operand, which has to be a integer
1581 assert (idx
== 0 || idx
== 1);
1582 assert (aarch64_get_operand_class (inst
->opcode
->operands
[idx
])
1583 == AARCH64_OPND_CLASS_INT_REG
);
1584 qualifier
= inst
->operands
[idx
].qualifier
;
1585 insert_field (FLD_Q
, &inst
->value
,
1586 aarch64_get_qualifier_standard_value (qualifier
), 0);
1588 if (inst
->opcode
->flags
& F_LDS_SIZE
)
1590 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1591 enum aarch64_opnd_qualifier qualifier
;
1592 aarch64_field field
= {0, 0};
1593 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1594 == AARCH64_OPND_CLASS_INT_REG
);
1595 gen_sub_field (FLD_opc
, 0, 1, &field
);
1596 qualifier
= inst
->operands
[0].qualifier
;
1597 insert_field_2 (&field
, &inst
->value
,
1598 1 - aarch64_get_qualifier_standard_value (qualifier
), 0);
1600 /* Miscellaneous encoding as the last step. */
1601 if (inst
->opcode
->flags
& F_MISC
)
1602 do_misc_encoding (inst
);
1604 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst
->value
);
1607 /* Some instructions (including all SVE ones) use the instruction class
1608 to describe how a qualifiers_list index is represented in the instruction
1609 encoding. If INST is such an instruction, encode the chosen qualifier
1613 aarch64_encode_variant_using_iclass (struct aarch64_inst
*inst
)
1615 switch (inst
->opcode
->iclass
)
1618 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1619 0, 2, FLD_SVE_M_14
, FLD_size
);
1623 case sve_shift_pred
:
1624 case sve_shift_unpred
:
1625 /* For indices and shift amounts, the variant is encoded as
1626 part of the immediate. */
1630 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1631 and depend on the immediate. They don't have a separate
1636 /* sve_misc instructions have only a single variant. */
1640 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1641 0, 2, FLD_SVE_M_16
, FLD_size
);
1645 insert_field (FLD_SVE_M_4
, &inst
->value
, aarch64_get_variant (inst
), 0);
1650 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
), 0);
1654 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
) + 1, 0);
1658 insert_field (FLD_SVE_sz
, &inst
->value
, aarch64_get_variant (inst
), 0);
1666 /* Converters converting an alias opcode instruction to its real form. */
1668 /* ROR <Wd>, <Ws>, #<shift>
1670 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1672 convert_ror_to_extr (aarch64_inst
*inst
)
1674 copy_operand_info (inst
, 3, 2);
1675 copy_operand_info (inst
, 2, 1);
1678 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1680 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1682 convert_xtl_to_shll (aarch64_inst
*inst
)
1684 inst
->operands
[2].qualifier
= inst
->operands
[1].qualifier
;
1685 inst
->operands
[2].imm
.value
= 0;
1689 LSR <Xd>, <Xn>, #<shift>
1691 UBFM <Xd>, <Xn>, #<shift>, #63. */
1693 convert_sr_to_bfm (aarch64_inst
*inst
)
1695 inst
->operands
[3].imm
.value
=
1696 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
1699 /* Convert MOV to ORR. */
1701 convert_mov_to_orr (aarch64_inst
*inst
)
1703 /* MOV <Vd>.<T>, <Vn>.<T>
1705 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1706 copy_operand_info (inst
, 2, 1);
1709 /* When <imms> >= <immr>, the instruction written:
1710 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1712 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1715 convert_bfx_to_bfm (aarch64_inst
*inst
)
1719 /* Convert the operand. */
1720 lsb
= inst
->operands
[2].imm
.value
;
1721 width
= inst
->operands
[3].imm
.value
;
1722 inst
->operands
[2].imm
.value
= lsb
;
1723 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
1726 /* When <imms> < <immr>, the instruction written:
1727 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1729 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1732 convert_bfi_to_bfm (aarch64_inst
*inst
)
1736 /* Convert the operand. */
1737 lsb
= inst
->operands
[2].imm
.value
;
1738 width
= inst
->operands
[3].imm
.value
;
1739 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1741 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1742 inst
->operands
[3].imm
.value
= width
- 1;
1746 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1747 inst
->operands
[3].imm
.value
= width
- 1;
1751 /* The instruction written:
1752 BFC <Xd>, #<lsb>, #<width>
1754 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1757 convert_bfc_to_bfm (aarch64_inst
*inst
)
1762 copy_operand_info (inst
, 3, 2);
1763 copy_operand_info (inst
, 2, 1);
1764 copy_operand_info (inst
, 1, 0);
1765 inst
->operands
[1].reg
.regno
= 0x1f;
1767 /* Convert the immediate operand. */
1768 lsb
= inst
->operands
[2].imm
.value
;
1769 width
= inst
->operands
[3].imm
.value
;
1770 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1772 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
1773 inst
->operands
[3].imm
.value
= width
- 1;
1777 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
1778 inst
->operands
[3].imm
.value
= width
- 1;
1782 /* The instruction written:
1783 LSL <Xd>, <Xn>, #<shift>
1785 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1788 convert_lsl_to_ubfm (aarch64_inst
*inst
)
1790 int64_t shift
= inst
->operands
[2].imm
.value
;
1792 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
1794 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
1795 inst
->operands
[3].imm
.value
= 31 - shift
;
1799 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
1800 inst
->operands
[3].imm
.value
= 63 - shift
;
1804 /* CINC <Wd>, <Wn>, <cond>
1806 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1809 convert_to_csel (aarch64_inst
*inst
)
1811 copy_operand_info (inst
, 3, 2);
1812 copy_operand_info (inst
, 2, 1);
1813 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1816 /* CSET <Wd>, <cond>
1818 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1821 convert_cset_to_csinc (aarch64_inst
*inst
)
1823 copy_operand_info (inst
, 3, 1);
1824 copy_operand_info (inst
, 2, 0);
1825 copy_operand_info (inst
, 1, 0);
1826 inst
->operands
[1].reg
.regno
= 0x1f;
1827 inst
->operands
[2].reg
.regno
= 0x1f;
1828 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
1833 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1836 convert_mov_to_movewide (aarch64_inst
*inst
)
1839 uint32_t shift_amount
;
1842 switch (inst
->opcode
->op
)
1844 case OP_MOV_IMM_WIDE
:
1845 value
= inst
->operands
[1].imm
.value
;
1847 case OP_MOV_IMM_WIDEN
:
1848 value
= ~inst
->operands
[1].imm
.value
;
1853 inst
->operands
[1].type
= AARCH64_OPND_HALF
;
1854 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
1855 if (! aarch64_wide_constant_p (value
, is32
, &shift_amount
))
1856 /* The constraint check should have guaranteed this wouldn't happen. */
1858 value
>>= shift_amount
;
1860 inst
->operands
[1].imm
.value
= value
;
1861 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_LSL
;
1862 inst
->operands
[1].shifter
.amount
= shift_amount
;
1867 ORR <Wd>, WZR, #<imm>. */
1870 convert_mov_to_movebitmask (aarch64_inst
*inst
)
1872 copy_operand_info (inst
, 2, 1);
1873 inst
->operands
[1].reg
.regno
= 0x1f;
1874 inst
->operands
[1].skip
= 0;
1877 /* Some alias opcodes are assembled by being converted to their real-form. */
1880 convert_to_real (aarch64_inst
*inst
, const aarch64_opcode
*real
)
1882 const aarch64_opcode
*alias
= inst
->opcode
;
1884 if ((alias
->flags
& F_CONV
) == 0)
1885 goto convert_to_real_return
;
1891 convert_sr_to_bfm (inst
);
1894 convert_lsl_to_ubfm (inst
);
1899 convert_to_csel (inst
);
1903 convert_cset_to_csinc (inst
);
1908 convert_bfx_to_bfm (inst
);
1913 convert_bfi_to_bfm (inst
);
1916 convert_bfc_to_bfm (inst
);
1919 convert_mov_to_orr (inst
);
1921 case OP_MOV_IMM_WIDE
:
1922 case OP_MOV_IMM_WIDEN
:
1923 convert_mov_to_movewide (inst
);
1925 case OP_MOV_IMM_LOG
:
1926 convert_mov_to_movebitmask (inst
);
1929 convert_ror_to_extr (inst
);
1935 convert_xtl_to_shll (inst
);
1941 convert_to_real_return
:
1942 aarch64_replace_opcode (inst
, real
);
1945 /* Encode *INST_ORI of the opcode code OPCODE.
1946 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1947 matched operand qualifier sequence in *QLF_SEQ. */
1950 aarch64_opcode_encode (const aarch64_opcode
*opcode
,
1951 const aarch64_inst
*inst_ori
, aarch64_insn
*code
,
1952 aarch64_opnd_qualifier_t
*qlf_seq
,
1953 aarch64_operand_error
*mismatch_detail
,
1954 aarch64_instr_sequence
* insn_sequence
)
1957 const aarch64_opcode
*aliased
;
1958 aarch64_inst copy
, *inst
;
1960 DEBUG_TRACE ("enter with %s", opcode
->name
);
1962 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1966 assert (inst
->opcode
== NULL
|| inst
->opcode
== opcode
);
1967 if (inst
->opcode
== NULL
)
1968 inst
->opcode
= opcode
;
1970 /* Constrain the operands.
1971 After passing this, the encoding is guaranteed to succeed. */
1972 if (aarch64_match_operands_constraint (inst
, mismatch_detail
) == 0)
1974 DEBUG_TRACE ("FAIL since operand constraint not met");
1978 /* Get the base value.
1979 Note: this has to be before the aliasing handling below in order to
1980 get the base value from the alias opcode before we move on to the
1981 aliased opcode for encoding. */
1982 inst
->value
= opcode
->opcode
;
1984 /* No need to do anything else if the opcode does not have any operand. */
1985 if (aarch64_num_of_operands (opcode
) == 0)
1988 /* Assign operand indexes and check types. Also put the matched
1989 operand qualifiers in *QLF_SEQ to return. */
1990 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1992 assert (opcode
->operands
[i
] == inst
->operands
[i
].type
);
1993 inst
->operands
[i
].idx
= i
;
1994 if (qlf_seq
!= NULL
)
1995 *qlf_seq
= inst
->operands
[i
].qualifier
;
1998 aliased
= aarch64_find_real_opcode (opcode
);
1999 /* If the opcode is an alias and it does not ask for direct encoding by
2000 itself, the instruction will be transformed to the form of real opcode
2001 and the encoding will be carried out using the rules for the aliased
2003 if (aliased
!= NULL
&& (opcode
->flags
& F_CONV
))
2005 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2006 aliased
->name
, opcode
->name
);
2007 /* Convert the operands to the form of the real opcode. */
2008 convert_to_real (inst
, aliased
);
2012 aarch64_opnd_info
*info
= inst
->operands
;
2014 /* Call the inserter of each operand. */
2015 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++info
)
2017 const aarch64_operand
*opnd
;
2018 enum aarch64_opnd type
= opcode
->operands
[i
];
2019 if (type
== AARCH64_OPND_NIL
)
2023 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2026 opnd
= &aarch64_operands
[type
];
2027 if (operand_has_inserter (opnd
)
2028 && !aarch64_insert_operand (opnd
, info
, &inst
->value
, inst
,
2033 /* Call opcode encoders indicated by flags. */
2034 if (opcode_has_special_coder (opcode
))
2035 do_special_encoding (inst
);
2037 /* Possibly use the instruction class to encode the chosen qualifier
2039 aarch64_encode_variant_using_iclass (inst
);
2041 /* Run a verifier if the instruction has one set. */
2042 if (opcode
->verifier
)
2044 enum err_type result
= opcode
->verifier (inst
, *code
, 0, TRUE
,
2045 mismatch_detail
, insn_sequence
);
2057 /* Always run constrain verifiers, this is needed because constrains need to
2058 maintain a global state. Regardless if the instruction has the flag set
2060 enum err_type result
= verify_constraints (inst
, *code
, 0, TRUE
,
2061 mismatch_detail
, insn_sequence
);
2074 DEBUG_TRACE ("exit with %s", opcode
->name
);
2076 *code
= inst
->value
;