[AArch64][SVE 23/32] Add SVE pattern and prfop operands
[deliverable/binutils-gdb.git] / opcodes / aarch64-asm.c
CommitLineData
a06ea964 1/* aarch64-asm.c -- AArch64 assembler support.
6f2750fe 2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
a06ea964
NC
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21#include "sysdep.h"
22#include <stdarg.h>
b5464a68 23#include "libiberty.h"
a06ea964
NC
24#include "aarch64-asm.h"
25
26/* Utilities. */
27
28/* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
31
32 N.B. the fields are required to be in such an order than the least signficant
33 field for VALUE comes the first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
9aff4b7a 35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
a06ea964
NC
36 the order of M, L, H. */
37
static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  /* First variadic argument is the number of fields that follow.  */
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  while (num--)
    {
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      /* Consume the bits just inserted; the next field receives the
	 next-more-significant slice of VALUE.  */
      value >>= field->width;
    }
  va_end (va);
}
58
b5464a68
RS
59/* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
61
static void
insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
		   aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  /* Walk the fields array backwards so the last field gets the least
     significant bits of VALUE, per the documented layout.  */
  for (i = ARRAY_SIZE (self->fields); i-- > 0; )
    if (self->fields[i] != FLD_NIL)
      {
	kind = self->fields[i];
	insert_field (kind, code, value, 0);
	/* Shift off the bits consumed by this field.  */
	value >>= fields[kind].width;
      }
}
77
a06ea964
NC
78/* Operand inserters. */
79
80/* Insert register number. */
81const char *
82aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
83 aarch64_insn *code,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
85{
86 insert_field (self->fields[0], code, info->reg.regno, 0);
87 return NULL;
88}
89
90/* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
const char *
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is the number of low zero bits implied by the element size:
	 0 for B, 1 for H, 2 for S, 3 for D.  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  /* The '1' marker bit selects the element size; the index sits
	     above it.  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  insert_field (FLD_H, code, info->reglane.index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return NULL;
}
148
149/* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
150const char *
151aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
152 aarch64_insn *code,
153 const aarch64_inst *inst ATTRIBUTE_UNUSED)
154{
155 /* R */
156 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
157 /* len */
158 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
159 return NULL;
160}
161
162/* Insert Rt and opcode fields for a register list operand, e.g. Vt
163 in AdvSIMD load/store instructions. */
const char *
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode */
  switch (num)
    {
    case 1:
      /* LD1/ST1: the opcode field also encodes how many registers the
	 single-structure form touches.  */
      switch (info->reglist.num_regs)
	{
	case 1: value = 0x7; break;
	case 2: value = 0xa; break;
	case 3: value = 0x6; break;
	case 4: value = 0x2; break;
	default: assert (0);
	}
      break;
    case 2:
      /* LD2/ST2 or the 4-register LD1x2-style form.  */
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      assert (0);
    }
  insert_field (FLD_opcode, code, value, 0);

  return NULL;
}
204
205/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
206 single structure to all lanes instructions. */
207const char *
208aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
209 const aarch64_opnd_info *info, aarch64_insn *code,
210 const aarch64_inst *inst)
211{
212 aarch64_insn value;
213 /* The opcode dependent area stores the number of elements in
214 each structure to be loaded/stored. */
215 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
216
217 /* Rt */
218 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
219 /* S */
220 value = (aarch64_insn) 0;
221 if (is_ld1r && info->reglist.num_regs == 2)
222 /* OP_LD1R does not have alternating variant, but have "two consecutive"
223 instead. */
224 value = (aarch64_insn) 1;
225 insert_field (FLD_S, code, value, 0);
226
227 return NULL;
228}
229
230/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
231 operand e.g. Vt in AdvSIMD load/store single element instructions. */
const char *
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> must be set for the D variant.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      assert (0);
    }
  /* Q:S:size is spread over three fields, least significant first.  */
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  /* opcode<2:1> lives in bits 1-2 of the asisdlso opcode field.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return NULL;
}
277
278/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
279 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
280 or SSHR <V><d>, <V><n>, #<shift>. */
const char *
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      /* The low bit of the standard qualifier value is the Q bit.  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return NULL;
}
332
333/* Insert fields for e.g. the immediate operands in
334 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
const char *
aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
		 aarch64_insn *code,
		 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int64_t imm;

  imm = info->imm.value;
  /* Some operands (e.g. word-aligned offsets) encode IMM divided by 4.  */
  if (operand_need_shift_by_two (self))
    imm >>= 2;
  insert_all_fields (self, code, imm);
  return NULL;
}
348
349/* Insert immediate and its shift amount for e.g. the last operand in
350 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
const char *
aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
		      aarch64_insn *code, const aarch64_inst *inst)
{
  /* imm16 */
  aarch64_ins_imm (self, info, code, inst);
  /* hw: the LSL amount is a multiple of 16, so divide by 16 to get the
     hw field value.  */
  insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
  return NULL;
}
361
362/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
363 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
const char *
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
				  const aarch64_opnd_info *info,
				  aarch64_insn *code,
				  const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
	 or     MOVI <Vd>.2D, #<imm>.
	 <imm> is a 64-bit immediate
	 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
	 encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      /* The constraint check should have rejected unencodable values.  */
      assert ((int)imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return NULL;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
	 encoding.  */
      if (esize == 1)
	return NULL;
      /* LSL amounts are multiples of 8; convert to a field index.  */
      amount >>= 3;
      if (esize == 4)
	gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      else
	gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);	/* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return NULL;
}
419
420/* Insert fields for an 8-bit floating-point immediate. */
421const char *
422aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
423 aarch64_insn *code,
424 const aarch64_inst *inst ATTRIBUTE_UNUSED)
425{
426 insert_all_fields (self, code, info->imm.value);
427 return NULL;
a06ea964
NC
428}
429
430/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
431 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
432const char *
433aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
434 aarch64_insn *code,
435 const aarch64_inst *inst ATTRIBUTE_UNUSED)
436{
437 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
438 return NULL;
439}
440
441/* Insert arithmetic immediate for e.g. the last operand in
442 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
443const char *
444aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
445 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
446{
447 /* shift */
448 aarch64_insn value = info->shifter.amount ? 1 : 0;
449 insert_field (self->fields[0], code, value, 0);
450 /* imm12 (unsigned) */
451 insert_field (self->fields[1], code, info->imm.value, 0);
452 return NULL;
453}
454
455/* Insert logical/bitmask immediate for e.g. the last operand in
456 ORR <Wd|WSP>, <Wn>, #<imm>. */
const char *
aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
		  aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  /* BIC is an alias whose immediate is the bitwise NOT of the encoded
     logical immediate.  */
  if (inst->opcode->op == OP_BIC)
    imm = ~imm;
  if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);

  /* The N:immr:imms encoding is spread over three fields, least
     significant first.  */
  insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
		 self->fields[0]);
  return NULL;
}
475
476/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
477 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
const char *
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
		aarch64_insn *code, const aarch64_inst *inst)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size: pair and literal forms use a 2-bit size field.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_Q: value = 2; break;
	default: assert (0);
	}
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size: other forms split the standard qualifier value
	 across the size field and opc bit 1.  */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return NULL;
}
512
513/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
514const char *
515aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
516 const aarch64_opnd_info *info, aarch64_insn *code,
517 const aarch64_inst *inst ATTRIBUTE_UNUSED)
518{
519 /* Rn */
520 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
521 return NULL;
522}
523
524/* Encode the address operand for e.g.
525 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
const char *
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    /* Any non-zero shift amount sets S.  */
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return NULL;
}
556
557/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
const char *
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* scaled immediate in ld/st pair instructions..  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      /* These instruction classes have no writeback forms.  */
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      /* Exactly one of pre-index and post-index must be selected.  */
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return NULL;
}
588
589/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
590const char *
591aarch64_ins_addr_uimm12 (const aarch64_operand *self,
592 const aarch64_opnd_info *info,
593 aarch64_insn *code,
594 const aarch64_inst *inst ATTRIBUTE_UNUSED)
595{
596 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
597
598 /* Rn */
599 insert_field (self->fields[0], code, info->addr.base_regno, 0);
600 /* uimm12 */
601 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
602 return NULL;
603}
604
605/* Encode the address operand for e.g.
606 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
607const char *
608aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
609 const aarch64_opnd_info *info, aarch64_insn *code,
610 const aarch64_inst *inst ATTRIBUTE_UNUSED)
611{
612 /* Rn */
613 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
614 /* Rm | #<amount> */
615 if (info->addr.offset.is_reg)
616 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
617 else
618 insert_field (FLD_Rm, code, 0x1f, 0);
619 return NULL;
620}
621
622/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
623const char *
624aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
625 const aarch64_opnd_info *info, aarch64_insn *code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED)
627{
628 /* cond */
629 insert_field (FLD_cond, code, info->cond->value, 0);
630 return NULL;
631}
632
633/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
634const char *
635aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
636 const aarch64_opnd_info *info, aarch64_insn *code,
637 const aarch64_inst *inst ATTRIBUTE_UNUSED)
638{
639 /* op0:op1:CRn:CRm:op2 */
640 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
641 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
642 return NULL;
643}
644
645/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
646const char *
647aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
648 const aarch64_opnd_info *info, aarch64_insn *code,
649 const aarch64_inst *inst ATTRIBUTE_UNUSED)
650{
651 /* op1:op2 */
652 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
653 FLD_op2, FLD_op1);
654 return NULL;
655}
656
657/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
658const char *
659aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
660 const aarch64_opnd_info *info, aarch64_insn *code,
661 const aarch64_inst *inst ATTRIBUTE_UNUSED)
662{
663 /* op1:CRn:CRm:op2 */
664 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
665 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
666 return NULL;
667}
668
669/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
670
671const char *
672aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
673 const aarch64_opnd_info *info, aarch64_insn *code,
674 const aarch64_inst *inst ATTRIBUTE_UNUSED)
675{
676 /* CRm */
677 insert_field (FLD_CRm, code, info->barrier->value, 0);
678 return NULL;
679}
680
681/* Encode the prefetch operation option operand for e.g.
682 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
683
684const char *
685aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
686 const aarch64_opnd_info *info, aarch64_insn *code,
687 const aarch64_inst *inst ATTRIBUTE_UNUSED)
688{
689 /* prfop in Rt */
690 insert_field (FLD_Rt, code, info->prfop->value, 0);
691 return NULL;
692}
693
9ed608f9
MW
694/* Encode the hint number for instructions that alias HINT but take an
695 operand. */
696
697const char *
698aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
699 const aarch64_opnd_info *info, aarch64_insn *code,
700 const aarch64_inst *inst ATTRIBUTE_UNUSED)
701{
702 /* CRm:op2. */
703 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
704 return NULL;
705}
706
a06ea964
NC
707/* Encode the extended register operand for e.g.
708 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
709const char *
710aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
711 const aarch64_opnd_info *info, aarch64_insn *code,
712 const aarch64_inst *inst ATTRIBUTE_UNUSED)
713{
714 enum aarch64_modifier_kind kind;
715
716 /* Rm */
717 insert_field (FLD_Rm, code, info->reg.regno, 0);
718 /* option */
719 kind = info->shifter.kind;
720 if (kind == AARCH64_MOD_LSL)
721 kind = info->qualifier == AARCH64_OPND_QLF_W
722 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
723 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
724 /* imm3 */
725 insert_field (FLD_imm3, code, info->shifter.amount, 0);
726
727 return NULL;
728}
729
730/* Encode the shifted register operand for e.g.
731 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
732const char *
733aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
734 const aarch64_opnd_info *info, aarch64_insn *code,
735 const aarch64_inst *inst ATTRIBUTE_UNUSED)
736{
737 /* Rm */
738 insert_field (FLD_Rm, code, info->reg.regno, 0);
739 /* shift */
740 insert_field (FLD_shift, code,
741 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
742 /* imm6 */
743 insert_field (FLD_imm6, code, info->shifter.amount, 0);
744
745 return NULL;
746}
747
f11ad6bc
RS
748/* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
749 array specifies which field to use for Zn. MM is encoded in the
750 concatenation of imm5 and SVE_tszh, with imm5 being the less
751 significant part. */
const char *
aarch64_ins_sve_index (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  /* The triangular encoding: (index * 2 + 1) * esize places a marker
     bit below the index at a position determined by the element size,
     spread over imm5 (low) and SVE_tszh (high).  */
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
		 2, FLD_imm5, FLD_SVE_tszh);
  return NULL;
}
763
764/* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
765 to use for Zn. */
766const char *
767aarch64_ins_sve_reglist (const aarch64_operand *self,
768 const aarch64_opnd_info *info, aarch64_insn *code,
769 const aarch64_inst *inst ATTRIBUTE_UNUSED)
770{
771 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
772 return NULL;
773}
774
a06ea964
NC
775/* Miscellaneous encoding functions. */
776
777/* Encode size[0], i.e. bit 22, for
778 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
779
static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  /* Pick the operand whose qualifier distinguishes the 4S/2D forms:
     the wider ("<Ta>") arrangement.  */
  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  /* size[0]: 0 for the 4S arrangement, 1 for 2D.  */
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
808
809/* Encode size[0], i.e. bit 22, for
810 e.g. FCVTXN <Vb><d>, <Va><n>. */
811
812static void
813encode_asisd_fcvtxn (aarch64_inst *inst)
814{
815 aarch64_insn val = 1;
816 aarch64_field field = {0, 0};
817 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
818 gen_sub_field (FLD_size, 0, 1, &field);
819 insert_field_2 (&field, &inst->value, val, 0);
820}
821
822/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
823static void
824encode_fcvt (aarch64_inst *inst)
825{
826 aarch64_insn val;
827 const aarch64_field field = {15, 2};
828
829 /* opc dstsize */
830 switch (inst->operands[0].qualifier)
831 {
832 case AARCH64_OPND_QLF_S_S: val = 0; break;
833 case AARCH64_OPND_QLF_S_D: val = 1; break;
834 case AARCH64_OPND_QLF_S_H: val = 3; break;
835 default: abort ();
836 }
837 insert_field_2 (&field, &inst->value, val, 0);
838
839 return;
840}
841
842/* Do miscellaneous encodings that are not common enough to be driven by
843 flags. */
844
static void
do_misc_encoding (aarch64_inst *inst)
{
  /* Dispatch to the per-opcode helper; opcodes without a special
     encoding fall through the default and need nothing extra.  */
  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    default: break;
    }
}
865
866/* Encode the 'size' and 'Q' field for e.g. SHADD. */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q: the low bit of the standard qualifier value.  */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size: AdvSIMD load/store classes use a different field position.  */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
893
894/* Opcodes that have fields shared by multiple operands are usually flagged
895 with flags. In this function, we detect such flags and use the
896 information in one of the related operands to do the encoding. The 'one'
897 operand is not any operand but one of the operands that has the enough
898 information for such an encoding. */
899
900static void
901do_special_encoding (struct aarch64_inst *inst)
902{
903 int idx;
4ad3b7ef 904 aarch64_insn value = 0;
a06ea964
NC
905
906 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
907
908 /* Condition for truly conditional executed instructions, e.g. b.cond. */
909 if (inst->opcode->flags & F_COND)
910 {
911 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
912 }
913 if (inst->opcode->flags & F_SF)
914 {
915 idx = select_operand_for_sf_field_coding (inst->opcode);
916 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
917 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
918 ? 1 : 0;
919 insert_field (FLD_sf, &inst->value, value, 0);
920 if (inst->opcode->flags & F_N)
921 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
922 }
ee804238
JW
923 if (inst->opcode->flags & F_LSE_SZ)
924 {
925 idx = select_operand_for_sf_field_coding (inst->opcode);
926 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
927 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
928 ? 1 : 0;
929 insert_field (FLD_lse_sz, &inst->value, value, 0);
930 }
a06ea964
NC
931 if (inst->opcode->flags & F_SIZEQ)
932 encode_sizeq (inst);
933 if (inst->opcode->flags & F_FPTYPE)
934 {
935 idx = select_operand_for_fptype_field_coding (inst->opcode);
936 switch (inst->operands[idx].qualifier)
937 {
938 case AARCH64_OPND_QLF_S_S: value = 0; break;
939 case AARCH64_OPND_QLF_S_D: value = 1; break;
940 case AARCH64_OPND_QLF_S_H: value = 3; break;
941 default: assert (0);
942 }
943 insert_field (FLD_type, &inst->value, value, 0);
944 }
945 if (inst->opcode->flags & F_SSIZE)
946 {
947 enum aarch64_opnd_qualifier qualifier;
948 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
949 qualifier = inst->operands[idx].qualifier;
950 assert (qualifier >= AARCH64_OPND_QLF_S_B
951 && qualifier <= AARCH64_OPND_QLF_S_Q);
952 value = aarch64_get_qualifier_standard_value (qualifier);
953 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
954 }
955 if (inst->opcode->flags & F_T)
956 {
957 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
958 aarch64_field field = {0, 0};
959 enum aarch64_opnd_qualifier qualifier;
960
961 idx = 0;
962 qualifier = inst->operands[idx].qualifier;
963 assert (aarch64_get_operand_class (inst->opcode->operands[0])
964 == AARCH64_OPND_CLASS_SIMD_REG
965 && qualifier >= AARCH64_OPND_QLF_V_8B
966 && qualifier <= AARCH64_OPND_QLF_V_2D);
967 /* imm5<3:0> q <t>
968 0000 x reserved
969 xxx1 0 8b
970 xxx1 1 16b
971 xx10 0 4h
972 xx10 1 8h
973 x100 0 2s
974 x100 1 4s
975 1000 0 reserved
976 1000 1 2d */
977 value = aarch64_get_qualifier_standard_value (qualifier);
978 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
979 num = (int) value >> 1;
980 assert (num >= 0 && num <= 3);
981 gen_sub_field (FLD_imm5, 0, num + 1, &field);
982 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
983 }
984 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
985 {
986 /* Use Rt to encode in the case of e.g.
987 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
988 enum aarch64_opnd_qualifier qualifier;
989 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
990 if (idx == -1)
991 /* Otherwise use the result operand, which has to be a integer
992 register. */
993 idx = 0;
994 assert (idx == 0 || idx == 1);
995 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
996 == AARCH64_OPND_CLASS_INT_REG);
997 qualifier = inst->operands[idx].qualifier;
998 insert_field (FLD_Q, &inst->value,
999 aarch64_get_qualifier_standard_value (qualifier), 0);
1000 }
1001 if (inst->opcode->flags & F_LDS_SIZE)
1002 {
1003 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1004 enum aarch64_opnd_qualifier qualifier;
1005 aarch64_field field = {0, 0};
1006 assert (aarch64_get_operand_class (inst->opcode->operands[0])
1007 == AARCH64_OPND_CLASS_INT_REG);
1008 gen_sub_field (FLD_opc, 0, 1, &field);
1009 qualifier = inst->operands[0].qualifier;
1010 insert_field_2 (&field, &inst->value,
1011 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
1012 }
1013 /* Miscellaneous encoding as the last step. */
1014 if (inst->opcode->flags & F_MISC)
1015 do_misc_encoding (inst);
1016
1017 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
1018}
1019
1020/* Converters converting an alias opcode instruction to its real form. */
1021
1022/* ROR <Wd>, <Ws>, #<shift>
1023 is equivalent to:
1024 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1025static void
1026convert_ror_to_extr (aarch64_inst *inst)
1027{
1028 copy_operand_info (inst, 3, 2);
1029 copy_operand_info (inst, 2, 1);
1030}
1031
e30181a5
YZ
1032/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1033 is equivalent to:
1034 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1035static void
1036convert_xtl_to_shll (aarch64_inst *inst)
1037{
1038 inst->operands[2].qualifier = inst->operands[1].qualifier;
1039 inst->operands[2].imm.value = 0;
1040}
1041
a06ea964
NC
1042/* Convert
1043 LSR <Xd>, <Xn>, #<shift>
1044 to
1045 UBFM <Xd>, <Xn>, #<shift>, #63. */
1046static void
1047convert_sr_to_bfm (aarch64_inst *inst)
1048{
1049 inst->operands[3].imm.value =
1050 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1051}
1052
1053/* Convert MOV to ORR. */
1054static void
1055convert_mov_to_orr (aarch64_inst *inst)
1056{
1057 /* MOV <Vd>.<T>, <Vn>.<T>
1058 is equivalent to:
1059 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1060 copy_operand_info (inst, 2, 1);
1061}
1062
1063/* When <imms> >= <immr>, the instruction written:
1064 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1065 is equivalent to:
1066 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1067
1068static void
1069convert_bfx_to_bfm (aarch64_inst *inst)
1070{
1071 int64_t lsb, width;
1072
1073 /* Convert the operand. */
1074 lsb = inst->operands[2].imm.value;
1075 width = inst->operands[3].imm.value;
1076 inst->operands[2].imm.value = lsb;
1077 inst->operands[3].imm.value = lsb + width - 1;
1078}
1079
1080/* When <imms> < <immr>, the instruction written:
1081 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1082 is equivalent to:
1083 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1084
1085static void
1086convert_bfi_to_bfm (aarch64_inst *inst)
1087{
1088 int64_t lsb, width;
1089
1090 /* Convert the operand. */
1091 lsb = inst->operands[2].imm.value;
1092 width = inst->operands[3].imm.value;
1093 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1094 {
1095 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1096 inst->operands[3].imm.value = width - 1;
1097 }
1098 else
1099 {
1100 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1101 inst->operands[3].imm.value = width - 1;
1102 }
1103}
1104
d685192a
MW
1105/* The instruction written:
1106 BFC <Xd>, #<lsb>, #<width>
1107 is equivalent to:
1108 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1109
1110static void
1111convert_bfc_to_bfm (aarch64_inst *inst)
1112{
1113 int64_t lsb, width;
1114
1115 /* Insert XZR. */
1116 copy_operand_info (inst, 3, 2);
1117 copy_operand_info (inst, 2, 1);
1118 copy_operand_info (inst, 2, 0);
1119 inst->operands[1].reg.regno = 0x1f;
1120
1121 /* Convert the immedate operand. */
1122 lsb = inst->operands[2].imm.value;
1123 width = inst->operands[3].imm.value;
1124 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1125 {
1126 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1127 inst->operands[3].imm.value = width - 1;
1128 }
1129 else
1130 {
1131 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1132 inst->operands[3].imm.value = width - 1;
1133 }
1134}
1135
a06ea964
NC
1136/* The instruction written:
1137 LSL <Xd>, <Xn>, #<shift>
1138 is equivalent to:
1139 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1140
1141static void
1142convert_lsl_to_ubfm (aarch64_inst *inst)
1143{
1144 int64_t shift = inst->operands[2].imm.value;
1145
1146 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1147 {
1148 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1149 inst->operands[3].imm.value = 31 - shift;
1150 }
1151 else
1152 {
1153 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1154 inst->operands[3].imm.value = 63 - shift;
1155 }
1156}
1157
1158/* CINC <Wd>, <Wn>, <cond>
1159 is equivalent to:
1160 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1161
1162static void
1163convert_to_csel (aarch64_inst *inst)
1164{
1165 copy_operand_info (inst, 3, 2);
1166 copy_operand_info (inst, 2, 1);
1167 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1168}
1169
1170/* CSET <Wd>, <cond>
1171 is equivalent to:
1172 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1173
1174static void
1175convert_cset_to_csinc (aarch64_inst *inst)
1176{
1177 copy_operand_info (inst, 3, 1);
1178 copy_operand_info (inst, 2, 0);
1179 copy_operand_info (inst, 1, 0);
1180 inst->operands[1].reg.regno = 0x1f;
1181 inst->operands[2].reg.regno = 0x1f;
1182 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1183}
1184
1185/* MOV <Wd>, #<imm>
1186 is equivalent to:
1187 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1188
1189static void
1190convert_mov_to_movewide (aarch64_inst *inst)
1191{
1192 int is32;
1193 uint32_t shift_amount;
1194 uint64_t value;
1195
1196 switch (inst->opcode->op)
1197 {
1198 case OP_MOV_IMM_WIDE:
1199 value = inst->operands[1].imm.value;
1200 break;
1201 case OP_MOV_IMM_WIDEN:
1202 value = ~inst->operands[1].imm.value;
1203 break;
1204 default:
1205 assert (0);
1206 }
1207 inst->operands[1].type = AARCH64_OPND_HALF;
1208 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
062f38fa
RE
1209 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1210 /* The constraint check should have guaranteed this wouldn't happen. */
1211 assert (0);
a06ea964
NC
1212 value >>= shift_amount;
1213 value &= 0xffff;
1214 inst->operands[1].imm.value = value;
1215 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1216 inst->operands[1].shifter.amount = shift_amount;
1217}
1218
1219/* MOV <Wd>, #<imm>
1220 is equivalent to:
1221 ORR <Wd>, WZR, #<imm>. */
1222
1223static void
1224convert_mov_to_movebitmask (aarch64_inst *inst)
1225{
1226 copy_operand_info (inst, 2, 1);
1227 inst->operands[1].reg.regno = 0x1f;
1228 inst->operands[1].skip = 0;
1229}
1230
1231/* Some alias opcodes are assembled by being converted to their real-form. */
1232
1233static void
1234convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1235{
1236 const aarch64_opcode *alias = inst->opcode;
1237
1238 if ((alias->flags & F_CONV) == 0)
1239 goto convert_to_real_return;
1240
1241 switch (alias->op)
1242 {
1243 case OP_ASR_IMM:
1244 case OP_LSR_IMM:
1245 convert_sr_to_bfm (inst);
1246 break;
1247 case OP_LSL_IMM:
1248 convert_lsl_to_ubfm (inst);
1249 break;
1250 case OP_CINC:
1251 case OP_CINV:
1252 case OP_CNEG:
1253 convert_to_csel (inst);
1254 break;
1255 case OP_CSET:
1256 case OP_CSETM:
1257 convert_cset_to_csinc (inst);
1258 break;
1259 case OP_UBFX:
1260 case OP_BFXIL:
1261 case OP_SBFX:
1262 convert_bfx_to_bfm (inst);
1263 break;
1264 case OP_SBFIZ:
1265 case OP_BFI:
1266 case OP_UBFIZ:
1267 convert_bfi_to_bfm (inst);
1268 break;
d685192a
MW
1269 case OP_BFC:
1270 convert_bfc_to_bfm (inst);
1271 break;
a06ea964
NC
1272 case OP_MOV_V:
1273 convert_mov_to_orr (inst);
1274 break;
1275 case OP_MOV_IMM_WIDE:
1276 case OP_MOV_IMM_WIDEN:
1277 convert_mov_to_movewide (inst);
1278 break;
1279 case OP_MOV_IMM_LOG:
1280 convert_mov_to_movebitmask (inst);
1281 break;
1282 case OP_ROR_IMM:
1283 convert_ror_to_extr (inst);
1284 break;
e30181a5
YZ
1285 case OP_SXTL:
1286 case OP_SXTL2:
1287 case OP_UXTL:
1288 case OP_UXTL2:
1289 convert_xtl_to_shll (inst);
1290 break;
a06ea964
NC
1291 default:
1292 break;
1293 }
1294
1295convert_to_real_return:
1296 aarch64_replace_opcode (inst, real);
1297}
1298
/* Encode *INST_ORI of the opcode code OPCODE.
   Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
   matched operand qualifier sequence in *QLF_SEQ.
   MISMATCH_DETAIL, if non-NULL, receives diagnostic information when the
   operand constraint check fails.
   Return 1 on success, 0 if the operands do not satisfy the opcode's
   constraints (in which case *CODE is not written).  */

int
aarch64_opcode_encode (const aarch64_opcode *opcode,
		       const aarch64_inst *inst_ori, aarch64_insn *code,
		       aarch64_opnd_qualifier_t *qlf_seq,
		       aarch64_operand_error *mismatch_detail)
{
  int i;
  const aarch64_opcode *aliased;
  aarch64_inst copy, *inst;

  DEBUG_TRACE ("enter with %s", opcode->name);

  /* Create a copy of *INST_ORI, so that we can do any change we want.  */
  copy = *inst_ori;
  inst = &copy;

  assert (inst->opcode == NULL || inst->opcode == opcode);
  if (inst->opcode == NULL)
    inst->opcode = opcode;

  /* Constrain the operands.
     After passing this, the encoding is guaranteed to succeed.  */
  if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
    {
      DEBUG_TRACE ("FAIL since operand constraint not met");
      return 0;
    }

  /* Get the base value.
     Note: this has to be before the aliasing handling below in order to
     get the base value from the alias opcode before we move on to the
     aliased opcode for encoding.  */
  inst->value = opcode->opcode;

  /* No need to do anything else if the opcode does not have any operand.  */
  if (aarch64_num_of_operands (opcode) == 0)
    goto encoding_exit;

  /* Assign operand indexes and check types.  Also put the matched
     operand qualifiers in *QLF_SEQ to return.
     NOTE(review): QLF_SEQ is not advanced inside the loop, so every
     iteration writes to the same element and only the last operand's
     qualifier survives — looks suspicious; confirm intent against the
     callers before relying on *QLF_SEQ's contents.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      assert (opcode->operands[i] == inst->operands[i].type);
      inst->operands[i].idx = i;
      if (qlf_seq != NULL)
	*qlf_seq = inst->operands[i].qualifier;
    }

  aliased = aarch64_find_real_opcode (opcode);
  /* If the opcode is an alias and it does not ask for direct encoding by
     itself, the instruction will be transformed to the form of real opcode
     and the encoding will be carried out using the rules for the aliased
     opcode.  */
  if (aliased != NULL && (opcode->flags & F_CONV))
    {
      DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
		   aliased->name, opcode->name);
      /* Convert the operands to the form of the real opcode.  */
      convert_to_real (inst, aliased);
      opcode = aliased;
    }

  aarch64_opnd_info *info = inst->operands;

  /* Call the inserter of each operand.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
    {
      const aarch64_operand *opnd;
      enum aarch64_opnd type = opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      /* Operands marked 'skip' were folded into another operand during
	 alias conversion and must not be encoded separately.  */
      if (info->skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      opnd = &aarch64_operands[type];
      if (operand_has_inserter (opnd))
	aarch64_insert_operand (opnd, info, &inst->value, inst);
    }

  /* Call opcode encoders indicated by flags, e.g. F_SF/F_SIZEQ-style
     fields that are not tied to a single operand.  */
  if (opcode_has_special_coder (opcode))
    do_special_encoding (inst);

encoding_exit:
  DEBUG_TRACE ("exit with %s", opcode->name);

  *code = inst->value;

  return 1;
}
This page took 0.340418 seconds and 4 git commands to generate.