[AArch64][SVE 15/32] Add {insert,extract}_all_fields helpers
opcodes/aarch64-asm.c
1/* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2016 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21#include "sysdep.h"
22#include <stdarg.h>
23#include "libiberty.h"
24#include "aarch64-asm.h"
25
26/* Utilities. */
27
28/* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
31
32 N.B. the fields are required to be in such an order that the least significant
33 field for VALUE comes first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
35 is encoded in H:L:M in some cases; in those cases the fields should be passed
36 in the order of M, L, H. */
37
38static inline void
39insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
40{
41 uint32_t num;
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
44 va_list va;
45
46 va_start (va, mask);
47 num = va_arg (va, uint32_t);
48 assert (num <= 5);
49 while (num--)
50 {
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
54 value >>= field->width;
55 }
56 va_end (va);
57}
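/* For example, the H:L:M lane index of a by-element multiply (see
   aarch64_ins_reglane below) is scattered over the three one-bit fields with

     insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);

   so bit 0 of the index lands in M, bit 1 in L and bit 2 in H.  */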
58
59/* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
61
62static void
63insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
64 aarch64_insn value)
65{
66 unsigned int i;
67 enum aarch64_field_kind kind;
68
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
71 {
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
74 value >>= fields[kind].width;
75 }
76}
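/* Illustrative sketch only: for a hypothetical operand whose SELF->fields
   were { FLD_immh, FLD_immb }, insert_all_fields would put the low bits of
   VALUE into FLD_immb (the final field) and the remaining bits into FLD_immh,
   i.e. it is equivalent to

     insert_fields (code, value, 0, 2, FLD_immb, FLD_immh);

   except that the field list comes from the operand description rather than
   the call site.  */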
77
78/* Operand inserters. */
79
80/* Insert register number. */
81const char *
82aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
83 aarch64_insn *code,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED)
85{
86 insert_field (self->fields[0], code, info->reg.regno, 0);
87 return NULL;
88}
89
90/* Insert register number, index and/or other data for SIMD register element
91 operand, e.g. the last source operand in
92 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
93const char *
94aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code, const aarch64_inst *inst)
96{
97 /* regno */
98 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
99 /* index and/or type */
100 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
101 {
102 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
103 if (info->type == AARCH64_OPND_En
104 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
105 {
106 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
107 assert (info->idx == 1); /* Vn */
108 aarch64_insn value = info->reglane.index << pos;
109 insert_field (FLD_imm4, code, value, 0);
110 }
111 else
112 {
113 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
114 imm5<3:0> <V>
115 0000 RESERVED
116 xxx1 B
117 xx10 H
118 x100 S
119 1000 D */
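      /* Worked example: for an H element at <index> 3, pos == 1, so
         value == ((3 << 1) | 1) << 1 == 0b01110; imm5<1:0> == 10 selects
         H and imm5<4:2> == 0b011 holds the index.  */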
120 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
121 insert_field (FLD_imm5, code, value, 0);
122 }
123 }
124 else
125 {
126 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
127 or the vector form SQDMLAL <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>]. */
128 switch (info->qualifier)
129 {
130 case AARCH64_OPND_QLF_S_H:
131 /* H:L:M */
132 insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
133 break;
134 case AARCH64_OPND_QLF_S_S:
135 /* H:L */
136 insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
137 break;
138 case AARCH64_OPND_QLF_S_D:
139 /* H */
140 insert_field (FLD_H, code, info->reglane.index, 0);
141 break;
142 default:
143 assert (0);
144 }
145 }
146 return NULL;
147}
148
149/* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
150const char *
151aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
152 aarch64_insn *code,
153 const aarch64_inst *inst ATTRIBUTE_UNUSED)
154{
155 /* R */
156 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
157 /* len */
158 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
159 return NULL;
160}
161
162/* Insert Rt and opcode fields for a register list operand, e.g. Vt
163 in AdvSIMD load/store instructions. */
164const char *
165aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
166 const aarch64_opnd_info *info, aarch64_insn *code,
167 const aarch64_inst *inst)
168{
169 aarch64_insn value = 0;
170 /* Number of elements in each structure to be loaded/stored. */
171 unsigned num = get_opcode_dependent_value (inst->opcode);
172
173 /* Rt */
174 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
175 /* opcode */
176 switch (num)
177 {
178 case 1:
179 switch (info->reglist.num_regs)
180 {
181 case 1: value = 0x7; break;
182 case 2: value = 0xa; break;
183 case 3: value = 0x6; break;
184 case 4: value = 0x2; break;
185 default: assert (0);
186 }
187 break;
188 case 2:
189 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
190 break;
191 case 3:
192 value = 0x4;
193 break;
194 case 4:
195 value = 0x0;
196 break;
197 default:
198 assert (0);
199 }
200 insert_field (FLD_opcode, code, value, 0);
201
202 return NULL;
203}
204
205/* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
206 single structure to all lanes instructions. */
207const char *
208aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
209 const aarch64_opnd_info *info, aarch64_insn *code,
210 const aarch64_inst *inst)
211{
212 aarch64_insn value;
213 /* The opcode dependent area stores the number of elements in
214 each structure to be loaded/stored. */
215 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
216
217 /* Rt */
218 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
219 /* S */
220 value = (aarch64_insn) 0;
221 if (is_ld1r && info->reglist.num_regs == 2)
222 /* OP_LD1R does not have an alternating variant, but has a "two consecutive"
223 variant instead. */
224 value = (aarch64_insn) 1;
225 insert_field (FLD_S, code, value, 0);
226
227 return NULL;
228}
229
230/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
231 operand e.g. Vt in AdvSIMD load/store single element instructions. */
232const char *
233aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
234 const aarch64_opnd_info *info, aarch64_insn *code,
235 const aarch64_inst *inst ATTRIBUTE_UNUSED)
236{
237 aarch64_field field = {0, 0};
238 aarch64_insn QSsize = 0; /* fields Q:S:size. */
239 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
240
241 assert (info->reglist.has_index);
242
243 /* Rt */
244 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
245 /* Encode the index, opcode<2:1> and size. */
246 switch (info->qualifier)
247 {
248 case AARCH64_OPND_QLF_S_B:
249 /* Index encoded in "Q:S:size". */
250 QSsize = info->reglist.index;
251 opcodeh2 = 0x0;
252 break;
253 case AARCH64_OPND_QLF_S_H:
254 /* Index encoded in "Q:S:size<1>". */
255 QSsize = info->reglist.index << 1;
256 opcodeh2 = 0x1;
257 break;
258 case AARCH64_OPND_QLF_S_S:
259 /* Index encoded in "Q:S". */
260 QSsize = info->reglist.index << 2;
261 opcodeh2 = 0x2;
262 break;
263 case AARCH64_OPND_QLF_S_D:
264 /* Index encoded in "Q". */
265 QSsize = info->reglist.index << 3 | 0x1;
266 opcodeh2 = 0x2;
267 break;
268 default:
269 assert (0);
270 }
271 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
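  /* Worked example (sketch): for LD1 {<Vt>.S}[2], [<Xn|SP>], QSsize is
     2 << 2 == 0b1000, giving size == 00, S == 0 and Q == 1, with opcodeh2
     remaining 0x2 for the S variant.  */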
272 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
273 insert_field_2 (&field, code, opcodeh2, 0);
274
275 return NULL;
276}
277
278/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
279 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
280 or SSHR <V><d>, <V><n>, #<shift>. */
281const char *
282aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
283 const aarch64_opnd_info *info,
284 aarch64_insn *code, const aarch64_inst *inst)
285{
286 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
287 aarch64_insn Q, imm;
288
289 if (inst->opcode->iclass == asimdshf)
290 {
291 /* Q
292 immh Q <T>
293 0000 x SEE AdvSIMD modified immediate
294 0001 0 8B
295 0001 1 16B
296 001x 0 4H
297 001x 1 8H
298 01xx 0 2S
299 01xx 1 4S
300 1xxx 0 RESERVED
301 1xxx 1 2D */
302 Q = (val & 0x1) ? 1 : 0;
303 insert_field (FLD_Q, code, Q, inst->opcode->mask);
304 val >>= 1;
305 }
306
307 assert (info->type == AARCH64_OPND_IMM_VLSR
308 || info->type == AARCH64_OPND_IMM_VLSL);
309
310 if (info->type == AARCH64_OPND_IMM_VLSR)
311 /* immh:immb
312 immh <shift>
313 0000 SEE AdvSIMD modified immediate
314 0001 (16-UInt(immh:immb))
315 001x (32-UInt(immh:immb))
316 01xx (64-UInt(immh:immb))
317 1xxx (128-UInt(immh:immb)) */
318 imm = (16 << (unsigned)val) - info->imm.value;
319 else
320 /* immh:immb
321 immh <shift>
322 0000 SEE AdvSIMD modified immediate
323 0001 (UInt(immh:immb)-8)
324 001x (UInt(immh:immb)-16)
325 01xx (UInt(immh:immb)-32)
326 1xxx (UInt(immh:immb)-64) */
327 imm = info->imm.value + (8 << (unsigned)val);
328 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
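  /* Worked example (sketch): SSHR <Vd>.8B, <Vn>.8B, #3 reaches this point
     with val == 0 after the Q bit has been stripped, so imm == 16 - 3 == 13
     and immh:immb is encoded as 0001:101.  */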
329
330 return NULL;
331}
332
333/* Insert fields for e.g. the immediate operands in
334 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
335const char *
336aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
337 aarch64_insn *code,
338 const aarch64_inst *inst ATTRIBUTE_UNUSED)
339{
340 int64_t imm;
341
342 imm = info->imm.value;
343 if (operand_need_shift_by_two (self))
344 imm >>= 2;
345 insert_all_fields (self, code, imm);
346 return NULL;
347}
348
349/* Insert immediate and its shift amount for e.g. the last operand in
350 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
351const char *
352aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
353 aarch64_insn *code, const aarch64_inst *inst)
354{
355 /* imm16 */
356 aarch64_ins_imm (self, info, code, inst);
357 /* hw */
358 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
359 return NULL;
360}
361
362/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
363 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
364const char *
365aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
366 const aarch64_opnd_info *info,
367 aarch64_insn *code,
368 const aarch64_inst *inst ATTRIBUTE_UNUSED)
369{
370 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
371 uint64_t imm = info->imm.value;
372 enum aarch64_modifier_kind kind = info->shifter.kind;
373 int amount = info->shifter.amount;
374 aarch64_field field = {0, 0};
375
376 /* a:b:c:d:e:f:g:h */
377 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
378 {
379 /* Either MOVI <Dd>, #<imm>
380 or MOVI <Vd>.2D, #<imm>.
381 <imm> is a 64-bit immediate
382 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
383 encoded in "a:b:c:d:e:f:g:h". */
384 imm = aarch64_shrink_expanded_imm8 (imm);
385 assert ((int)imm >= 0);
386 }
387 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
388
389 if (kind == AARCH64_MOD_NONE)
390 return NULL;
391
392 /* shift amount partially in cmode */
393 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
394 if (kind == AARCH64_MOD_LSL)
395 {
396 /* AARCH64_MOD_LSL: shift zeros. */
397 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
398 assert (esize == 4 || esize == 2 || esize == 1);
399 /* For 8-bit move immediate, the optional LSL #0 does not require
400 encoding. */
401 if (esize == 1)
402 return NULL;
403 amount >>= 3;
404 if (esize == 4)
405 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
406 else
407 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
408 }
409 else
410 {
411 /* AARCH64_MOD_MSL: shift ones. */
412 amount >>= 4;
413 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
414 }
415 insert_field_2 (&field, code, amount, 0);
416
417 return NULL;
418}
419
420/* Insert #<fbits> for the immediate operand in fp fix-point instructions,
421 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
422const char *
423aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
424 aarch64_insn *code,
425 const aarch64_inst *inst ATTRIBUTE_UNUSED)
426{
427 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
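  /* E.g. SCVTF <Dd>, <Wn>, #16 stores 64 - 16 == 48 in the scale field.  */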
428 return NULL;
429}
430
431/* Insert arithmetic immediate for e.g. the last operand in
432 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
433const char *
434aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
435 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
436{
437 /* shift */
438 aarch64_insn value = info->shifter.amount ? 1 : 0;
439 insert_field (self->fields[0], code, value, 0);
440 /* imm12 (unsigned) */
441 insert_field (self->fields[1], code, info->imm.value, 0);
442 return NULL;
443}
444
445/* Insert logical/bitmask immediate for e.g. the last operand in
446 ORR <Wd|WSP>, <Wn>, #<imm>. */
447const char *
448aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
449 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
450{
451 aarch64_insn value;
452 uint64_t imm = info->imm.value;
453 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
454
455 if (inst->opcode->op == OP_BIC)
456 imm = ~imm;
457 if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
458 /* The constraint check should have guaranteed this wouldn't happen. */
459 assert (0);
460
461 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
462 self->fields[0]);
463 return NULL;
464}
465
466/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
467 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
468const char *
469aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
470 aarch64_insn *code, const aarch64_inst *inst)
471{
472 aarch64_insn value = 0;
473
474 assert (info->idx == 0);
475
476 /* Rt */
477 aarch64_ins_regno (self, info, code, inst);
478 if (inst->opcode->iclass == ldstpair_indexed
479 || inst->opcode->iclass == ldstnapair_offs
480 || inst->opcode->iclass == ldstpair_off
481 || inst->opcode->iclass == loadlit)
482 {
483 /* size */
484 switch (info->qualifier)
485 {
486 case AARCH64_OPND_QLF_S_S: value = 0; break;
487 case AARCH64_OPND_QLF_S_D: value = 1; break;
488 case AARCH64_OPND_QLF_S_Q: value = 2; break;
489 default: assert (0);
490 }
491 insert_field (FLD_ldst_size, code, value, 0);
492 }
493 else
494 {
495 /* opc[1]:size */
496 value = aarch64_get_qualifier_standard_value (info->qualifier);
497 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
498 }
499
500 return NULL;
501}
502
503/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
504const char *
505aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
506 const aarch64_opnd_info *info, aarch64_insn *code,
507 const aarch64_inst *inst ATTRIBUTE_UNUSED)
508{
509 /* Rn */
510 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
511 return NULL;
512}
513
514/* Encode the address operand for e.g.
515 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
516const char *
517aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
518 const aarch64_opnd_info *info, aarch64_insn *code,
519 const aarch64_inst *inst ATTRIBUTE_UNUSED)
520{
521 aarch64_insn S;
522 enum aarch64_modifier_kind kind = info->shifter.kind;
523
524 /* Rn */
525 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
526 /* Rm */
527 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
528 /* option */
529 if (kind == AARCH64_MOD_LSL)
530 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven lookup. */
531 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
532 /* S */
533 if (info->qualifier != AARCH64_OPND_QLF_S_B)
534 S = info->shifter.amount != 0;
535 else
536 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}],
537 S <amount>
538 0 [absent]
539 1 #0
540 Must be #0 if <extend> is explicitly LSL. */
541 S = info->shifter.operator_present && info->shifter.amount_present;
542 insert_field (FLD_S, code, S, 0);
543
544 return NULL;
545}
546
547/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
548const char *
549aarch64_ins_addr_simm (const aarch64_operand *self,
550 const aarch64_opnd_info *info,
551 aarch64_insn *code,
552 const aarch64_inst *inst ATTRIBUTE_UNUSED)
553{
554 int imm;
555
556 /* Rn */
557 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
558 /* simm (imm9 or imm7) */
559 imm = info->addr.offset.imm;
560 if (self->fields[0] == FLD_imm7)
561 /* Scaled immediate in ld/st pair instructions. */
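      /* E.g. LDP <Xt1>, <Xt2>, [<Xn|SP>, #16] stores 16 >> 3 == 2 in imm7.  */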
562 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
563 insert_field (self->fields[0], code, imm, 0);
564 /* pre/post- index */
565 if (info->addr.writeback)
566 {
567 assert (inst->opcode->iclass != ldst_unscaled
568 && inst->opcode->iclass != ldstnapair_offs
569 && inst->opcode->iclass != ldstpair_off
570 && inst->opcode->iclass != ldst_unpriv);
571 assert (info->addr.preind != info->addr.postind);
572 if (info->addr.preind)
573 insert_field (self->fields[1], code, 1, 0);
574 }
575
576 return NULL;
577}
578
579/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
580const char *
581aarch64_ins_addr_uimm12 (const aarch64_operand *self,
582 const aarch64_opnd_info *info,
583 aarch64_insn *code,
584 const aarch64_inst *inst ATTRIBUTE_UNUSED)
585{
586 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
587
588 /* Rn */
589 insert_field (self->fields[0], code, info->addr.base_regno, 0);
590 /* uimm12 */
591 insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
592 return NULL;
593}
594
595/* Encode the address operand for e.g.
596 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
597const char *
598aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
599 const aarch64_opnd_info *info, aarch64_insn *code,
600 const aarch64_inst *inst ATTRIBUTE_UNUSED)
601{
602 /* Rn */
603 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
604 /* Rm | #<amount> */
605 if (info->addr.offset.is_reg)
606 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
607 else
608 insert_field (FLD_Rm, code, 0x1f, 0);
609 return NULL;
610}
611
612/* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
613const char *
614aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
615 const aarch64_opnd_info *info, aarch64_insn *code,
616 const aarch64_inst *inst ATTRIBUTE_UNUSED)
617{
618 /* cond */
619 insert_field (FLD_cond, code, info->cond->value, 0);
620 return NULL;
621}
622
623/* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
624const char *
625aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
626 const aarch64_opnd_info *info, aarch64_insn *code,
627 const aarch64_inst *inst ATTRIBUTE_UNUSED)
628{
629 /* op0:op1:CRn:CRm:op2 */
630 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
631 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
632 return NULL;
633}
634
635/* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
636const char *
637aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
638 const aarch64_opnd_info *info, aarch64_insn *code,
639 const aarch64_inst *inst ATTRIBUTE_UNUSED)
640{
641 /* op1:op2 */
642 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
643 FLD_op2, FLD_op1);
644 return NULL;
645}
646
647/* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
648const char *
649aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
650 const aarch64_opnd_info *info, aarch64_insn *code,
651 const aarch64_inst *inst ATTRIBUTE_UNUSED)
652{
653 /* op1:CRn:CRm:op2 */
654 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
655 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
656 return NULL;
657}
658
659/* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
660
661const char *
662aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
663 const aarch64_opnd_info *info, aarch64_insn *code,
664 const aarch64_inst *inst ATTRIBUTE_UNUSED)
665{
666 /* CRm */
667 insert_field (FLD_CRm, code, info->barrier->value, 0);
668 return NULL;
669}
670
671/* Encode the prefetch operation option operand for e.g.
672 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
673
674const char *
675aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
676 const aarch64_opnd_info *info, aarch64_insn *code,
677 const aarch64_inst *inst ATTRIBUTE_UNUSED)
678{
679 /* prfop in Rt */
680 insert_field (FLD_Rt, code, info->prfop->value, 0);
681 return NULL;
682}
683
684/* Encode the hint number for instructions that alias HINT but take an
685 operand. */
686
687const char *
688aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
689 const aarch64_opnd_info *info, aarch64_insn *code,
690 const aarch64_inst *inst ATTRIBUTE_UNUSED)
691{
692 /* CRm:op2. */
693 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
694 return NULL;
695}
696
697/* Encode the extended register operand for e.g.
698 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
699const char *
700aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
701 const aarch64_opnd_info *info, aarch64_insn *code,
702 const aarch64_inst *inst ATTRIBUTE_UNUSED)
703{
704 enum aarch64_modifier_kind kind;
705
706 /* Rm */
707 insert_field (FLD_Rm, code, info->reg.regno, 0);
708 /* option */
709 kind = info->shifter.kind;
710 if (kind == AARCH64_MOD_LSL)
711 kind = info->qualifier == AARCH64_OPND_QLF_W
712 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
713 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
714 /* imm3 */
715 insert_field (FLD_imm3, code, info->shifter.amount, 0);
716
717 return NULL;
718}
719
720/* Encode the shifted register operand for e.g.
721 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
722const char *
723aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
724 const aarch64_opnd_info *info, aarch64_insn *code,
725 const aarch64_inst *inst ATTRIBUTE_UNUSED)
726{
727 /* Rm */
728 insert_field (FLD_Rm, code, info->reg.regno, 0);
729 /* shift */
730 insert_field (FLD_shift, code,
731 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
732 /* imm6 */
733 insert_field (FLD_imm6, code, info->shifter.amount, 0);
734
735 return NULL;
736}
737
738/* Miscellaneous encoding functions. */
739
740/* Encode size[0], i.e. bit 22, for
741 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
742
743static void
744encode_asimd_fcvt (aarch64_inst *inst)
745{
746 aarch64_insn value;
747 aarch64_field field = {0, 0};
748 enum aarch64_opnd_qualifier qualifier;
749
750 switch (inst->opcode->op)
751 {
752 case OP_FCVTN:
753 case OP_FCVTN2:
754 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
755 qualifier = inst->operands[1].qualifier;
756 break;
757 case OP_FCVTL:
758 case OP_FCVTL2:
759 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
760 qualifier = inst->operands[0].qualifier;
761 break;
762 default:
763 assert (0);
764 }
765 assert (qualifier == AARCH64_OPND_QLF_V_4S
766 || qualifier == AARCH64_OPND_QLF_V_2D);
767 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
768 gen_sub_field (FLD_size, 0, 1, &field);
769 insert_field_2 (&field, &inst->value, value, 0);
770}
771
772/* Encode size[0], i.e. bit 22, for
773 e.g. FCVTXN <Vb><d>, <Va><n>. */
774
775static void
776encode_asisd_fcvtxn (aarch64_inst *inst)
777{
778 aarch64_insn val = 1;
779 aarch64_field field = {0, 0};
780 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
781 gen_sub_field (FLD_size, 0, 1, &field);
782 insert_field_2 (&field, &inst->value, val, 0);
783}
784
785/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
786static void
787encode_fcvt (aarch64_inst *inst)
788{
789 aarch64_insn val;
790 const aarch64_field field = {15, 2};
791
792 /* opc dstsize */
793 switch (inst->operands[0].qualifier)
794 {
795 case AARCH64_OPND_QLF_S_S: val = 0; break;
796 case AARCH64_OPND_QLF_S_D: val = 1; break;
797 case AARCH64_OPND_QLF_S_H: val = 3; break;
798 default: abort ();
799 }
800 insert_field_2 (&field, &inst->value, val, 0);
801
802 return;
803}
804
805/* Do miscellaneous encodings that are not common enough to be driven by
806 flags. */
807
808static void
809do_misc_encoding (aarch64_inst *inst)
810{
811 switch (inst->opcode->op)
812 {
813 case OP_FCVT:
814 encode_fcvt (inst);
815 break;
816 case OP_FCVTN:
817 case OP_FCVTN2:
818 case OP_FCVTL:
819 case OP_FCVTL2:
820 encode_asimd_fcvt (inst);
821 break;
822 case OP_FCVTXN_S:
823 encode_asisd_fcvtxn (inst);
824 break;
825 default: break;
826 }
827}
828
829/* Encode the 'size' and 'Q' field for e.g. SHADD. */
830static void
831encode_sizeq (aarch64_inst *inst)
832{
833 aarch64_insn sizeq;
834 enum aarch64_field_kind kind;
835 int idx;
836
837 /* Get the index of the operand whose information we are going to use
838 to encode the size and Q fields.
839 This is deduced from the possible valid qualifier lists. */
840 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
841 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
842 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
843 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
844 /* Q */
845 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
846 /* size */
847 if (inst->opcode->iclass == asisdlse
848 || inst->opcode->iclass == asisdlsep
849 || inst->opcode->iclass == asisdlso
850 || inst->opcode->iclass == asisdlsop)
851 kind = FLD_vldst_size;
852 else
853 kind = FLD_size;
854 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
855}
856
857/* Opcodes that have fields shared by multiple operands are usually marked
858 with flags. In this function, we detect such flags and use the
859 information in one of the related operands to do the encoding. The chosen
860 operand is not an arbitrary one, but an operand that carries enough
861 information for such an encoding. */
862
863static void
864do_special_encoding (struct aarch64_inst *inst)
865{
866 int idx;
867 aarch64_insn value = 0;
a06ea964
NC
868
869 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
870
871 /* Condition for truly conditional executed instructions, e.g. b.cond. */
872 if (inst->opcode->flags & F_COND)
873 {
874 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
875 }
876 if (inst->opcode->flags & F_SF)
877 {
878 idx = select_operand_for_sf_field_coding (inst->opcode);
879 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
880 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
881 ? 1 : 0;
882 insert_field (FLD_sf, &inst->value, value, 0);
883 if (inst->opcode->flags & F_N)
884 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
885 }
886 if (inst->opcode->flags & F_LSE_SZ)
887 {
888 idx = select_operand_for_sf_field_coding (inst->opcode);
889 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
890 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
891 ? 1 : 0;
892 insert_field (FLD_lse_sz, &inst->value, value, 0);
893 }
894 if (inst->opcode->flags & F_SIZEQ)
895 encode_sizeq (inst);
896 if (inst->opcode->flags & F_FPTYPE)
897 {
898 idx = select_operand_for_fptype_field_coding (inst->opcode);
899 switch (inst->operands[idx].qualifier)
900 {
901 case AARCH64_OPND_QLF_S_S: value = 0; break;
902 case AARCH64_OPND_QLF_S_D: value = 1; break;
903 case AARCH64_OPND_QLF_S_H: value = 3; break;
904 default: assert (0);
905 }
906 insert_field (FLD_type, &inst->value, value, 0);
907 }
908 if (inst->opcode->flags & F_SSIZE)
909 {
910 enum aarch64_opnd_qualifier qualifier;
911 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
912 qualifier = inst->operands[idx].qualifier;
913 assert (qualifier >= AARCH64_OPND_QLF_S_B
914 && qualifier <= AARCH64_OPND_QLF_S_Q);
915 value = aarch64_get_qualifier_standard_value (qualifier);
916 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
917 }
918 if (inst->opcode->flags & F_T)
919 {
920 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
921 aarch64_field field = {0, 0};
922 enum aarch64_opnd_qualifier qualifier;
923
924 idx = 0;
925 qualifier = inst->operands[idx].qualifier;
926 assert (aarch64_get_operand_class (inst->opcode->operands[0])
927 == AARCH64_OPND_CLASS_SIMD_REG
928 && qualifier >= AARCH64_OPND_QLF_V_8B
929 && qualifier <= AARCH64_OPND_QLF_V_2D);
930 /* imm5<3:0> q <t>
931 0000 x reserved
932 xxx1 0 8b
933 xxx1 1 16b
934 xx10 0 4h
935 xx10 1 8h
936 x100 0 2s
937 x100 1 4s
938 1000 0 reserved
939 1000 1 2d */
940 value = aarch64_get_qualifier_standard_value (qualifier);
941 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
942 num = (int) value >> 1;
943 assert (num >= 0 && num <= 3);
944 gen_sub_field (FLD_imm5, 0, num + 1, &field);
945 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
946 }
947 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
948 {
949 /* Use Rt to encode in the case of e.g.
950 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
951 enum aarch64_opnd_qualifier qualifier;
952 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
953 if (idx == -1)
954 /* Otherwise use the result operand, which has to be an integer
955 register. */
956 idx = 0;
957 assert (idx == 0 || idx == 1);
958 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
959 == AARCH64_OPND_CLASS_INT_REG);
960 qualifier = inst->operands[idx].qualifier;
961 insert_field (FLD_Q, &inst->value,
962 aarch64_get_qualifier_standard_value (qualifier), 0);
963 }
964 if (inst->opcode->flags & F_LDS_SIZE)
965 {
966 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
967 enum aarch64_opnd_qualifier qualifier;
968 aarch64_field field = {0, 0};
969 assert (aarch64_get_operand_class (inst->opcode->operands[0])
970 == AARCH64_OPND_CLASS_INT_REG);
971 gen_sub_field (FLD_opc, 0, 1, &field);
972 qualifier = inst->operands[0].qualifier;
973 insert_field_2 (&field, &inst->value,
974 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
975 }
976 /* Miscellaneous encoding as the last step. */
977 if (inst->opcode->flags & F_MISC)
978 do_misc_encoding (inst);
979
980 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
981}
982
983/* Converters converting an alias opcode instruction to its real form. */
984
985/* ROR <Wd>, <Ws>, #<shift>
986 is equivalent to:
987 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
988static void
989convert_ror_to_extr (aarch64_inst *inst)
990{
991 copy_operand_info (inst, 3, 2);
992 copy_operand_info (inst, 2, 1);
993}
994
995/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
996 is equivalent to:
997 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
998static void
999convert_xtl_to_shll (aarch64_inst *inst)
1000{
1001 inst->operands[2].qualifier = inst->operands[1].qualifier;
1002 inst->operands[2].imm.value = 0;
1003}
1004
1005/* Convert
1006 LSR <Xd>, <Xn>, #<shift>
1007 to
1008 UBFM <Xd>, <Xn>, #<shift>, #63. */
1009static void
1010convert_sr_to_bfm (aarch64_inst *inst)
1011{
1012 inst->operands[3].imm.value =
1013 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1014}
1015
1016/* Convert MOV to ORR. */
1017static void
1018convert_mov_to_orr (aarch64_inst *inst)
1019{
1020 /* MOV <Vd>.<T>, <Vn>.<T>
1021 is equivalent to:
1022 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1023 copy_operand_info (inst, 2, 1);
1024}
1025
1026/* When <imms> >= <immr>, the instruction written:
1027 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1028 is equivalent to:
1029 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1030
1031static void
1032convert_bfx_to_bfm (aarch64_inst *inst)
1033{
1034 int64_t lsb, width;
1035
1036 /* Convert the operand. */
1037 lsb = inst->operands[2].imm.value;
1038 width = inst->operands[3].imm.value;
1039 inst->operands[2].imm.value = lsb;
1040 inst->operands[3].imm.value = lsb + width - 1;
1041}
1042
1043/* When <imms> < <immr>, the instruction written:
1044 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1045 is equivalent to:
1046 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
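/* E.g. SBFIZ <Xd>, <Xn>, #8, #4 is assembled as SBFM <Xd>, <Xn>, #56, #3.  */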
1047
1048static void
1049convert_bfi_to_bfm (aarch64_inst *inst)
1050{
1051 int64_t lsb, width;
1052
1053 /* Convert the operand. */
1054 lsb = inst->operands[2].imm.value;
1055 width = inst->operands[3].imm.value;
1056 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1057 {
1058 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1059 inst->operands[3].imm.value = width - 1;
1060 }
1061 else
1062 {
1063 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1064 inst->operands[3].imm.value = width - 1;
1065 }
1066}
1067
1068/* The instruction written:
1069 BFC <Xd>, #<lsb>, #<width>
1070 is equivalent to:
1071 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1072
1073static void
1074convert_bfc_to_bfm (aarch64_inst *inst)
1075{
1076 int64_t lsb, width;
1077
1078 /* Insert XZR. */
1079 copy_operand_info (inst, 3, 2);
1080 copy_operand_info (inst, 2, 1);
1081 copy_operand_info (inst, 2, 0);
1082 inst->operands[1].reg.regno = 0x1f;
1083
1084 /* Convert the immediate operand. */
1085 lsb = inst->operands[2].imm.value;
1086 width = inst->operands[3].imm.value;
1087 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1088 {
1089 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1090 inst->operands[3].imm.value = width - 1;
1091 }
1092 else
1093 {
1094 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1095 inst->operands[3].imm.value = width - 1;
1096 }
1097}
1098
1099/* The instruction written:
1100 LSL <Xd>, <Xn>, #<shift>
1101 is equivalent to:
1102 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
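/* E.g. LSL <Xd>, <Xn>, #8 is assembled as UBFM <Xd>, <Xn>, #56, #55.  */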
1103
1104static void
1105convert_lsl_to_ubfm (aarch64_inst *inst)
1106{
1107 int64_t shift = inst->operands[2].imm.value;
1108
1109 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1110 {
1111 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1112 inst->operands[3].imm.value = 31 - shift;
1113 }
1114 else
1115 {
1116 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1117 inst->operands[3].imm.value = 63 - shift;
1118 }
1119}
1120
1121/* CINC <Wd>, <Wn>, <cond>
1122 is equivalent to:
1123 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1124
1125static void
1126convert_to_csel (aarch64_inst *inst)
1127{
1128 copy_operand_info (inst, 3, 2);
1129 copy_operand_info (inst, 2, 1);
1130 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1131}
1132
1133/* CSET <Wd>, <cond>
1134 is equivalent to:
1135 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1136
1137static void
1138convert_cset_to_csinc (aarch64_inst *inst)
1139{
1140 copy_operand_info (inst, 3, 1);
1141 copy_operand_info (inst, 2, 0);
1142 copy_operand_info (inst, 1, 0);
1143 inst->operands[1].reg.regno = 0x1f;
1144 inst->operands[2].reg.regno = 0x1f;
1145 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1146}
1147
1148/* MOV <Wd>, #<imm>
1149 is equivalent to:
1150 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
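/* E.g. MOV <Xd>, #0x12340000 becomes MOVZ <Xd>, #0x1234, LSL #16; for
   OP_MOV_IMM_WIDEN the value is inverted first so that MOVN can be used.  */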
1151
1152static void
1153convert_mov_to_movewide (aarch64_inst *inst)
1154{
1155 int is32;
1156 uint32_t shift_amount;
1157 uint64_t value;
1158
1159 switch (inst->opcode->op)
1160 {
1161 case OP_MOV_IMM_WIDE:
1162 value = inst->operands[1].imm.value;
1163 break;
1164 case OP_MOV_IMM_WIDEN:
1165 value = ~inst->operands[1].imm.value;
1166 break;
1167 default:
1168 assert (0);
1169 }
1170 inst->operands[1].type = AARCH64_OPND_HALF;
1171 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1172 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
1173 /* The constraint check should have guaranteed this wouldn't happen. */
1174 assert (0);
1175 value >>= shift_amount;
1176 value &= 0xffff;
1177 inst->operands[1].imm.value = value;
1178 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1179 inst->operands[1].shifter.amount = shift_amount;
1180}
1181
1182/* MOV <Wd>, #<imm>
1183 is equivalent to:
1184 ORR <Wd>, WZR, #<imm>. */
1185
1186static void
1187convert_mov_to_movebitmask (aarch64_inst *inst)
1188{
1189 copy_operand_info (inst, 2, 1);
1190 inst->operands[1].reg.regno = 0x1f;
1191 inst->operands[1].skip = 0;
1192}
1193
1194/* Some alias opcodes are assembled by being converted to their real-form. */
1195
1196static void
1197convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1198{
1199 const aarch64_opcode *alias = inst->opcode;
1200
1201 if ((alias->flags & F_CONV) == 0)
1202 goto convert_to_real_return;
1203
1204 switch (alias->op)
1205 {
1206 case OP_ASR_IMM:
1207 case OP_LSR_IMM:
1208 convert_sr_to_bfm (inst);
1209 break;
1210 case OP_LSL_IMM:
1211 convert_lsl_to_ubfm (inst);
1212 break;
1213 case OP_CINC:
1214 case OP_CINV:
1215 case OP_CNEG:
1216 convert_to_csel (inst);
1217 break;
1218 case OP_CSET:
1219 case OP_CSETM:
1220 convert_cset_to_csinc (inst);
1221 break;
1222 case OP_UBFX:
1223 case OP_BFXIL:
1224 case OP_SBFX:
1225 convert_bfx_to_bfm (inst);
1226 break;
1227 case OP_SBFIZ:
1228 case OP_BFI:
1229 case OP_UBFIZ:
1230 convert_bfi_to_bfm (inst);
1231 break;
1232 case OP_BFC:
1233 convert_bfc_to_bfm (inst);
1234 break;
1235 case OP_MOV_V:
1236 convert_mov_to_orr (inst);
1237 break;
1238 case OP_MOV_IMM_WIDE:
1239 case OP_MOV_IMM_WIDEN:
1240 convert_mov_to_movewide (inst);
1241 break;
1242 case OP_MOV_IMM_LOG:
1243 convert_mov_to_movebitmask (inst);
1244 break;
1245 case OP_ROR_IMM:
1246 convert_ror_to_extr (inst);
1247 break;
1248 case OP_SXTL:
1249 case OP_SXTL2:
1250 case OP_UXTL:
1251 case OP_UXTL2:
1252 convert_xtl_to_shll (inst);
1253 break;
1254 default:
1255 break;
1256 }
1257
1258convert_to_real_return:
1259 aarch64_replace_opcode (inst, real);
1260}
1261
1262/* Encode *INST_ORI, an instruction of the opcode OPCODE.
1263 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1264 matched operand qualifier sequence in *QLF_SEQ. */
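/* A typical call looks like the following sketch (variable names are
   hypothetical, not part of this file):

     aarch64_insn code;
     aarch64_operand_error error;
     if (!aarch64_opcode_encode (opcode, &inst, &code, NULL, &error))
       handle the mismatch described in ERROR;

   A zero return means the operand constraints were not met and ERROR has been
   filled in by aarch64_match_operands_constraint.  */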
1265
1266int
1267aarch64_opcode_encode (const aarch64_opcode *opcode,
1268 const aarch64_inst *inst_ori, aarch64_insn *code,
1269 aarch64_opnd_qualifier_t *qlf_seq,
1270 aarch64_operand_error *mismatch_detail)
1271{
1272 int i;
1273 const aarch64_opcode *aliased;
1274 aarch64_inst copy, *inst;
1275
1276 DEBUG_TRACE ("enter with %s", opcode->name);
1277
1278 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1279 copy = *inst_ori;
1280 inst = &copy;
1281
1282 assert (inst->opcode == NULL || inst->opcode == opcode);
1283 if (inst->opcode == NULL)
1284 inst->opcode = opcode;
1285
1286 /* Constrain the operands.
1287 After passing this, the encoding is guaranteed to succeed. */
1288 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1289 {
1290 DEBUG_TRACE ("FAIL since operand constraint not met");
1291 return 0;
1292 }
1293
1294 /* Get the base value.
1295 Note: this has to be before the aliasing handling below in order to
1296 get the base value from the alias opcode before we move on to the
1297 aliased opcode for encoding. */
1298 inst->value = opcode->opcode;
1299
1300 /* No need to do anything else if the opcode does not have any operand. */
1301 if (aarch64_num_of_operands (opcode) == 0)
1302 goto encoding_exit;
1303
1304 /* Assign operand indexes and check types. Also put the matched
1305 operand qualifiers in *QLF_SEQ to return. */
1306 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1307 {
1308 assert (opcode->operands[i] == inst->operands[i].type);
1309 inst->operands[i].idx = i;
1310 if (qlf_seq != NULL)
1311 *qlf_seq = inst->operands[i].qualifier;
1312 }
1313
1314 aliased = aarch64_find_real_opcode (opcode);
1315 /* If the opcode is an alias and it does not ask for direct encoding by
1316 itself, the instruction will be transformed to the form of real opcode
1317 and the encoding will be carried out using the rules for the aliased
1318 opcode. */
1319 if (aliased != NULL && (opcode->flags & F_CONV))
1320 {
1321 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1322 aliased->name, opcode->name);
1323 /* Convert the operands to the form of the real opcode. */
1324 convert_to_real (inst, aliased);
1325 opcode = aliased;
1326 }
1327
1328 aarch64_opnd_info *info = inst->operands;
1329
1330 /* Call the inserter of each operand. */
1331 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1332 {
1333 const aarch64_operand *opnd;
1334 enum aarch64_opnd type = opcode->operands[i];
1335 if (type == AARCH64_OPND_NIL)
1336 break;
1337 if (info->skip)
1338 {
1339 DEBUG_TRACE ("skip the incomplete operand %d", i);
1340 continue;
1341 }
1342 opnd = &aarch64_operands[type];
1343 if (operand_has_inserter (opnd))
1344 aarch64_insert_operand (opnd, info, &inst->value, inst);
1345 }
1346
1347 /* Call opcode encoders indicated by flags. */
1348 if (opcode_has_special_coder (opcode))
1349 do_special_encoding (inst);
1350
1351encoding_exit:
1352 DEBUG_TRACE ("exit with %s", opcode->name);
1353
1354 *code = inst->value;
1355
1356 return 1;
1357}