1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright 2012 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "aarch64-asm.h"
24
25 /* Utilities. */
26
27 /* The unnamed arguments consist of the number of fields and information about
28 these fields where the VALUE will be inserted into CODE. MASK can be zero or
29 the base mask of the opcode.
30
31 N.B. the fields are required to be in such an order that the least significant
32 field for VALUE comes first, e.g. the <index> in
33 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
34 is encoded in H:L:M in some cases; the fields H:L:M should then be passed in
35 the order of M, L, H. */
36
37 static inline void
38 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
39 {
40 uint32_t num;
41 const aarch64_field *field;
42 enum aarch64_field_kind kind;
43 va_list va;
44
45 va_start (va, mask);
46 num = va_arg (va, uint32_t);
47 assert (num <= 5);
48 while (num--)
49 {
50 kind = va_arg (va, enum aarch64_field_kind);
51 field = &fields[kind];
52 insert_field (kind, code, value, mask);
53 value >>= field->width;
54 }
55 va_end (va);
56 }
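
/* For example, spreading a 3-bit index value of 5 (binary 101) over H:L:M
   would be done with
     insert_fields (code, 5, 0, 3, FLD_M, FLD_L, FLD_H);
   so that bit 0 of the value lands in M, bit 1 in L and bit 2 in H, i.e. the
   least significant field is named first, as described above. */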
57
58 /* Operand inserters. */
59
60 /* Insert register number. */
61 const char *
62 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
63 aarch64_insn *code,
64 const aarch64_inst *inst ATTRIBUTE_UNUSED)
65 {
66 insert_field (self->fields[0], code, info->reg.regno, 0);
67 return NULL;
68 }
69
70 /* Insert register number, index and/or other data for SIMD register element
71 operand, e.g. the last source operand in
72 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
73 const char *
74 aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
75 aarch64_insn *code, const aarch64_inst *inst)
76 {
77 /* regno */
78 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
79 /* index and/or type */
80 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
81 {
82 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
83 if (info->type == AARCH64_OPND_En
84 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
85 {
86 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
87 assert (info->idx == 1); /* Vn */
88 aarch64_insn value = info->reglane.index << pos;
89 insert_field (FLD_imm4, code, value, 0);
90 }
91 else
92 {
93 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
94 imm5<3:0> <V>
95 0000 RESERVED
96 xxx1 B
97 xx10 H
98 x100 S
99 1000 D */
100 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
101 insert_field (FLD_imm5, code, value, 0);
102 }
103 }
104 else
105 {
106 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
107 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
108 switch (info->qualifier)
109 {
110 case AARCH64_OPND_QLF_S_H:
111 /* H:L:M */
112 insert_fields (code, info->reglane.index, 0, 3, FLD_M, FLD_L, FLD_H);
113 break;
114 case AARCH64_OPND_QLF_S_S:
115 /* H:L */
116 insert_fields (code, info->reglane.index, 0, 2, FLD_L, FLD_H);
117 break;
118 case AARCH64_OPND_QLF_S_D:
119 /* H */
120 insert_field (FLD_H, code, info->reglane.index, 0);
121 break;
122 default:
123 assert (0);
124 }
125 }
126 return NULL;
127 }
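
/* For example, with the S_S qualifier an <index> of 3 is emitted as H:L = 0b11
   by the insert_fields call above, while with the S_D qualifier only the
   single-bit H field is written, so the index can only be 0 or 1. */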
128
129 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
130 const char *
131 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
132 aarch64_insn *code,
133 const aarch64_inst *inst ATTRIBUTE_UNUSED)
134 {
135 /* R */
136 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
137 /* len */
138 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
139 return NULL;
140 }
141
142 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
143 in AdvSIMD load/store instructions. */
144 const char *
145 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
146 const aarch64_opnd_info *info, aarch64_insn *code,
147 const aarch64_inst *inst)
148 {
149 aarch64_insn value;
150 /* Number of elements in each structure to be loaded/stored. */
151 unsigned num = get_opcode_dependent_value (inst->opcode);
152
153 /* Rt */
154 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
155 /* opcode */
156 switch (num)
157 {
158 case 1:
159 switch (info->reglist.num_regs)
160 {
161 case 1: value = 0x7; break;
162 case 2: value = 0xa; break;
163 case 3: value = 0x6; break;
164 case 4: value = 0x2; break;
165 default: assert (0);
166 }
167 break;
168 case 2:
169 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
170 break;
171 case 3:
172 value = 0x4;
173 break;
174 case 4:
175 value = 0x0;
176 break;
177 default:
178 assert (0);
179 }
180 insert_field (FLD_opcode, code, value, 0);
181
182 return NULL;
183 }
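
/* For example, LD1 {V0.4S, V1.4S, V2.4S, V3.4S}, [X0] uses one-element
   structures (num == 1) with a four-register list, so the switch above selects
   opcode value 0x2, whereas LD4 (num == 4) always uses opcode 0x0. */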
184
185 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
186 single structure to all lanes instructions. */
187 const char *
188 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
189 const aarch64_opnd_info *info, aarch64_insn *code,
190 const aarch64_inst *inst)
191 {
192 aarch64_insn value;
193 /* The opcode dependent area stores the number of elements in
194 each structure to be loaded/stored. */
195 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
196
197 /* Rt */
198 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
199 /* S */
200 value = (aarch64_insn) 0;
201 if (is_ld1r && info->reglist.num_regs == 2)
202 /* OP_LD1R does not have an alternating variant, but has "two consecutive"
203 instead. */
204 value = (aarch64_insn) 1;
205 insert_field (FLD_S, code, value, 0);
206
207 return NULL;
208 }
209
210 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
211 operand e.g. Vt in AdvSIMD load/store single element instructions. */
212 const char *
213 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
214 const aarch64_opnd_info *info, aarch64_insn *code,
215 const aarch64_inst *inst ATTRIBUTE_UNUSED)
216 {
217 aarch64_field field = {0, 0};
218 aarch64_insn QSsize; /* fields Q:S:size. */
219 aarch64_insn opcodeh2; /* opcode<2:1> */
220
221 assert (info->reglist.has_index);
222
223 /* Rt */
224 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
225 /* Encode the index, opcode<2:1> and size. */
226 switch (info->qualifier)
227 {
228 case AARCH64_OPND_QLF_S_B:
229 /* Index encoded in "Q:S:size". */
230 QSsize = info->reglist.index;
231 opcodeh2 = 0x0;
232 break;
233 case AARCH64_OPND_QLF_S_H:
234 /* Index encoded in "Q:S:size<1>". */
235 QSsize = info->reglist.index << 1;
236 opcodeh2 = 0x1;
237 break;
238 case AARCH64_OPND_QLF_S_S:
239 /* Index encoded in "Q:S". */
240 QSsize = info->reglist.index << 2;
241 opcodeh2 = 0x2;
242 break;
243 case AARCH64_OPND_QLF_S_D:
244 /* Index encoded in "Q". */
245 QSsize = info->reglist.index << 3 | 0x1;
246 opcodeh2 = 0x2;
247 break;
248 default:
249 assert (0);
250 }
251 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
252 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
253 insert_field_2 (&field, code, opcodeh2, 0);
254
255 return NULL;
256 }
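
/* For example, LD1 {V0.S}[3], [X0] uses the S_S qualifier, so QSsize is
   3 << 2 = 0b1100, which the insert_fields call above splits into size = 0b00,
   S = 1 and Q = 1, with opcode<2:1> set to 0x2. */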
257
258 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
259 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
260 or SSHR <V><d>, <V><n>, #<shift>. */
261 const char *
262 aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
263 const aarch64_opnd_info *info,
264 aarch64_insn *code, const aarch64_inst *inst)
265 {
266 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
267 aarch64_insn Q, imm;
268
269 if (inst->opcode->iclass == asimdshf)
270 {
271 /* Q
272 immh Q <T>
273 0000 x SEE AdvSIMD modified immediate
274 0001 0 8B
275 0001 1 16B
276 001x 0 4H
277 001x 1 8H
278 01xx 0 2S
279 01xx 1 4S
280 1xxx 0 RESERVED
281 1xxx 1 2D */
282 Q = (val & 0x1) ? 1 : 0;
283 insert_field (FLD_Q, code, Q, inst->opcode->mask);
284 val >>= 1;
285 }
286
287 assert (info->type == AARCH64_OPND_IMM_VLSR
288 || info->type == AARCH64_OPND_IMM_VLSL);
289
290 if (info->type == AARCH64_OPND_IMM_VLSR)
291 /* immh:immb
292 immh <shift>
293 0000 SEE AdvSIMD modified immediate
294 0001 (16-UInt(immh:immb))
295 001x (32-UInt(immh:immb))
296 01xx (64-UInt(immh:immb))
297 1xxx (128-UInt(immh:immb)) */
298 imm = (16 << (unsigned)val) - info->imm.value;
299 else
300 /* immh:immb
301 immh <shift>
302 0000 SEE AdvSIMD modified immediate
303 0001 (UInt(immh:immb)-8)
304 001x (UInt(immh:immb)-16)
305 01xx (UInt(immh:immb)-32)
306 1xxx (UInt(immh:immb)-64) */
307 imm = info->imm.value + (8 << (unsigned)val);
308 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
309
310 return NULL;
311 }
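
/* For example, SSHR V0.4S, V1.4S, #3 shifts 32-bit elements, so Q is 1 per the
   first table above, and as an IMM_VLSR operand immh:immb must hold
   64 - 3 = 61, i.e. immh = 0b0111 and immb = 0b101. */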
312
313 /* Insert fields for e.g. the immediate operands in
314 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
315 const char *
316 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
317 aarch64_insn *code,
318 const aarch64_inst *inst ATTRIBUTE_UNUSED)
319 {
320 int64_t imm;
321 /* Maximum of two fields to insert. */
322 assert (self->fields[2] == FLD_NIL);
323
324 imm = info->imm.value;
325 if (operand_need_shift_by_two (self))
326 imm >>= 2;
327 if (self->fields[1] == FLD_NIL)
328 insert_field (self->fields[0], code, imm, 0);
329 else
330 /* e.g. TBZ b5:b40. */
331 insert_fields (code, imm, 0, 2, self->fields[1], self->fields[0]);
332 return NULL;
333 }
334
335 /* Insert immediate and its shift amount for e.g. the last operand in
336 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
337 const char *
338 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
339 aarch64_insn *code,
340 const aarch64_inst *inst ATTRIBUTE_UNUSED)
341 {
342 /* imm16 */
343 aarch64_ins_imm (self, info, code, inst);
344 /* hw */
345 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
346 return NULL;
347 }
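
/* For example, MOVZ X0, #0x1234, LSL #32 places 0x1234 in imm16 and encodes
   the shift as hw = 32 >> 4 = 2. */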
348
349 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
350 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
351 const char *
352 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
353 const aarch64_opnd_info *info,
354 aarch64_insn *code,
355 const aarch64_inst *inst ATTRIBUTE_UNUSED)
356 {
357 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
358 uint64_t imm = info->imm.value;
359 enum aarch64_modifier_kind kind = info->shifter.kind;
360 int amount = info->shifter.amount;
361 aarch64_field field = {0, 0};
362
363 /* a:b:c:d:e:f:g:h */
364 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
365 {
366 /* Either MOVI <Dd>, #<imm>
367 or MOVI <Vd>.2D, #<imm>.
368 <imm> is a 64-bit immediate
369 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
370 encoded in "a:b:c:d:e:f:g:h". */
371 imm = aarch64_shrink_expanded_imm8 (imm);
372 assert ((int)imm >= 0);
373 }
374 assert (imm <= 255);
375 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
376
377 if (kind == AARCH64_MOD_NONE)
378 return NULL;
379
380 /* shift amount partially in cmode */
381 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
382 if (kind == AARCH64_MOD_LSL)
383 {
384 /* AARCH64_MOD_LSL: shift zeros. */
385 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
386 assert (esize == 4 || esize == 2);
387 amount >>= 3;
388 if (esize == 4)
389 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
390 else
391 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
392 }
393 else
394 {
395 /* AARCH64_MOD_MSL: shift ones. */
396 amount >>= 4;
397 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
398 }
399 insert_field_2 (&field, code, amount, 0);
400
401 return NULL;
402 }
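
/* For example, MOVI V0.4S, #0x5a, LSL #16 places 0x5a in a:b:c:d:e:f:g:h and,
   since the element size is 4 bytes, encodes the shift as 16 >> 3 = 2 in
   cmode<2:1>. */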
403
404 /* Insert #<fbits> for the immediate operand in fp fixed-point instructions,
405 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
406 const char *
407 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
408 aarch64_insn *code,
409 const aarch64_inst *inst ATTRIBUTE_UNUSED)
410 {
411 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
412 return NULL;
413 }
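
/* For example, SCVTF D0, W1, #10 encodes the immediate field as 64 - 10 = 54. */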
414
415 /* Insert arithmetic immediate for e.g. the last operand in
416 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
417 const char *
418 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
419 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
420 {
421 /* shift */
422 aarch64_insn value = info->shifter.amount ? 1 : 0;
423 insert_field (self->fields[0], code, value, 0);
424 /* imm12 (unsigned) */
425 insert_field (self->fields[1], code, info->imm.value, 0);
426 return NULL;
427 }
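
/* For example, SUBS X0, X1, #0x123, LSL #12 sets the shift field to 1 and
   imm12 to 0x123; without the LSL #12 the shift field would be 0. */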
428
429 /* Insert logical/bitmask immediate for e.g. the last operand in
430 ORR <Wd|WSP>, <Wn>, #<imm>. */
431 const char *
432 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
433 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
434 {
435 aarch64_insn value;
436 uint64_t imm = info->imm.value;
437 int is32 = aarch64_get_qualifier_esize (inst->operands[0].qualifier) == 4;
438
439 if (inst->opcode->op == OP_BIC)
440 imm = ~imm;
441 if (aarch64_logical_immediate_p (imm, is32, &value) == FALSE)
442 /* The constraint check should have guaranteed this wouldn't happen. */
443 assert (0);
444
445 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
446 self->fields[0]);
447 return NULL;
448 }
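
/* For example, ORR W0, W1, #0xff relies on aarch64_logical_immediate_p to
   compute the packed N:immr:imms encoding of 0xff; the three fields are then
   filled least significant first by the insert_fields call above. */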
449
450 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
451 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
452 const char *
453 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
454 aarch64_insn *code, const aarch64_inst *inst)
455 {
456 aarch64_insn value;
457
458 assert (info->idx == 0);
459
460 /* Rt */
461 aarch64_ins_regno (self, info, code, inst);
462 if (inst->opcode->iclass == ldstpair_indexed
463 || inst->opcode->iclass == ldstnapair_offs
464 || inst->opcode->iclass == ldstpair_off
465 || inst->opcode->iclass == loadlit)
466 {
467 /* size */
468 switch (info->qualifier)
469 {
470 case AARCH64_OPND_QLF_S_S: value = 0; break;
471 case AARCH64_OPND_QLF_S_D: value = 1; break;
472 case AARCH64_OPND_QLF_S_Q: value = 2; break;
473 default: assert (0);
474 }
475 insert_field (FLD_ldst_size, code, value, 0);
476 }
477 else
478 {
479 /* opc[1]:size */
480 value = aarch64_get_qualifier_standard_value (info->qualifier);
481 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
482 }
483
484 return NULL;
485 }
486
487 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
488 const char *
489 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
490 const aarch64_opnd_info *info, aarch64_insn *code,
491 const aarch64_inst *inst ATTRIBUTE_UNUSED)
492 {
493 /* Rn */
494 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
495 return NULL;
496 }
497
498 /* Encode the address operand for e.g.
499 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
500 const char *
501 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
502 const aarch64_opnd_info *info, aarch64_insn *code,
503 const aarch64_inst *inst ATTRIBUTE_UNUSED)
504 {
505 aarch64_insn S;
506 enum aarch64_modifier_kind kind = info->shifter.kind;
507
508 /* Rn */
509 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
510 /* Rm */
511 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
512 /* option */
513 if (kind == AARCH64_MOD_LSL)
514 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven encoding. */
515 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
516 /* S */
517 if (info->qualifier != AARCH64_OPND_QLF_S_B)
518 S = info->shifter.amount != 0;
519 else
520 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}],
521 S <amount>
522 0 [absent]
523 1 #0
524 Must be #0 if <extend> is explicitly LSL. */
525 S = info->shifter.operator_present && info->shifter.amount_present;
526 insert_field (FLD_S, code, S, 0);
527
528 return NULL;
529 }
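
/* For example, in LDR X0, [X1, X2, LSL #3] the explicit LSL is encoded as
   UXTX in the option field, and S is 1 because a shift amount is present. */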
530
531 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
532 const char *
533 aarch64_ins_addr_simm (const aarch64_operand *self,
534 const aarch64_opnd_info *info,
535 aarch64_insn *code, const aarch64_inst *inst)
536 {
537 int imm;
538
539 /* Rn */
540 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
541 /* simm (imm9 or imm7) */
542 imm = info->addr.offset.imm;
543 if (self->fields[0] == FLD_imm7)
544 /* scaled immediate in ld/st pair instructions. */
545 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
546 insert_field (self->fields[0], code, imm, 0);
547 /* pre/post- index */
548 if (info->addr.writeback)
549 {
550 assert (inst->opcode->iclass != ldst_unscaled
551 && inst->opcode->iclass != ldstnapair_offs
552 && inst->opcode->iclass != ldstpair_off
553 && inst->opcode->iclass != ldst_unpriv);
554 assert (info->addr.preind != info->addr.postind);
555 if (info->addr.preind)
556 insert_field (self->fields[1], code, 1, 0);
557 }
558
559 return NULL;
560 }
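
/* For example, LDP X0, X1, [SP, #16]! uses an imm7 offset scaled by the 8-byte
   transfer size, so the field holds 16 >> 3 = 2, and the pre-index bit is set
   because of the writeback. */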
561
562 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
563 const char *
564 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
565 const aarch64_opnd_info *info,
566 aarch64_insn *code,
567 const aarch64_inst *inst ATTRIBUTE_UNUSED)
568 {
569 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
570
571 /* Rn */
572 insert_field (self->fields[0], code, info->addr.base_regno, 0);
573 /* uimm12 */
574 insert_field (self->fields[1], code, info->addr.offset.imm >> shift, 0);
575 return NULL;
576 }
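
/* For example, LDR X0, [X1, #8] has an 8-byte transfer size, so shift is 3 and
   uimm12 holds 8 >> 3 = 1. */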
577
578 /* Encode the address operand for e.g.
579 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
580 const char *
581 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
582 const aarch64_opnd_info *info, aarch64_insn *code,
583 const aarch64_inst *inst ATTRIBUTE_UNUSED)
584 {
585 /* Rn */
586 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
587 /* Rm | #<amount> */
588 if (info->addr.offset.is_reg)
589 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
590 else
591 insert_field (FLD_Rm, code, 0x1f, 0);
592 return NULL;
593 }
594
595 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
596 const char *
597 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
598 const aarch64_opnd_info *info, aarch64_insn *code,
599 const aarch64_inst *inst ATTRIBUTE_UNUSED)
600 {
601 /* cond */
602 insert_field (FLD_cond, code, info->cond->value, 0);
603 return NULL;
604 }
605
606 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
607 const char *
608 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
609 const aarch64_opnd_info *info, aarch64_insn *code,
610 const aarch64_inst *inst ATTRIBUTE_UNUSED)
611 {
612 /* op0:op1:CRn:CRm:op2 */
613 insert_fields (code, info->sysreg, inst->opcode->mask, 5,
614 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
615 return NULL;
616 }
617
618 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
619 const char *
620 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
621 const aarch64_opnd_info *info, aarch64_insn *code,
622 const aarch64_inst *inst ATTRIBUTE_UNUSED)
623 {
624 /* op1:op2 */
625 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
626 FLD_op2, FLD_op1);
627 return NULL;
628 }
629
630 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
631 const char *
632 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
633 const aarch64_opnd_info *info, aarch64_insn *code,
634 const aarch64_inst *inst ATTRIBUTE_UNUSED)
635 {
636 /* op1:CRn:CRm:op2 */
637 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
638 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
639 return NULL;
640 }
641
642 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
643
644 const char *
645 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
646 const aarch64_opnd_info *info, aarch64_insn *code,
647 const aarch64_inst *inst ATTRIBUTE_UNUSED)
648 {
649 /* CRm */
650 insert_field (FLD_CRm, code, info->barrier->value, 0);
651 return NULL;
652 }
653
654 /* Encode the prefetch operation option operand for e.g.
655 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
656
657 const char *
658 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
659 const aarch64_opnd_info *info, aarch64_insn *code,
660 const aarch64_inst *inst ATTRIBUTE_UNUSED)
661 {
662 /* prfop in Rt */
663 insert_field (FLD_Rt, code, info->prfop->value, 0);
664 return NULL;
665 }
666
667 /* Encode the extended register operand for e.g.
668 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
669 const char *
670 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
671 const aarch64_opnd_info *info, aarch64_insn *code,
672 const aarch64_inst *inst ATTRIBUTE_UNUSED)
673 {
674 enum aarch64_modifier_kind kind;
675
676 /* Rm */
677 insert_field (FLD_Rm, code, info->reg.regno, 0);
678 /* option */
679 kind = info->shifter.kind;
680 if (kind == AARCH64_MOD_LSL)
681 kind = info->qualifier == AARCH64_OPND_QLF_W
682 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
683 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
684 /* imm3 */
685 insert_field (FLD_imm3, code, info->shifter.amount, 0);
686
687 return NULL;
688 }
689
690 /* Encode the shifted register operand for e.g.
691 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
692 const char *
693 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
694 const aarch64_opnd_info *info, aarch64_insn *code,
695 const aarch64_inst *inst ATTRIBUTE_UNUSED)
696 {
697 /* Rm */
698 insert_field (FLD_Rm, code, info->reg.regno, 0);
699 /* shift */
700 insert_field (FLD_shift, code,
701 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
702 /* imm6 */
703 insert_field (FLD_imm6, code, info->shifter.amount, 0);
704
705 return NULL;
706 }
707
708 /* Miscellaneous encoding functions. */
709
710 /* Encode size[0], i.e. bit 22, for
711 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
712
713 static void
714 encode_asimd_fcvt (aarch64_inst *inst)
715 {
716 aarch64_insn value;
717 aarch64_field field = {0, 0};
718 enum aarch64_opnd_qualifier qualifier;
719
720 switch (inst->opcode->op)
721 {
722 case OP_FCVTN:
723 case OP_FCVTN2:
724 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
725 qualifier = inst->operands[1].qualifier;
726 break;
727 case OP_FCVTL:
728 case OP_FCVTL2:
729 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
730 qualifier = inst->operands[0].qualifier;
731 break;
732 default:
733 assert (0);
734 }
735 assert (qualifier == AARCH64_OPND_QLF_V_4S
736 || qualifier == AARCH64_OPND_QLF_V_2D);
737 value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
738 gen_sub_field (FLD_size, 0, 1, &field);
739 insert_field_2 (&field, &inst->value, value, 0);
740 }
741
742 /* Encode size[0], i.e. bit 22, for
743 e.g. FCVTXN <Vb><d>, <Va><n>. */
744
745 static void
746 encode_asisd_fcvtxn (aarch64_inst *inst)
747 {
748 aarch64_insn val = 1;
749 aarch64_field field = {0, 0};
750 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
751 gen_sub_field (FLD_size, 0, 1, &field);
752 insert_field_2 (&field, &inst->value, val, 0);
753 }
754
755 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
756 static void
757 encode_fcvt (aarch64_inst *inst)
758 {
759 aarch64_insn val;
760 const aarch64_field field = {15, 2};
761
762 /* opc dstsize */
763 switch (inst->operands[0].qualifier)
764 {
765 case AARCH64_OPND_QLF_S_S: val = 0; break;
766 case AARCH64_OPND_QLF_S_D: val = 1; break;
767 case AARCH64_OPND_QLF_S_H: val = 3; break;
768 default: abort ();
769 }
770 insert_field_2 (&field, &inst->value, val, 0);
771
772 return;
773 }
774
775 /* Do miscellaneous encodings that are not common enough to be driven by
776 flags. */
777
778 static void
779 do_misc_encoding (aarch64_inst *inst)
780 {
781 switch (inst->opcode->op)
782 {
783 case OP_FCVT:
784 encode_fcvt (inst);
785 break;
786 case OP_FCVTN:
787 case OP_FCVTN2:
788 case OP_FCVTL:
789 case OP_FCVTL2:
790 encode_asimd_fcvt (inst);
791 break;
792 case OP_FCVTXN_S:
793 encode_asisd_fcvtxn (inst);
794 break;
795 default: break;
796 }
797 }
798
799 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
800 static void
801 encode_sizeq (aarch64_inst *inst)
802 {
803 aarch64_insn sizeq;
804 enum aarch64_field_kind kind;
805 int idx;
806
807 /* Get the index of the operand whose information we are going to use
808 to encode the size and Q fields.
809 This is deduced from the possible valid qualifier lists. */
810 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
811 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
812 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
813 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
814 /* Q */
815 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
816 /* size */
817 if (inst->opcode->iclass == asisdlse
818 || inst->opcode->iclass == asisdlsep
819 || inst->opcode->iclass == asisdlso
820 || inst->opcode->iclass == asisdlsop)
821 kind = FLD_vldst_size;
822 else
823 kind = FLD_size;
824 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
825 }
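
/* For example, SHADD V0.8H, V1.8H, V2.8H uses the 8H qualifier, whose standard
   value mirrors size:Q as 0b011, so Q is set to 1 and size to 0b01. */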
826
827 /* Opcodes that have fields shared by multiple operands are usually marked
828 with special flags. In this function, we detect such flags and use the
829 information in one of the related operands to do the encoding. The 'one'
830 operand is not an arbitrary operand, but one that carries enough
831 information for such an encoding. */
832
833 static void
834 do_special_encoding (struct aarch64_inst *inst)
835 {
836 int idx;
837 aarch64_insn value;
838
839 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
840
841 /* Condition for truly conditionally executed instructions, e.g. b.cond. */
842 if (inst->opcode->flags & F_COND)
843 {
844 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
845 }
846 if (inst->opcode->flags & F_SF)
847 {
848 idx = select_operand_for_sf_field_coding (inst->opcode);
849 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
850 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
851 ? 1 : 0;
852 insert_field (FLD_sf, &inst->value, value, 0);
853 if (inst->opcode->flags & F_N)
854 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
855 }
856 if (inst->opcode->flags & F_SIZEQ)
857 encode_sizeq (inst);
858 if (inst->opcode->flags & F_FPTYPE)
859 {
860 idx = select_operand_for_fptype_field_coding (inst->opcode);
861 switch (inst->operands[idx].qualifier)
862 {
863 case AARCH64_OPND_QLF_S_S: value = 0; break;
864 case AARCH64_OPND_QLF_S_D: value = 1; break;
865 case AARCH64_OPND_QLF_S_H: value = 3; break;
866 default: assert (0);
867 }
868 insert_field (FLD_type, &inst->value, value, 0);
869 }
870 if (inst->opcode->flags & F_SSIZE)
871 {
872 enum aarch64_opnd_qualifier qualifier;
873 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
874 qualifier = inst->operands[idx].qualifier;
875 assert (qualifier >= AARCH64_OPND_QLF_S_B
876 && qualifier <= AARCH64_OPND_QLF_S_Q);
877 value = aarch64_get_qualifier_standard_value (qualifier);
878 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
879 }
880 if (inst->opcode->flags & F_T)
881 {
882 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
883 aarch64_field field = {0, 0};
884 enum aarch64_opnd_qualifier qualifier;
885
886 idx = 0;
887 qualifier = inst->operands[idx].qualifier;
888 assert (aarch64_get_operand_class (inst->opcode->operands[0])
889 == AARCH64_OPND_CLASS_SIMD_REG
890 && qualifier >= AARCH64_OPND_QLF_V_8B
891 && qualifier <= AARCH64_OPND_QLF_V_2D);
892 /* imm5<3:0> q <t>
893 0000 x reserved
894 xxx1 0 8b
895 xxx1 1 16b
896 xx10 0 4h
897 xx10 1 8h
898 x100 0 2s
899 x100 1 4s
900 1000 0 reserved
901 1000 1 2d */
902 value = aarch64_get_qualifier_standard_value (qualifier);
903 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
904 num = (int) value >> 1;
905 assert (num >= 0 && num <= 3);
906 gen_sub_field (FLD_imm5, 0, num + 1, &field);
907 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
908 }
909 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
910 {
911 /* Use Rt to encode in the case of e.g.
912 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
913 enum aarch64_opnd_qualifier qualifier;
914 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
915 if (idx == -1)
916 /* Otherwise use the result operand, which has to be an integer
917 register. */
918 idx = 0;
919 assert (idx == 0 || idx == 1);
920 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
921 == AARCH64_OPND_CLASS_INT_REG);
922 qualifier = inst->operands[idx].qualifier;
923 insert_field (FLD_Q, &inst->value,
924 aarch64_get_qualifier_standard_value (qualifier), 0);
925 }
926 if (inst->opcode->flags & F_LDS_SIZE)
927 {
928 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
929 enum aarch64_opnd_qualifier qualifier;
930 aarch64_field field = {0, 0};
931 assert (aarch64_get_operand_class (inst->opcode->operands[0])
932 == AARCH64_OPND_CLASS_INT_REG);
933 gen_sub_field (FLD_opc, 0, 1, &field);
934 qualifier = inst->operands[0].qualifier;
935 insert_field_2 (&field, &inst->value,
936 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
937 }
938 /* Miscellaneous encoding as the last step. */
939 if (inst->opcode->flags & F_MISC)
940 do_misc_encoding (inst);
941
942 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
943 }
944
945 /* Converters that change an alias opcode instruction into its real form. */
946
947 /* ROR <Wd>, <Ws>, #<shift>
948 is equivalent to:
949 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
950 static void
951 convert_ror_to_extr (aarch64_inst *inst)
952 {
953 copy_operand_info (inst, 3, 2);
954 copy_operand_info (inst, 2, 1);
955 }
956
957 /* Convert
958 LSR <Xd>, <Xn>, #<shift>
959 to
960 UBFM <Xd>, <Xn>, #<shift>, #63. */
961 static void
962 convert_sr_to_bfm (aarch64_inst *inst)
963 {
964 inst->operands[3].imm.value =
965 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
966 }
967
968 /* Convert MOV to ORR. */
969 static void
970 convert_mov_to_orr (aarch64_inst *inst)
971 {
972 /* MOV <Vd>.<T>, <Vn>.<T>
973 is equivalent to:
974 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
975 copy_operand_info (inst, 2, 1);
976 }
977
978 /* When <imms> >= <immr>, the instruction written:
979 SBFX <Xd>, <Xn>, #<lsb>, #<width>
980 is equivalent to:
981 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
982
983 static void
984 convert_bfx_to_bfm (aarch64_inst *inst)
985 {
986 int64_t lsb, width;
987
988 /* Convert the operand. */
989 lsb = inst->operands[2].imm.value;
990 width = inst->operands[3].imm.value;
991 inst->operands[2].imm.value = lsb;
992 inst->operands[3].imm.value = lsb + width - 1;
993 }
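
/* For example, SBFX X0, X1, #8, #4 becomes SBFM X0, X1, #8, #11, since
   <lsb> + <width> - 1 = 8 + 4 - 1 = 11. */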
994
995 /* When <imms> < <immr>, the instruction written:
996 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
997 is equivalent to:
998 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
999
1000 static void
1001 convert_bfi_to_bfm (aarch64_inst *inst)
1002 {
1003 int64_t lsb, width;
1004
1005 /* Convert the operand. */
1006 lsb = inst->operands[2].imm.value;
1007 width = inst->operands[3].imm.value;
1008 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1009 {
1010 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1011 inst->operands[3].imm.value = width - 1;
1012 }
1013 else
1014 {
1015 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1016 inst->operands[3].imm.value = width - 1;
1017 }
1018 }
1019
1020 /* The instruction written:
1021 LSL <Xd>, <Xn>, #<shift>
1022 is equivalent to:
1023 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1024
1025 static void
1026 convert_lsl_to_ubfm (aarch64_inst *inst)
1027 {
1028 int64_t shift = inst->operands[2].imm.value;
1029
1030 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1031 {
1032 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1033 inst->operands[3].imm.value = 31 - shift;
1034 }
1035 else
1036 {
1037 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1038 inst->operands[3].imm.value = 63 - shift;
1039 }
1040 }
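
/* For example, LSL X0, X1, #4 becomes UBFM X0, X1, #60, #59, since
   (64 - 4) & 0x3f = 60 and 63 - 4 = 59. */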
1041
1042 /* CINC <Wd>, <Wn>, <cond>
1043 is equivalent to:
1044 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
1045
1046 static void
1047 convert_to_csel (aarch64_inst *inst)
1048 {
1049 copy_operand_info (inst, 3, 2);
1050 copy_operand_info (inst, 2, 1);
1051 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1052 }
1053
1054 /* CSET <Wd>, <cond>
1055 is equivalent to:
1056 CSINC <Wd>, WZR, WZR, invert(<cond>). */
1057
1058 static void
1059 convert_cset_to_csinc (aarch64_inst *inst)
1060 {
1061 copy_operand_info (inst, 3, 1);
1062 copy_operand_info (inst, 2, 0);
1063 copy_operand_info (inst, 1, 0);
1064 inst->operands[1].reg.regno = 0x1f;
1065 inst->operands[2].reg.regno = 0x1f;
1066 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
1067 }
1068
1069 /* MOV <Wd>, #<imm>
1070 is equivalent to:
1071 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
1072
1073 static void
1074 convert_mov_to_movewide (aarch64_inst *inst)
1075 {
1076 int is32;
1077 uint32_t shift_amount;
1078 uint64_t value;
1079
1080 switch (inst->opcode->op)
1081 {
1082 case OP_MOV_IMM_WIDE:
1083 value = inst->operands[1].imm.value;
1084 break;
1085 case OP_MOV_IMM_WIDEN:
1086 value = ~inst->operands[1].imm.value;
1087 break;
1088 default:
1089 assert (0);
1090 }
1091 inst->operands[1].type = AARCH64_OPND_HALF;
1092 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
1093 /* This should have been guaranteed by the constraint check. */
1094 assert (aarch64_wide_constant_p (value, is32, &shift_amount) == TRUE);
1095 value >>= shift_amount;
1096 value &= 0xffff;
1097 inst->operands[1].imm.value = value;
1098 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
1099 inst->operands[1].shifter.amount = shift_amount;
1100 }
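
/* For example, MOV X0, #0x12340000 becomes MOVZ X0, #0x1234, LSL #16: the
   wide-constant check finds shift_amount 16 and the remaining 16-bit chunk
   0x1234 goes into imm16. */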
1101
1102 /* MOV <Wd>, #<imm>
1103 is equivalent to:
1104 ORR <Wd>, WZR, #<imm>. */
1105
1106 static void
1107 convert_mov_to_movebitmask (aarch64_inst *inst)
1108 {
1109 copy_operand_info (inst, 2, 1);
1110 inst->operands[1].reg.regno = 0x1f;
1111 inst->operands[1].skip = 0;
1112 }
1113
1114 /* Some alias opcodes are assembled by being converted to their real form. */
1115
1116 static void
1117 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
1118 {
1119 const aarch64_opcode *alias = inst->opcode;
1120
1121 if ((alias->flags & F_CONV) == 0)
1122 goto convert_to_real_return;
1123
1124 switch (alias->op)
1125 {
1126 case OP_ASR_IMM:
1127 case OP_LSR_IMM:
1128 convert_sr_to_bfm (inst);
1129 break;
1130 case OP_LSL_IMM:
1131 convert_lsl_to_ubfm (inst);
1132 break;
1133 case OP_CINC:
1134 case OP_CINV:
1135 case OP_CNEG:
1136 convert_to_csel (inst);
1137 break;
1138 case OP_CSET:
1139 case OP_CSETM:
1140 convert_cset_to_csinc (inst);
1141 break;
1142 case OP_UBFX:
1143 case OP_BFXIL:
1144 case OP_SBFX:
1145 convert_bfx_to_bfm (inst);
1146 break;
1147 case OP_SBFIZ:
1148 case OP_BFI:
1149 case OP_UBFIZ:
1150 convert_bfi_to_bfm (inst);
1151 break;
1152 case OP_MOV_V:
1153 convert_mov_to_orr (inst);
1154 break;
1155 case OP_MOV_IMM_WIDE:
1156 case OP_MOV_IMM_WIDEN:
1157 convert_mov_to_movewide (inst);
1158 break;
1159 case OP_MOV_IMM_LOG:
1160 convert_mov_to_movebitmask (inst);
1161 break;
1162 case OP_ROR_IMM:
1163 convert_ror_to_extr (inst);
1164 break;
1165 default:
1166 break;
1167 }
1168
1169 convert_to_real_return:
1170 aarch64_replace_opcode (inst, real);
1171 }
1172
1173 /* Encode *INST_ORI of the opcode OPCODE.
1174 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1175 matched operand qualifier sequence in *QLF_SEQ. */
1176
1177 int
1178 aarch64_opcode_encode (const aarch64_opcode *opcode,
1179 const aarch64_inst *inst_ori, aarch64_insn *code,
1180 aarch64_opnd_qualifier_t *qlf_seq,
1181 aarch64_operand_error *mismatch_detail)
1182 {
1183 int i;
1184 const aarch64_opcode *aliased;
1185 aarch64_inst copy, *inst;
1186
1187 DEBUG_TRACE ("enter with %s", opcode->name);
1188
1189 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1190 copy = *inst_ori;
1191 inst = &copy;
1192
1193 assert (inst->opcode == NULL || inst->opcode == opcode);
1194 if (inst->opcode == NULL)
1195 inst->opcode = opcode;
1196
1197 /* Constrain the operands.
1198 After passing this, the encoding is guaranteed to succeed. */
1199 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1200 {
1201 DEBUG_TRACE ("FAIL since operand constraint not met");
1202 return 0;
1203 }
1204
1205 /* Get the base value.
1206 Note: this has to be before the aliasing handling below in order to
1207 get the base value from the alias opcode before we move on to the
1208 aliased opcode for encoding. */
1209 inst->value = opcode->opcode;
1210
1211 /* No need to do anything else if the opcode does not have any operand. */
1212 if (aarch64_num_of_operands (opcode) == 0)
1213 goto encoding_exit;
1214
1215 /* Assign operand indexes and check types. Also put the matched
1216 operand qualifiers in *QLF_SEQ to return. */
1217 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1218 {
1219 assert (opcode->operands[i] == inst->operands[i].type);
1220 inst->operands[i].idx = i;
1221 if (qlf_seq != NULL)
1222 qlf_seq[i] = inst->operands[i].qualifier;
1223 }
1224
1225 aliased = aarch64_find_real_opcode (opcode);
1226 /* If the opcode is an alias and it does not ask for direct encoding by
1227 itself, the instruction will be transformed to the form of the real opcode
1228 and the encoding will be carried out using the rules for the aliased
1229 opcode. */
1230 if (aliased != NULL && (opcode->flags & F_CONV))
1231 {
1232 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1233 aliased->name, opcode->name);
1234 /* Convert the operands to the form of the real opcode. */
1235 convert_to_real (inst, aliased);
1236 opcode = aliased;
1237 }
1238
1239 aarch64_opnd_info *info = inst->operands;
1240
1241 /* Call the inserter of each operand. */
1242 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1243 {
1244 const aarch64_operand *opnd;
1245 enum aarch64_opnd type = opcode->operands[i];
1246 if (type == AARCH64_OPND_NIL)
1247 break;
1248 if (info->skip)
1249 {
1250 DEBUG_TRACE ("skip the incomplete operand %d", i);
1251 continue;
1252 }
1253 opnd = &aarch64_operands[type];
1254 if (operand_has_inserter (opnd))
1255 aarch64_insert_operand (opnd, info, &inst->value, inst);
1256 }
1257
1258 /* Call opcode encoders indicated by flags. */
1259 if (opcode_has_special_coder (opcode))
1260 do_special_encoding (inst);
1261
1262 encoding_exit:
1263 DEBUG_TRACE ("exit with %s", opcode->name);
1264
1265 *code = inst->value;
1266
1267 return 1;
1268 }
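
/* A rough sketch of how a caller such as the GAS back end might drive this
   entry point; the setup and the output helpers below are illustrative
   assumptions only, not part of this library:

     aarch64_inst inst;
     aarch64_insn code;
     aarch64_operand_error err;

     memset (&inst, 0, sizeof inst);
     inst.opcode = opcode;           // entry picked from the opcode table
     ...                             // fill in operands[] and their qualifiers
     if (aarch64_opcode_encode (inst.opcode, &inst, &code, NULL, &err))
       output_insn (code);           // hypothetical 32-bit word emitter
     else
       report_encode_error (&err);   // hypothetical diagnostics hook
*/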