Modify AArch64 Assembly and disassembly functions to be able to fail and report why.
[deliverable/binutils-gdb.git] / opcodes / aarch64-asm.c
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2018 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
4
5 This file is part of the GNU opcodes library.
6
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
11
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
20
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25
26 /* Utilities. */
27
28 /* The unnamed arguments consist of the number of fields and information about
29 these fields where the VALUE will be inserted into CODE. MASK can be zero or
30 the base mask of the opcode.
31
32 N.B. the fields are required to be in such an order than the least signficant
33 field for VALUE comes the first, e.g. the <index> in
34 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
35 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
36 the order of M, L, H. */
37
38 static inline void
39 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
40 {
41 uint32_t num;
42 const aarch64_field *field;
43 enum aarch64_field_kind kind;
44 va_list va;
45
46 va_start (va, mask);
47 num = va_arg (va, uint32_t);
48 assert (num <= 5);
49 while (num--)
50 {
51 kind = va_arg (va, enum aarch64_field_kind);
52 field = &fields[kind];
53 insert_field (kind, code, value, mask);
54 value >>= field->width;
55 }
56 va_end (va);
57 }
58
59 /* Insert a raw field value VALUE into all fields in SELF->fields.
60 The least significant bit goes in the final field. */
61
62 static void
63 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
64 aarch64_insn value)
65 {
66 unsigned int i;
67 enum aarch64_field_kind kind;
68
69 for (i = ARRAY_SIZE (self->fields); i-- > 0; )
70 if (self->fields[i] != FLD_NIL)
71 {
72 kind = self->fields[i];
73 insert_field (kind, code, value, 0);
74 value >>= fields[kind].width;
75 }
76 }
77
78 /* Operand inserters. */
79
80 /* Insert register number. */
81 bfd_boolean
82 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
83 aarch64_insn *code,
84 const aarch64_inst *inst ATTRIBUTE_UNUSED,
85 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
86 {
87 insert_field (self->fields[0], code, info->reg.regno, 0);
88 return TRUE;
89 }
90
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].
   The index encoding depends on the instruction class, which selects both
   the field(s) used and how the element size is folded in.  */
bfd_boolean
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* pos is the distance of the element-size qualifier from S_B; it
	 positions the size marker bit within imm4/imm5 below.  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	  /* L:H */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	default:
	  /* Constraint checking should have rejected other qualifiers.  */
	  assert (0);
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
         or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      /* The index straddles up to three one-bit fields, most significant
	 part in H, depending on the element size.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  assert (0);
	}
    }
  return TRUE;
}
180
181 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
182 bfd_boolean
183 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
184 aarch64_insn *code,
185 const aarch64_inst *inst ATTRIBUTE_UNUSED,
186 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
187 {
188 /* R */
189 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
190 /* len */
191 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
192 return TRUE;
193 }
194
/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  The opcode-dependent value of the
   instruction gives the number of elements per structure; together with
   the register-list length it selects the "opcode" field encoding.  */
bfd_boolean
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode */
  switch (num)
    {
    case 1:
      /* One element per structure (e.g. LD1/ST1): the opcode also encodes
	 how many registers are in the list.  */
      switch (info->reglist.num_regs)
	{
	case 1: value = 0x7; break;
	case 2: value = 0xa; break;
	case 3: value = 0x6; break;
	case 4: value = 0x2; break;
	default: assert (0);
	}
      break;
    case 2:
      /* Two elements: a 4-register list selects the alternate encoding.  */
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      /* Constraint checking should make other counts unreachable.  */
      assert (0);
    }
  insert_field (FLD_opcode, code, value, 0);

  return TRUE;
}
238
239 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
240 single structure to all lanes instructions. */
241 bfd_boolean
242 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
243 const aarch64_opnd_info *info, aarch64_insn *code,
244 const aarch64_inst *inst,
245 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
246 {
247 aarch64_insn value;
248 /* The opcode dependent area stores the number of elements in
249 each structure to be loaded/stored. */
250 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
251
252 /* Rt */
253 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
254 /* S */
255 value = (aarch64_insn) 0;
256 if (is_ld1r && info->reglist.num_regs == 2)
257 /* OP_LD1R does not have alternating variant, but have "two consecutive"
258 instead. */
259 value = (aarch64_insn) 1;
260 insert_field (FLD_S, code, value, 0);
261
262 return TRUE;
263 }
264
/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.
   The element index is spread across the Q:S:size bits according to the
   element-size qualifier, with opcode<2:1> selecting the size class.  */
bfd_boolean
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Filled in by gen_sub_field below to address opcode<2:1>.  */
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> must be 1 for the D variant.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      /* Constraint checking should have rejected other qualifiers.  */
      assert (0);
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return TRUE;
}
313
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
     SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.
   For right shifts the encoded value is (element size * 2) - shift; for
   left shifts it is element size + shift, per the tables below.  */
bfd_boolean
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* val starts as the standard qualifier value; its low bit is Q for the
     vector (asimdshf) forms and the rest indexes the element size.  */
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return TRUE;
}
369
370 /* Insert fields for e.g. the immediate operands in
371 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
372 bfd_boolean
373 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
374 aarch64_insn *code,
375 const aarch64_inst *inst ATTRIBUTE_UNUSED,
376 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
377 {
378 int64_t imm;
379
380 imm = info->imm.value;
381 if (operand_need_shift_by_two (self))
382 imm >>= 2;
383 insert_all_fields (self, code, imm);
384 return TRUE;
385 }
386
387 /* Insert immediate and its shift amount for e.g. the last operand in
388 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
389 bfd_boolean
390 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
391 aarch64_insn *code, const aarch64_inst *inst,
392 aarch64_operand_error *errors)
393 {
394 /* imm16 */
395 aarch64_ins_imm (self, info, code, inst, errors);
396 /* hw */
397 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
398 return TRUE;
399 }
400
401 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
402 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
403 bfd_boolean
404 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
405 const aarch64_opnd_info *info,
406 aarch64_insn *code,
407 const aarch64_inst *inst ATTRIBUTE_UNUSED,
408 aarch64_operand_error *errors
409 ATTRIBUTE_UNUSED)
410 {
411 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
412 uint64_t imm = info->imm.value;
413 enum aarch64_modifier_kind kind = info->shifter.kind;
414 int amount = info->shifter.amount;
415 aarch64_field field = {0, 0};
416
417 /* a:b:c:d:e:f:g:h */
418 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
419 {
420 /* Either MOVI <Dd>, #<imm>
421 or MOVI <Vd>.2D, #<imm>.
422 <imm> is a 64-bit immediate
423 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
424 encoded in "a:b:c:d:e:f:g:h". */
425 imm = aarch64_shrink_expanded_imm8 (imm);
426 assert ((int)imm >= 0);
427 }
428 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
429
430 if (kind == AARCH64_MOD_NONE)
431 return TRUE;
432
433 /* shift amount partially in cmode */
434 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
435 if (kind == AARCH64_MOD_LSL)
436 {
437 /* AARCH64_MOD_LSL: shift zeros. */
438 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
439 assert (esize == 4 || esize == 2 || esize == 1);
440 /* For 8-bit move immediate, the optional LSL #0 does not require
441 encoding. */
442 if (esize == 1)
443 return TRUE;
444 amount >>= 3;
445 if (esize == 4)
446 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
447 else
448 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
449 }
450 else
451 {
452 /* AARCH64_MOD_MSL: shift ones. */
453 amount >>= 4;
454 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
455 }
456 insert_field_2 (&field, code, amount, 0);
457
458 return TRUE;
459 }
460
461 /* Insert fields for an 8-bit floating-point immediate. */
462 bfd_boolean
463 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
464 aarch64_insn *code,
465 const aarch64_inst *inst ATTRIBUTE_UNUSED,
466 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
467 {
468 insert_all_fields (self, code, info->imm.value);
469 return TRUE;
470 }
471
472 /* Insert 1-bit rotation immediate (#90 or #270). */
473 bfd_boolean
474 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
475 const aarch64_opnd_info *info,
476 aarch64_insn *code, const aarch64_inst *inst,
477 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
478 {
479 uint64_t rot = (info->imm.value - 90) / 180;
480 assert (rot < 2U);
481 insert_field (self->fields[0], code, rot, inst->opcode->mask);
482 return TRUE;
483 }
484
485 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
486 bfd_boolean
487 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
488 const aarch64_opnd_info *info,
489 aarch64_insn *code, const aarch64_inst *inst,
490 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
491 {
492 uint64_t rot = info->imm.value / 90;
493 assert (rot < 4U);
494 insert_field (self->fields[0], code, rot, inst->opcode->mask);
495 return TRUE;
496 }
497
498 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
499 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
500 bfd_boolean
501 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
502 aarch64_insn *code,
503 const aarch64_inst *inst ATTRIBUTE_UNUSED,
504 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
505 {
506 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
507 return TRUE;
508 }
509
510 /* Insert arithmetic immediate for e.g. the last operand in
511 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
512 bfd_boolean
513 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
514 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
515 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
516 {
517 /* shift */
518 aarch64_insn value = info->shifter.amount ? 1 : 0;
519 insert_field (self->fields[0], code, value, 0);
520 /* imm12 (unsigned) */
521 insert_field (self->fields[1], code, info->imm.value, 0);
522 return TRUE;
523 }
524
525 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
526 the operand should be inverted before encoding. */
527 static bfd_boolean
528 aarch64_ins_limm_1 (const aarch64_operand *self,
529 const aarch64_opnd_info *info, aarch64_insn *code,
530 const aarch64_inst *inst, bfd_boolean invert_p,
531 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
532 {
533 aarch64_insn value;
534 uint64_t imm = info->imm.value;
535 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
536
537 if (invert_p)
538 imm = ~imm;
539 /* The constraint check should have guaranteed this wouldn't happen. */
540 assert (aarch64_logical_immediate_p (imm, esize, &value));
541
542 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
543 self->fields[0]);
544 return TRUE;
545 }
546
547 /* Insert logical/bitmask immediate for e.g. the last operand in
548 ORR <Wd|WSP>, <Wn>, #<imm>. */
549 bfd_boolean
550 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
551 aarch64_insn *code, const aarch64_inst *inst,
552 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
553 {
554 return aarch64_ins_limm_1 (self, info, code, inst,
555 inst->opcode->op == OP_BIC, errors);
556 }
557
558 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
559 bfd_boolean
560 aarch64_ins_inv_limm (const aarch64_operand *self,
561 const aarch64_opnd_info *info, aarch64_insn *code,
562 const aarch64_inst *inst,
563 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
564 {
565 return aarch64_ins_limm_1 (self, info, code, inst, TRUE, errors);
566 }
567
/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.
   After the register number, the access size is encoded differently for
   the pair/literal classes (a dedicated "size" value) than for the rest
   (opc[1]:size from the standard qualifier value).  */
bfd_boolean
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
		aarch64_insn *code, const aarch64_inst *inst,
		aarch64_operand_error *errors)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst, errors);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_Q: value = 2; break;
	default: assert (0);
	}
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return TRUE;
}
605
/* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].
   Only the base register is encoded; the optional #0 offset has no
   encoding of its own.  */
bfd_boolean
aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  return TRUE;
}
617
/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].
   Encodes base, offset register, extend kind ("option") and the S bit
   that records whether a shift amount is present.  */
bfd_boolean
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option: LSL is encoded as UXTX so the modifier-value table applies
     uniformly.  */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return TRUE;
}
651
652 /* Encode the address operand for e.g.
653 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
654 bfd_boolean
655 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
656 const aarch64_opnd_info *info, aarch64_insn *code,
657 const aarch64_inst *inst ATTRIBUTE_UNUSED,
658 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
659 {
660 /* Rn */
661 insert_field (self->fields[0], code, info->addr.base_regno, 0);
662
663 /* simm9 */
664 int imm = info->addr.offset.imm;
665 insert_field (self->fields[1], code, imm, 0);
666
667 /* writeback */
668 if (info->addr.writeback)
669 {
670 assert (info->addr.preind == 1 && info->addr.postind == 0);
671 insert_field (self->fields[2], code, 1, 0);
672 }
673 return TRUE;
674 }
675
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.
   fields[0] is the immediate field (imm9 or imm7); imm7 operands are
   stored scaled by the access size.  fields[1] is the pre-index bit.
   Note: INST is only referenced inside assert (), hence the
   ATTRIBUTE_UNUSED marking for NDEBUG builds.  */
bfd_boolean
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7)
    /* scaled immediate in ld/st pair instructions..  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      /* These classes have no writeback forms.  */
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return TRUE;
}
708
709 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
710 bfd_boolean
711 aarch64_ins_addr_simm10 (const aarch64_operand *self,
712 const aarch64_opnd_info *info,
713 aarch64_insn *code,
714 const aarch64_inst *inst ATTRIBUTE_UNUSED,
715 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
716 {
717 int imm;
718
719 /* Rn */
720 insert_field (self->fields[0], code, info->addr.base_regno, 0);
721 /* simm10 */
722 imm = info->addr.offset.imm >> 3;
723 insert_field (self->fields[1], code, imm >> 9, 0);
724 insert_field (self->fields[2], code, imm, 0);
725 /* writeback */
726 if (info->addr.writeback)
727 {
728 assert (info->addr.preind == 1 && info->addr.postind == 0);
729 insert_field (self->fields[3], code, 1, 0);
730 }
731 return TRUE;
732 }
733
734 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
735 bfd_boolean
736 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
737 const aarch64_opnd_info *info,
738 aarch64_insn *code,
739 const aarch64_inst *inst ATTRIBUTE_UNUSED,
740 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
741 {
742 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
743
744 /* Rn */
745 insert_field (self->fields[0], code, info->addr.base_regno, 0);
746 /* uimm12 */
747 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
748 return TRUE;
749 }
750
751 /* Encode the address operand for e.g.
752 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
753 bfd_boolean
754 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
755 const aarch64_opnd_info *info, aarch64_insn *code,
756 const aarch64_inst *inst ATTRIBUTE_UNUSED,
757 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
758 {
759 /* Rn */
760 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
761 /* Rm | #<amount> */
762 if (info->addr.offset.is_reg)
763 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
764 else
765 insert_field (FLD_Rm, code, 0x1f, 0);
766 return TRUE;
767 }
768
769 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
770 bfd_boolean
771 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
772 const aarch64_opnd_info *info, aarch64_insn *code,
773 const aarch64_inst *inst ATTRIBUTE_UNUSED,
774 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
775 {
776 /* cond */
777 insert_field (FLD_cond, code, info->cond->value, 0);
778 return TRUE;
779 }
780
781 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
782 bfd_boolean
783 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
784 const aarch64_opnd_info *info, aarch64_insn *code,
785 const aarch64_inst *inst,
786 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
787 {
788 /* op0:op1:CRn:CRm:op2 */
789 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
790 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
791 return TRUE;
792 }
793
794 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
795 bfd_boolean
796 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
797 const aarch64_opnd_info *info, aarch64_insn *code,
798 const aarch64_inst *inst ATTRIBUTE_UNUSED,
799 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
800 {
801 /* op1:op2 */
802 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
803 FLD_op2, FLD_op1);
804 return TRUE;
805 }
806
807 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
808 bfd_boolean
809 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
810 const aarch64_opnd_info *info, aarch64_insn *code,
811 const aarch64_inst *inst ATTRIBUTE_UNUSED,
812 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
813 {
814 /* op1:CRn:CRm:op2 */
815 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
816 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
817 return TRUE;
818 }
819
820 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
821
822 bfd_boolean
823 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
824 const aarch64_opnd_info *info, aarch64_insn *code,
825 const aarch64_inst *inst ATTRIBUTE_UNUSED,
826 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
827 {
828 /* CRm */
829 insert_field (FLD_CRm, code, info->barrier->value, 0);
830 return TRUE;
831 }
832
833 /* Encode the prefetch operation option operand for e.g.
834 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
835
836 bfd_boolean
837 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
838 const aarch64_opnd_info *info, aarch64_insn *code,
839 const aarch64_inst *inst ATTRIBUTE_UNUSED,
840 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
841 {
842 /* prfop in Rt */
843 insert_field (FLD_Rt, code, info->prfop->value, 0);
844 return TRUE;
845 }
846
847 /* Encode the hint number for instructions that alias HINT but take an
848 operand. */
849
850 bfd_boolean
851 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
852 const aarch64_opnd_info *info, aarch64_insn *code,
853 const aarch64_inst *inst ATTRIBUTE_UNUSED,
854 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
855 {
856 /* CRm:op2. */
857 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
858 return TRUE;
859 }
860
861 /* Encode the extended register operand for e.g.
862 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
863 bfd_boolean
864 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
865 const aarch64_opnd_info *info, aarch64_insn *code,
866 const aarch64_inst *inst ATTRIBUTE_UNUSED,
867 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
868 {
869 enum aarch64_modifier_kind kind;
870
871 /* Rm */
872 insert_field (FLD_Rm, code, info->reg.regno, 0);
873 /* option */
874 kind = info->shifter.kind;
875 if (kind == AARCH64_MOD_LSL)
876 kind = info->qualifier == AARCH64_OPND_QLF_W
877 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
878 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
879 /* imm3 */
880 insert_field (FLD_imm3, code, info->shifter.amount, 0);
881
882 return TRUE;
883 }
884
885 /* Encode the shifted register operand for e.g.
886 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
887 bfd_boolean
888 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
889 const aarch64_opnd_info *info, aarch64_insn *code,
890 const aarch64_inst *inst ATTRIBUTE_UNUSED,
891 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
892 {
893 /* Rm */
894 insert_field (FLD_Rm, code, info->reg.regno, 0);
895 /* shift */
896 insert_field (FLD_shift, code,
897 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
898 /* imm6 */
899 insert_field (FLD_imm6, code, info->shifter.amount, 0);
900
901 return TRUE;
902 }
903
904 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
905 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
906 SELF's operand-dependent value. fields[0] specifies the field that
907 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
908 bfd_boolean
909 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
910 const aarch64_opnd_info *info,
911 aarch64_insn *code,
912 const aarch64_inst *inst ATTRIBUTE_UNUSED,
913 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
914 {
915 int factor = 1 + get_operand_specific_data (self);
916 insert_field (self->fields[0], code, info->addr.base_regno, 0);
917 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
918 return TRUE;
919 }
920
921 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
922 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
923 SELF's operand-dependent value. fields[0] specifies the field that
924 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
925 bfd_boolean
926 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
927 const aarch64_opnd_info *info,
928 aarch64_insn *code,
929 const aarch64_inst *inst ATTRIBUTE_UNUSED,
930 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
931 {
932 int factor = 1 + get_operand_specific_data (self);
933 insert_field (self->fields[0], code, info->addr.base_regno, 0);
934 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
935 return TRUE;
936 }
937
938 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
939 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
940 SELF's operand-dependent value. fields[0] specifies the field that
941 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
942 and imm3 fields, with imm3 being the less-significant part. */
943 bfd_boolean
944 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
945 const aarch64_opnd_info *info,
946 aarch64_insn *code,
947 const aarch64_inst *inst ATTRIBUTE_UNUSED,
948 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
949 {
950 int factor = 1 + get_operand_specific_data (self);
951 insert_field (self->fields[0], code, info->addr.base_regno, 0);
952 insert_fields (code, info->addr.offset.imm / factor, 0,
953 2, FLD_imm3, FLD_SVE_imm6);
954 return TRUE;
955 }
956
957 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
958 is a 4-bit signed number and where <shift> is SELF's operand-dependent
959 value. fields[0] specifies the base register field. */
960 bfd_boolean
961 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
962 const aarch64_opnd_info *info, aarch64_insn *code,
963 const aarch64_inst *inst ATTRIBUTE_UNUSED,
964 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
965 {
966 int factor = 1 << get_operand_specific_data (self);
967 insert_field (self->fields[0], code, info->addr.base_regno, 0);
968 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
969 return TRUE;
970 }
971
972 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
973 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
974 value. fields[0] specifies the base register field. */
975 bfd_boolean
976 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
977 const aarch64_opnd_info *info, aarch64_insn *code,
978 const aarch64_inst *inst ATTRIBUTE_UNUSED,
979 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
980 {
981 int factor = 1 << get_operand_specific_data (self);
982 insert_field (self->fields[0], code, info->addr.base_regno, 0);
983 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
984 return TRUE;
985 }
986
987 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
988 is SELF's operand-dependent value. fields[0] specifies the base
989 register field and fields[1] specifies the offset register field. */
990 bfd_boolean
991 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
992 const aarch64_opnd_info *info, aarch64_insn *code,
993 const aarch64_inst *inst ATTRIBUTE_UNUSED,
994 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
995 {
996 insert_field (self->fields[0], code, info->addr.base_regno, 0);
997 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
998 return TRUE;
999 }
1000
1001 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1002 <shift> is SELF's operand-dependent value. fields[0] specifies the
1003 base register field, fields[1] specifies the offset register field and
1004 fields[2] is a single-bit field that selects SXTW over UXTW. */
1005 bfd_boolean
1006 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1007 const aarch64_opnd_info *info, aarch64_insn *code,
1008 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1009 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1010 {
1011 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1012 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1013 if (info->shifter.kind == AARCH64_MOD_UXTW)
1014 insert_field (self->fields[2], code, 0, 0);
1015 else
1016 insert_field (self->fields[2], code, 1, 0);
1017 return TRUE;
1018 }
1019
1020 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1021 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1022 fields[0] specifies the base register field. */
1023 bfd_boolean
1024 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1025 const aarch64_opnd_info *info, aarch64_insn *code,
1026 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1027 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1028 {
1029 int factor = 1 << get_operand_specific_data (self);
1030 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1031 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
1032 return TRUE;
1033 }
1034
1035 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1036 where <modifier> is fixed by the instruction and where <msz> is a
1037 2-bit unsigned number. fields[0] specifies the base register field
1038 and fields[1] specifies the offset register field. */
1039 static bfd_boolean
1040 aarch64_ext_sve_addr_zz (const aarch64_operand *self,
1041 const aarch64_opnd_info *info, aarch64_insn *code,
1042 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1043 {
1044 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1045 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1046 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
1047 return TRUE;
1048 }
1049
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors)
{
  /* The LSL, SXTW and UXTW forms share the same field layout; the
     modifier itself is fixed by the opcode, so defer to the common
     helper.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1061
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* The SXTW modifier is fixed by the opcode; only base, offset and
     <msz> need encoding, which the common helper handles.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1074
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bfd_boolean
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* The UXTW modifier is fixed by the opcode; only base, offset and
     <msz> need encoding, which the common helper handles.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1087
1088 /* Encode an SVE ADD/SUB immediate. */
1089 bfd_boolean
1090 aarch64_ins_sve_aimm (const aarch64_operand *self,
1091 const aarch64_opnd_info *info, aarch64_insn *code,
1092 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1093 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1094 {
1095 if (info->shifter.amount == 8)
1096 insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
1097 else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
1098 insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
1099 else
1100 insert_all_fields (self, code, info->imm.value & 0xff);
1101 return TRUE;
1102 }
1103
/* Encode an SVE CPY/DUP immediate.  */
bfd_boolean
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst,
		       aarch64_operand_error *errors)
{
  /* CPY/DUP immediates use the same byte-plus-shift-flag encoding as the
     ADD/SUB immediates, so reuse that inserter.  */
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}
1113
1114 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1115 array specifies which field to use for Zn. MM is encoded in the
1116 concatenation of imm5 and SVE_tszh, with imm5 being the less
1117 significant part. */
1118 bfd_boolean
1119 aarch64_ins_sve_index (const aarch64_operand *self,
1120 const aarch64_opnd_info *info, aarch64_insn *code,
1121 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1122 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1123 {
1124 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1125 insert_field (self->fields[0], code, info->reglane.regno, 0);
1126 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1127 2, FLD_imm5, FLD_SVE_tszh);
1128 return TRUE;
1129 }
1130
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
bfd_boolean
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors)
{
  /* The MOV alias restricts which immediates are allowed, but the
     encoding itself is the standard logical-immediate one.  */
  return aarch64_ins_limm (self, info, code, inst, errors);
}
1140
1141 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1142 and where MM occupies the most-significant part. The operand-dependent
1143 value specifies the number of bits in Zn. */
1144 bfd_boolean
1145 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1146 const aarch64_opnd_info *info, aarch64_insn *code,
1147 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1148 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1149 {
1150 unsigned int reg_bits = get_operand_specific_data (self);
1151 assert (info->reglane.regno < (1U << reg_bits));
1152 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1153 insert_all_fields (self, code, val);
1154 return TRUE;
1155 }
1156
1157 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1158 to use for Zn. */
1159 bfd_boolean
1160 aarch64_ins_sve_reglist (const aarch64_operand *self,
1161 const aarch64_opnd_info *info, aarch64_insn *code,
1162 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1163 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1164 {
1165 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
1166 return TRUE;
1167 }
1168
1169 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1170 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1171 field. */
1172 bfd_boolean
1173 aarch64_ins_sve_scale (const aarch64_operand *self,
1174 const aarch64_opnd_info *info, aarch64_insn *code,
1175 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1176 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1177 {
1178 insert_all_fields (self, code, info->imm.value);
1179 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1180 return TRUE;
1181 }
1182
1183 /* Encode an SVE shift left immediate. */
1184 bfd_boolean
1185 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1186 const aarch64_opnd_info *info, aarch64_insn *code,
1187 const aarch64_inst *inst,
1188 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1189 {
1190 const aarch64_opnd_info *prev_operand;
1191 unsigned int esize;
1192
1193 assert (info->idx > 0);
1194 prev_operand = &inst->operands[info->idx - 1];
1195 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1196 insert_all_fields (self, code, 8 * esize + info->imm.value);
1197 return TRUE;
1198 }
1199
1200 /* Encode an SVE shift right immediate. */
1201 bfd_boolean
1202 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1203 const aarch64_opnd_info *info, aarch64_insn *code,
1204 const aarch64_inst *inst,
1205 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1206 {
1207 const aarch64_opnd_info *prev_operand;
1208 unsigned int esize;
1209
1210 assert (info->idx > 0);
1211 prev_operand = &inst->operands[info->idx - 1];
1212 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1213 insert_all_fields (self, code, 16 * esize - info->imm.value);
1214 return TRUE;
1215 }
1216
1217 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1218 The fields array specifies which field to use. */
1219 bfd_boolean
1220 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1221 const aarch64_opnd_info *info,
1222 aarch64_insn *code,
1223 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1224 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1225 {
1226 if (info->imm.value == 0x3f000000)
1227 insert_field (self->fields[0], code, 0, 0);
1228 else
1229 insert_field (self->fields[0], code, 1, 0);
1230 return TRUE;
1231 }
1232
1233 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1234 The fields array specifies which field to use. */
1235 bfd_boolean
1236 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1237 const aarch64_opnd_info *info,
1238 aarch64_insn *code,
1239 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1240 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1241 {
1242 if (info->imm.value == 0x3f000000)
1243 insert_field (self->fields[0], code, 0, 0);
1244 else
1245 insert_field (self->fields[0], code, 1, 0);
1246 return TRUE;
1247 }
1248
1249 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1250 The fields array specifies which field to use. */
1251 bfd_boolean
1252 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1253 const aarch64_opnd_info *info,
1254 aarch64_insn *code,
1255 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1256 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1257 {
1258 if (info->imm.value == 0)
1259 insert_field (self->fields[0], code, 0, 0);
1260 else
1261 insert_field (self->fields[0], code, 1, 0);
1262 return TRUE;
1263 }
1264
/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier;

  /* Pick the operand whose qualifier carries the wider element type:
     the source for the narrowing FCVTN forms, the destination for the
     widening FCVTL forms.  */
  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      assert (0);
    }
  /* Only the 4S and 2D forms reach here; size[0] is 0 for 4S, 1 for 2D.  */
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
1298
1299 /* Encode size[0], i.e. bit 22, for
1300 e.g. FCVTXN <Vb><d>, <Va><n>. */
1301
1302 static void
1303 encode_asisd_fcvtxn (aarch64_inst *inst)
1304 {
1305 aarch64_insn val = 1;
1306 aarch64_field field = {0, 0};
1307 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1308 gen_sub_field (FLD_size, 0, 1, &field);
1309 insert_field_2 (&field, &inst->value, val, 0);
1310 }
1311
1312 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1313 static void
1314 encode_fcvt (aarch64_inst *inst)
1315 {
1316 aarch64_insn val;
1317 const aarch64_field field = {15, 2};
1318
1319 /* opc dstsize */
1320 switch (inst->operands[0].qualifier)
1321 {
1322 case AARCH64_OPND_QLF_S_S: val = 0; break;
1323 case AARCH64_OPND_QLF_S_D: val = 1; break;
1324 case AARCH64_OPND_QLF_S_H: val = 3; break;
1325 default: abort ();
1326 }
1327 insert_field_2 (&field, &inst->value, val, 0);
1328
1329 return;
1330 }
1331
/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  */

static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  /* Find the first qualifier sequence that matches every operand.  */
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      for (i = 0; i < nops; ++i)
	if (inst->opcode->qualifiers_list[variant][i]
	    != inst->operands[i].qualifier)
	  break;
      /* All NOPS operands matched this variant.  */
      if (i == nops)
	return variant;
    }
  /* The caller guaranteed the qualifiers are valid, so a match must
     exist.  */
  abort ();
}
1352
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  The predicate MOV aliases duplicate the
	 source register into the other register fields of the real
	 opcode.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  The variant index selects the
	 element size bit in the imm5:tszh concatenation.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      /* Nothing extra to encode for this form.  */
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1419
/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  /* The standard value packs size and Q together; Q is bit 0 and size
     is bits [2:1] of the value.  */
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    /* AdvSIMD load/store structure forms keep 'size' in a different
       position.  */
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
1447
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  /* 'sf' selects between the 32-bit (W) and 64-bit (X) register forms.  */
  if (inst->opcode->flags & F_SF)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      /* Some opcodes encode the same width choice in 'N' as well.  */
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  /* LSE atomics encode the register width in the 'lse_sz' field.  */
  if (inst->opcode->flags & F_LSE_SZ)
    {
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  /* Scalar FP 'type' field: 0 = S, 1 = D, 3 = H.  */
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: assert (0);
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  /* AdvSIMD scalar 'size' field, derived from the scalar qualifier.  */
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  /* Vector arrangement encoded via Q and a one-hot pattern in imm5.  */
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      /* Set the single '1' bit at position NUM within imm5.  */
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      /* opc<0> is the inverse of the standard width value.  */
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
1573
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* The variant index is biased: 'size' 0 (byte) is not a valid
	 choice for these instructions.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    default:
      break;
    }
}
1632
/* Converters converting an alias opcode instruction to its real form.  */

/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Shift the shift amount into operand 3 and duplicate <Ws> into
     operand 2 (EXTR's second source).  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
1644
/* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
   is equivalent to:
   USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
static void
convert_xtl_to_shll (aarch64_inst *inst)
{
  /* Materialise the implicit zero shift as operand 2, qualified like
     the source vector.  */
  inst->operands[2].qualifier = inst->operands[1].qualifier;
  inst->operands[2].imm.value = 0;
}
1654
1655 /* Convert
1656 LSR <Xd>, <Xn>, #<shift>
1657 to
1658 UBFM <Xd>, <Xn>, #<shift>, #63. */
1659 static void
1660 convert_sr_to_bfm (aarch64_inst *inst)
1661 {
1662 inst->operands[3].imm.value =
1663 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
1664 }
1665
/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
  /* Duplicate the single source register into ORR's second source.  */
  copy_operand_info (inst, 2, 1);
}
1675
1676 /* When <imms> >= <immr>, the instruction written:
1677 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1678 is equivalent to:
1679 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1680
1681 static void
1682 convert_bfx_to_bfm (aarch64_inst *inst)
1683 {
1684 int64_t lsb, width;
1685
1686 /* Convert the operand. */
1687 lsb = inst->operands[2].imm.value;
1688 width = inst->operands[3].imm.value;
1689 inst->operands[2].imm.value = lsb;
1690 inst->operands[3].imm.value = lsb + width - 1;
1691 }
1692
1693 /* When <imms> < <immr>, the instruction written:
1694 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1695 is equivalent to:
1696 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1697
1698 static void
1699 convert_bfi_to_bfm (aarch64_inst *inst)
1700 {
1701 int64_t lsb, width;
1702
1703 /* Convert the operand. */
1704 lsb = inst->operands[2].imm.value;
1705 width = inst->operands[3].imm.value;
1706 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1707 {
1708 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1709 inst->operands[3].imm.value = width - 1;
1710 }
1711 else
1712 {
1713 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1714 inst->operands[3].imm.value = width - 1;
1715 }
1716 }
1717
1718 /* The instruction written:
1719 BFC <Xd>, #<lsb>, #<width>
1720 is equivalent to:
1721 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
1722
1723 static void
1724 convert_bfc_to_bfm (aarch64_inst *inst)
1725 {
1726 int64_t lsb, width;
1727
1728 /* Insert XZR. */
1729 copy_operand_info (inst, 3, 2);
1730 copy_operand_info (inst, 2, 1);
1731 copy_operand_info (inst, 1, 0);
1732 inst->operands[1].reg.regno = 0x1f;
1733
1734 /* Convert the immediate operand. */
1735 lsb = inst->operands[2].imm.value;
1736 width = inst->operands[3].imm.value;
1737 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1738 {
1739 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
1740 inst->operands[3].imm.value = width - 1;
1741 }
1742 else
1743 {
1744 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
1745 inst->operands[3].imm.value = width - 1;
1746 }
1747 }
1748
1749 /* The instruction written:
1750 LSL <Xd>, <Xn>, #<shift>
1751 is equivalent to:
1752 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
1753
1754 static void
1755 convert_lsl_to_ubfm (aarch64_inst *inst)
1756 {
1757 int64_t shift = inst->operands[2].imm.value;
1758
1759 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
1760 {
1761 inst->operands[2].imm.value = (32 - shift) & 0x1f;
1762 inst->operands[3].imm.value = 31 - shift;
1763 }
1764 else
1765 {
1766 inst->operands[2].imm.value = (64 - shift) & 0x3f;
1767 inst->operands[3].imm.value = 63 - shift;
1768 }
1769 }
1770
/* CINC <Wd>, <Wn>, <cond>
   is equivalent to:
   CSINC <Wd>, <Wn>, <Wn>, invert(<cond>).  */

static void
convert_to_csel (aarch64_inst *inst)
{
  /* Duplicate <Wn> into the second source slot and move the condition
     to operand 3, inverting it.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1782
/* CSET <Wd>, <cond>
   is equivalent to:
   CSINC <Wd>, WZR, WZR, invert(<cond>).  */

static void
convert_cset_to_csinc (aarch64_inst *inst)
{
  /* Move the condition to operand 3 and fill operands 1 and 2 with
     WZR (register number 31).  */
  copy_operand_info (inst, 3, 1);
  copy_operand_info (inst, 2, 0);
  copy_operand_info (inst, 1, 0);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[2].reg.regno = 0x1f;
  inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
}
1797
/* MOV <Wd>, #<imm>
   is equivalent to:
   MOVZ <Wd>, #<imm16>, LSL #<shift>.  */

static void
convert_mov_to_movewide (aarch64_inst *inst)
{
  int is32;
  uint32_t shift_amount;
  uint64_t value;

  switch (inst->opcode->op)
    {
    case OP_MOV_IMM_WIDE:
      value = inst->operands[1].imm.value;
      break;
    case OP_MOV_IMM_WIDEN:
      /* The MOVN form encodes the bitwise inverse of the immediate.  */
      value = ~inst->operands[1].imm.value;
      break;
    default:
      assert (0);
    }
  inst->operands[1].type = AARCH64_OPND_HALF;
  is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
  if (! aarch64_wide_constant_p (value, is32, &shift_amount))
    /* The constraint check should have guaranteed this wouldn't happen.  */
    assert (0);
  /* Reduce the immediate to the 16-bit payload of MOVZ/MOVN and record
     the LSL amount that restores the full value.  */
  value >>= shift_amount;
  value &= 0xffff;
  inst->operands[1].imm.value = value;
  inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
  inst->operands[1].shifter.amount = shift_amount;
}
1831
/* MOV <Wd>, #<imm>
   is equivalent to:
   ORR <Wd>, WZR, #<imm>.  */

static void
convert_mov_to_movebitmask (aarch64_inst *inst)
{
  /* Move the immediate to operand 2 and insert WZR (register number 31)
     as ORR's register source.  */
  copy_operand_info (inst, 2, 1);
  inst->operands[1].reg.regno = 0x1f;
  inst->operands[1].skip = 0;
}
1843
/* Some alias opcodes are assembled by being converted to their real-form.  */

static void
convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
{
  const aarch64_opcode *alias = inst->opcode;

  /* Aliases without F_CONV need no operand transformation; only the
     opcode itself is replaced.  */
  if ((alias->flags & F_CONV) == 0)
    goto convert_to_real_return;

  switch (alias->op)
    {
    case OP_ASR_IMM:
    case OP_LSR_IMM:
      convert_sr_to_bfm (inst);
      break;
    case OP_LSL_IMM:
      convert_lsl_to_ubfm (inst);
      break;
    case OP_CINC:
    case OP_CINV:
    case OP_CNEG:
      convert_to_csel (inst);
      break;
    case OP_CSET:
    case OP_CSETM:
      convert_cset_to_csinc (inst);
      break;
    case OP_UBFX:
    case OP_BFXIL:
    case OP_SBFX:
      convert_bfx_to_bfm (inst);
      break;
    case OP_SBFIZ:
    case OP_BFI:
    case OP_UBFIZ:
      convert_bfi_to_bfm (inst);
      break;
    case OP_BFC:
      convert_bfc_to_bfm (inst);
      break;
    case OP_MOV_V:
      convert_mov_to_orr (inst);
      break;
    case OP_MOV_IMM_WIDE:
    case OP_MOV_IMM_WIDEN:
      convert_mov_to_movewide (inst);
      break;
    case OP_MOV_IMM_LOG:
      convert_mov_to_movebitmask (inst);
      break;
    case OP_ROR_IMM:
      convert_ror_to_extr (inst);
      break;
    case OP_SXTL:
    case OP_SXTL2:
    case OP_UXTL:
    case OP_UXTL2:
      convert_xtl_to_shll (inst);
      break;
    default:
      break;
    }

convert_to_real_return:
  /* Swap in the real opcode; encoding proceeds with its rules.  */
  aarch64_replace_opcode (inst, real);
}
1911
1912 /* Encode *INST_ORI of the opcode code OPCODE.
1913 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
1914 matched operand qualifier sequence in *QLF_SEQ. */
1915
1916 bfd_boolean
1917 aarch64_opcode_encode (const aarch64_opcode *opcode,
1918 const aarch64_inst *inst_ori, aarch64_insn *code,
1919 aarch64_opnd_qualifier_t *qlf_seq,
1920 aarch64_operand_error *mismatch_detail)
1921 {
1922 int i;
1923 const aarch64_opcode *aliased;
1924 aarch64_inst copy, *inst;
1925
1926 DEBUG_TRACE ("enter with %s", opcode->name);
1927
1928 /* Create a copy of *INST_ORI, so that we can do any change we want. */
1929 copy = *inst_ori;
1930 inst = &copy;
1931
1932 assert (inst->opcode == NULL || inst->opcode == opcode);
1933 if (inst->opcode == NULL)
1934 inst->opcode = opcode;
1935
1936 /* Constrain the operands.
1937 After passing this, the encoding is guaranteed to succeed. */
1938 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
1939 {
1940 DEBUG_TRACE ("FAIL since operand constraint not met");
1941 return 0;
1942 }
1943
1944 /* Get the base value.
1945 Note: this has to be before the aliasing handling below in order to
1946 get the base value from the alias opcode before we move on to the
1947 aliased opcode for encoding. */
1948 inst->value = opcode->opcode;
1949
1950 /* No need to do anything else if the opcode does not have any operand. */
1951 if (aarch64_num_of_operands (opcode) == 0)
1952 goto encoding_exit;
1953
1954 /* Assign operand indexes and check types. Also put the matched
1955 operand qualifiers in *QLF_SEQ to return. */
1956 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1957 {
1958 assert (opcode->operands[i] == inst->operands[i].type);
1959 inst->operands[i].idx = i;
1960 if (qlf_seq != NULL)
1961 *qlf_seq = inst->operands[i].qualifier;
1962 }
1963
1964 aliased = aarch64_find_real_opcode (opcode);
1965 /* If the opcode is an alias and it does not ask for direct encoding by
1966 itself, the instruction will be transformed to the form of real opcode
1967 and the encoding will be carried out using the rules for the aliased
1968 opcode. */
1969 if (aliased != NULL && (opcode->flags & F_CONV))
1970 {
1971 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
1972 aliased->name, opcode->name);
1973 /* Convert the operands to the form of the real opcode. */
1974 convert_to_real (inst, aliased);
1975 opcode = aliased;
1976 }
1977
1978 aarch64_opnd_info *info = inst->operands;
1979
1980 /* Call the inserter of each operand. */
1981 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
1982 {
1983 const aarch64_operand *opnd;
1984 enum aarch64_opnd type = opcode->operands[i];
1985 if (type == AARCH64_OPND_NIL)
1986 break;
1987 if (info->skip)
1988 {
1989 DEBUG_TRACE ("skip the incomplete operand %d", i);
1990 continue;
1991 }
1992 opnd = &aarch64_operands[type];
1993 if (operand_has_inserter (opnd)
1994 && !aarch64_insert_operand (opnd, info, &inst->value, inst,
1995 mismatch_detail))
1996 return FALSE;
1997 }
1998
1999 /* Call opcode encoders indicated by flags. */
2000 if (opcode_has_special_coder (opcode))
2001 do_special_encoding (inst);
2002
2003 /* Possibly use the instruction class to encode the chosen qualifier
2004 variant. */
2005 aarch64_encode_variant_using_iclass (inst);
2006
2007 encoding_exit:
2008 DEBUG_TRACE ("exit with %s", opcode->name);
2009
2010 *code = inst->value;
2011
2012 return TRUE;
2013 }
This page took 0.183495 seconds and 5 git commands to generate.