1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <limits.h>
29 #include <stdarg.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #ifdef OBJ_ELF
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
48
49 /* This structure holds the unwinding state. */
50
51 static struct
52 {
53 symbolS * proc_start;
54 symbolS * table_entry;
55 symbolS * personality_routine;
56 int personality_index;
57 /* The segment containing the function. */
58 segT saved_seg;
59 subsegT saved_subseg;
60 /* Opcodes generated from this function. */
61 unsigned char * opcodes;
62 int opcode_count;
63 int opcode_alloc;
64 /* The number of bytes pushed to the stack. */
65 offsetT frame_size;
66 /* We don't add stack adjustment opcodes immediately so that we can merge
67 multiple adjustments. We can also omit the final adjustment
68 when using a frame pointer. */
69 offsetT pending_offset;
70 /* These two fields are set by both unwind_movsp and unwind_setfp. They
71 hold the reg+offset to use when restoring sp from a frame pointer. */
72 offsetT fp_offset;
73 int fp_reg;
74 /* Nonzero if an unwind_setfp directive has been seen. */
75 unsigned fp_used:1;
76 /* Nonzero if the last opcode restores sp from fp_reg. */
77 unsigned sp_restored:1;
78 } unwind;
79
80 /* Bit N, if set, indicates that an R_ARM_NONE relocation has already been
81    output for __aeabi_unwind_cpp_prN.  This ensures the dependency is
82    emitted only once per section, to save unnecessary bloat.  */
83 static unsigned int marked_pr_dependency = 0;
84
85 #endif /* OBJ_ELF */
86
87 /* Results from operand parsing worker functions. */
88
89 typedef enum
90 {
91 PARSE_OPERAND_SUCCESS,
92 PARSE_OPERAND_FAIL,
93 PARSE_OPERAND_FAIL_NO_BACKTRACK
94 } parse_operand_result;
95
96 enum arm_float_abi
97 {
98 ARM_FLOAT_ABI_HARD,
99 ARM_FLOAT_ABI_SOFTFP,
100 ARM_FLOAT_ABI_SOFT
101 };
102
103 /* Types of processor to assemble for. */
104 #ifndef CPU_DEFAULT
105 #if defined __XSCALE__
106 #define CPU_DEFAULT ARM_ARCH_XSCALE
107 #else
108 #if defined __thumb__
109 #define CPU_DEFAULT ARM_ARCH_V5T
110 #endif
111 #endif
112 #endif
113
114 #ifndef FPU_DEFAULT
115 # ifdef TE_LINUX
116 # define FPU_DEFAULT FPU_ARCH_FPA
117 # elif defined (TE_NetBSD)
118 # ifdef OBJ_ELF
119 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
120 # else
121 /* Legacy a.out format. */
122 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
123 # endif
124 # elif defined (TE_VXWORKS)
125 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
126 # else
127 /* For backwards compatibility, default to FPA. */
128 # define FPU_DEFAULT FPU_ARCH_FPA
129 # endif
130 #endif /* ifndef FPU_DEFAULT */
131
132 #define streq(a, b) (strcmp (a, b) == 0)
133
134 static arm_feature_set cpu_variant;
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
137
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
147
148
149 /* Variables that we set while parsing command-line options. Once all
150 options have been read we re-process these values to set the real
151 assembly flags. */
152 static const arm_feature_set *legacy_cpu = NULL;
153 static const arm_feature_set *legacy_fpu = NULL;
154
155 static const arm_feature_set *mcpu_cpu_opt = NULL;
156 static const arm_feature_set *mcpu_fpu_opt = NULL;
157 static const arm_feature_set *march_cpu_opt = NULL;
158 static const arm_feature_set *march_fpu_opt = NULL;
159 static const arm_feature_set *mfpu_opt = NULL;
160 static const arm_feature_set *object_arch = NULL;
161
162 /* Constants for known architecture features. */
163 static const arm_feature_set fpu_default = FPU_DEFAULT;
164 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
165 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
166 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
167 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
168 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
169 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
170 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
171 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
172
173 #ifdef CPU_DEFAULT
174 static const arm_feature_set cpu_default = CPU_DEFAULT;
175 #endif
176
177 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
178 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
179 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
180 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
181 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
182 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
183 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
184 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
185 static const arm_feature_set arm_ext_v4t_5 =
186 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
187 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
188 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
189 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
190 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
191 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
192 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
193 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
194 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
195 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
196 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
197 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
198 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
199 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
200 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
201 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
202 static const arm_feature_set arm_ext_m =
203 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);
204
205 static const arm_feature_set arm_arch_any = ARM_ANY;
206 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
207 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
208 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
209
210 static const arm_feature_set arm_cext_iwmmxt2 =
211 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
212 static const arm_feature_set arm_cext_iwmmxt =
213 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
214 static const arm_feature_set arm_cext_xscale =
215 ARM_FEATURE (0, ARM_CEXT_XSCALE);
216 static const arm_feature_set arm_cext_maverick =
217 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
218 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
219 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
220 static const arm_feature_set fpu_vfp_ext_v1xd =
221 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
222 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
223 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
224 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
225 static const arm_feature_set fpu_vfp_ext_d32 =
226 ARM_FEATURE (0, FPU_VFP_EXT_D32);
227 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
228 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
229 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
230 static const arm_feature_set fpu_neon_fp16 = ARM_FEATURE (0, FPU_NEON_FP16);
231
232 static int mfloat_abi_opt = -1;
233 /* Record user cpu selection for object attributes. */
234 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
235 /* Must be long enough to hold any of the names in arm_cpus. */
236 static char selected_cpu_name[16];
237 #ifdef OBJ_ELF
238 # ifdef EABI_DEFAULT
239 static int meabi_flags = EABI_DEFAULT;
240 # else
241 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
242 # endif
243
244 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
245
246 bfd_boolean
247 arm_is_eabi (void)
248 {
249 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
250 }
251 #endif
252
253 #ifdef OBJ_ELF
254 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
255 symbolS * GOT_symbol;
256 #endif
257
258 /* 0: assemble for ARM,
259 1: assemble for Thumb,
260 2: assemble for Thumb even though target CPU does not support thumb
261 instructions. */
262 static int thumb_mode = 0;
263
264 /* If unified_syntax is true, we are processing the new unified
265 ARM/Thumb syntax. Important differences from the old ARM mode:
266
267 - Immediate operands do not require a # prefix.
268 - Conditional affixes always appear at the end of the
269 instruction. (For backward compatibility, those instructions
270 that formerly had them in the middle, continue to accept them
271 there.)
272 - The IT instruction may appear, and if it does is validated
273 against subsequent conditional affixes. It does not generate
274 machine code.
275
276 Important differences from the old Thumb mode:
277
278 - Immediate operands do not require a # prefix.
279 - Most of the V6T2 instructions are only available in unified mode.
280 - The .N and .W suffixes are recognized and honored (it is an error
281 if they cannot be honored).
282 - All instructions set the flags if and only if they have an 's' affix.
283 - Conditional affixes may be used. They are validated against
284 preceding IT instructions. Unlike ARM mode, you cannot use a
285 conditional affix except in the scope of an IT instruction. */
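/* Illustrative example (not from the original comments): under the unified
   rules above, a flag-setting conditional add is written with the condition
   last, e.g. "addseq r0, r1, #1", while the legacy ordering
   "addeqs r0, r1, #1" continues to be accepted for backwards
   compatibility.  */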
286
287 static bfd_boolean unified_syntax = FALSE;
288
289 enum neon_el_type
290 {
291 NT_invtype,
292 NT_untyped,
293 NT_integer,
294 NT_float,
295 NT_poly,
296 NT_signed,
297 NT_unsigned
298 };
299
300 struct neon_type_el
301 {
302 enum neon_el_type type;
303 unsigned size;
304 };
305
306 #define NEON_MAX_TYPE_ELS 4
307
308 struct neon_type
309 {
310 struct neon_type_el el[NEON_MAX_TYPE_ELS];
311 unsigned elems;
312 };
313
314 struct arm_it
315 {
316 const char * error;
317 unsigned long instruction;
318 int size;
319 int size_req;
320 int cond;
321 /* "uncond_value" is set to the value in place of the conditional field in
322 unconditional versions of the instruction, or -1 if nothing is
323 appropriate. */
324 int uncond_value;
325 struct neon_type vectype;
326 /* Set to the opcode if the instruction needs relaxation.
327 Zero if the instruction is not relaxed. */
328 unsigned long relax;
329 struct
330 {
331 bfd_reloc_code_real_type type;
332 expressionS exp;
333 int pc_rel;
334 } reloc;
335
336 struct
337 {
338 unsigned reg;
339 signed int imm;
340 struct neon_type_el vectype;
341 unsigned present : 1; /* Operand present. */
342 unsigned isreg : 1; /* Operand was a register. */
343 unsigned immisreg : 1; /* .imm field is a second register. */
344 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
345 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
346 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
347 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
348 instructions. This allows us to disambiguate ARM <-> vector insns. */
349 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
350 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
351 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
352 unsigned issingle : 1; /* Operand is VFP single-precision register. */
353 unsigned hasreloc : 1; /* Operand has relocation suffix. */
354 unsigned writeback : 1; /* Operand has trailing ! */
355 unsigned preind : 1; /* Preindexed address. */
356 unsigned postind : 1; /* Postindexed address. */
357 unsigned negative : 1; /* Index register was negated. */
358 unsigned shifted : 1; /* Shift applied to operation. */
359 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
360 } operands[6];
361 };
362
363 static struct arm_it inst;
364
365 #define NUM_FLOAT_VALS 8
366
367 const char * fp_const[] =
368 {
369 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
370 };
371
372 /* Number of littlenums required to hold an extended precision number. */
373 #define MAX_LITTLENUMS 6
374
375 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
376
377 #define FAIL (-1)
378 #define SUCCESS (0)
379
380 #define SUFF_S 1
381 #define SUFF_D 2
382 #define SUFF_E 3
383 #define SUFF_P 4
384
385 #define CP_T_X 0x00008000
386 #define CP_T_Y 0x00400000
387
388 #define CONDS_BIT 0x00100000
389 #define LOAD_BIT 0x00100000
390
391 #define DOUBLE_LOAD_FLAG 0x00000001
392
393 struct asm_cond
394 {
395 const char * template;
396 unsigned long value;
397 };
398
399 #define COND_ALWAYS 0xE
400
401 struct asm_psr
402 {
403 const char *template;
404 unsigned long field;
405 };
406
407 struct asm_barrier_opt
408 {
409 const char *template;
410 unsigned long value;
411 };
412
413 /* The bit that distinguishes CPSR and SPSR. */
414 #define SPSR_BIT (1 << 22)
415
416 /* The individual PSR flag bits. */
417 #define PSR_c (1 << 16)
418 #define PSR_x (1 << 17)
419 #define PSR_s (1 << 18)
420 #define PSR_f (1 << 19)
421
422 struct reloc_entry
423 {
424 char *name;
425 bfd_reloc_code_real_type reloc;
426 };
427
428 enum vfp_reg_pos
429 {
430 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
431 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
432 };
433
434 enum vfp_ldstm_type
435 {
436 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
437 };
438
439 /* Bits for DEFINED field in neon_typed_alias. */
440 #define NTA_HASTYPE 1
441 #define NTA_HASINDEX 2
442
443 struct neon_typed_alias
444 {
445 unsigned char defined;
446 unsigned char index;
447 struct neon_type_el eltype;
448 };
449
450 /* ARM register categories. This includes coprocessor numbers and various
451 architecture extensions' registers. */
452 enum arm_reg_type
453 {
454 REG_TYPE_RN,
455 REG_TYPE_CP,
456 REG_TYPE_CN,
457 REG_TYPE_FN,
458 REG_TYPE_VFS,
459 REG_TYPE_VFD,
460 REG_TYPE_NQ,
461 REG_TYPE_VFSD,
462 REG_TYPE_NDQ,
463 REG_TYPE_NSDQ,
464 REG_TYPE_VFC,
465 REG_TYPE_MVF,
466 REG_TYPE_MVD,
467 REG_TYPE_MVFX,
468 REG_TYPE_MVDX,
469 REG_TYPE_MVAX,
470 REG_TYPE_DSPSC,
471 REG_TYPE_MMXWR,
472 REG_TYPE_MMXWC,
473 REG_TYPE_MMXWCG,
474 REG_TYPE_XSCALE,
475 };
476
477 /* Structure for a hash table entry for a register.
478 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
479 information which states whether a vector type or index is specified (for a
480 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
481 struct reg_entry
482 {
483 const char *name;
484 unsigned char number;
485 unsigned char type;
486 unsigned char builtin;
487 struct neon_typed_alias *neon;
488 };
489
490 /* Diagnostics used when we don't get a register of the expected type. */
491 const char *const reg_expected_msgs[] =
492 {
493 N_("ARM register expected"),
494 N_("bad or missing co-processor number"),
495 N_("co-processor register expected"),
496 N_("FPA register expected"),
497 N_("VFP single precision register expected"),
498 N_("VFP/Neon double precision register expected"),
499 N_("Neon quad precision register expected"),
500 N_("VFP single or double precision register expected"),
501 N_("Neon double or quad precision register expected"),
502 N_("VFP single, double or Neon quad precision register expected"),
503 N_("VFP system register expected"),
504 N_("Maverick MVF register expected"),
505 N_("Maverick MVD register expected"),
506 N_("Maverick MVFX register expected"),
507 N_("Maverick MVDX register expected"),
508 N_("Maverick MVAX register expected"),
509 N_("Maverick DSPSC register expected"),
510 N_("iWMMXt data register expected"),
511 N_("iWMMXt control register expected"),
512 N_("iWMMXt scalar register expected"),
513 N_("XScale accumulator register expected"),
514 };
515
516 /* Some well known registers that we refer to directly elsewhere. */
517 #define REG_SP 13
518 #define REG_LR 14
519 #define REG_PC 15
520
521 /* ARM instructions take 4 bytes in the object file, Thumb instructions
522 take 2: */
523 #define INSN_SIZE 4
524
525 struct asm_opcode
526 {
527 /* Basic string to match. */
528 const char *template;
529
530 /* Parameters to instruction. */
531 unsigned char operands[8];
532
533 /* Conditional tag - see opcode_lookup. */
534 unsigned int tag : 4;
535
536 /* Basic instruction code. */
537 unsigned int avalue : 28;
538
539 /* Thumb-format instruction code. */
540 unsigned int tvalue;
541
542 /* Which architecture variant provides this instruction. */
543 const arm_feature_set *avariant;
544 const arm_feature_set *tvariant;
545
546 /* Function to call to encode instruction in ARM format. */
547 void (* aencode) (void);
548
549 /* Function to call to encode instruction in Thumb format. */
550 void (* tencode) (void);
551 };
552
553 /* Defines for various bits that we will want to toggle. */
554 #define INST_IMMEDIATE 0x02000000
555 #define OFFSET_REG 0x02000000
556 #define HWOFFSET_IMM 0x00400000
557 #define SHIFT_BY_REG 0x00000010
558 #define PRE_INDEX 0x01000000
559 #define INDEX_UP 0x00800000
560 #define WRITE_BACK 0x00200000
561 #define LDM_TYPE_2_OR_3 0x00400000
562 #define CPSI_MMOD 0x00020000
563
564 #define LITERAL_MASK 0xf000f000
565 #define OPCODE_MASK 0xfe1fffff
566 #define V4_STR_BIT 0x00000020
567
568 #define T2_SUBS_PC_LR 0xf3de8f00
569
570 #define DATA_OP_SHIFT 21
571
572 #define T2_OPCODE_MASK 0xfe1fffff
573 #define T2_DATA_OP_SHIFT 21
574
575 /* Codes to distinguish the arithmetic instructions. */
576 #define OPCODE_AND 0
577 #define OPCODE_EOR 1
578 #define OPCODE_SUB 2
579 #define OPCODE_RSB 3
580 #define OPCODE_ADD 4
581 #define OPCODE_ADC 5
582 #define OPCODE_SBC 6
583 #define OPCODE_RSC 7
584 #define OPCODE_TST 8
585 #define OPCODE_TEQ 9
586 #define OPCODE_CMP 10
587 #define OPCODE_CMN 11
588 #define OPCODE_ORR 12
589 #define OPCODE_MOV 13
590 #define OPCODE_BIC 14
591 #define OPCODE_MVN 15
592
593 #define T2_OPCODE_AND 0
594 #define T2_OPCODE_BIC 1
595 #define T2_OPCODE_ORR 2
596 #define T2_OPCODE_ORN 3
597 #define T2_OPCODE_EOR 4
598 #define T2_OPCODE_ADD 8
599 #define T2_OPCODE_ADC 10
600 #define T2_OPCODE_SBC 11
601 #define T2_OPCODE_SUB 13
602 #define T2_OPCODE_RSB 14
603
604 #define T_OPCODE_MUL 0x4340
605 #define T_OPCODE_TST 0x4200
606 #define T_OPCODE_CMN 0x42c0
607 #define T_OPCODE_NEG 0x4240
608 #define T_OPCODE_MVN 0x43c0
609
610 #define T_OPCODE_ADD_R3 0x1800
611 #define T_OPCODE_SUB_R3 0x1a00
612 #define T_OPCODE_ADD_HI 0x4400
613 #define T_OPCODE_ADD_ST 0xb000
614 #define T_OPCODE_SUB_ST 0xb080
615 #define T_OPCODE_ADD_SP 0xa800
616 #define T_OPCODE_ADD_PC 0xa000
617 #define T_OPCODE_ADD_I8 0x3000
618 #define T_OPCODE_SUB_I8 0x3800
619 #define T_OPCODE_ADD_I3 0x1c00
620 #define T_OPCODE_SUB_I3 0x1e00
621
622 #define T_OPCODE_ASR_R 0x4100
623 #define T_OPCODE_LSL_R 0x4080
624 #define T_OPCODE_LSR_R 0x40c0
625 #define T_OPCODE_ROR_R 0x41c0
626 #define T_OPCODE_ASR_I 0x1000
627 #define T_OPCODE_LSL_I 0x0000
628 #define T_OPCODE_LSR_I 0x0800
629
630 #define T_OPCODE_MOV_I8 0x2000
631 #define T_OPCODE_CMP_I8 0x2800
632 #define T_OPCODE_CMP_LR 0x4280
633 #define T_OPCODE_MOV_HR 0x4600
634 #define T_OPCODE_CMP_HR 0x4500
635
636 #define T_OPCODE_LDR_PC 0x4800
637 #define T_OPCODE_LDR_SP 0x9800
638 #define T_OPCODE_STR_SP 0x9000
639 #define T_OPCODE_LDR_IW 0x6800
640 #define T_OPCODE_STR_IW 0x6000
641 #define T_OPCODE_LDR_IH 0x8800
642 #define T_OPCODE_STR_IH 0x8000
643 #define T_OPCODE_LDR_IB 0x7800
644 #define T_OPCODE_STR_IB 0x7000
645 #define T_OPCODE_LDR_RW 0x5800
646 #define T_OPCODE_STR_RW 0x5000
647 #define T_OPCODE_LDR_RH 0x5a00
648 #define T_OPCODE_STR_RH 0x5200
649 #define T_OPCODE_LDR_RB 0x5c00
650 #define T_OPCODE_STR_RB 0x5400
651
652 #define T_OPCODE_PUSH 0xb400
653 #define T_OPCODE_POP 0xbc00
654
655 #define T_OPCODE_BRANCH 0xe000
656
657 #define THUMB_SIZE 2 /* Size of thumb instruction. */
658 #define THUMB_PP_PC_LR 0x0100
659 #define THUMB_LOAD_BIT 0x0800
660 #define THUMB2_LOAD_BIT 0x00100000
661
662 #define BAD_ARGS _("bad arguments to instruction")
663 #define BAD_SP _("r13 not allowed here")
664 #define BAD_PC _("r15 not allowed here")
665 #define BAD_COND _("instruction cannot be conditional")
666 #define BAD_OVERLAP _("registers may not be the same")
667 #define BAD_HIREG _("lo register required")
668 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
669 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
670 #define BAD_BRANCH _("branch must be last instruction in IT block")
671 #define BAD_NOT_IT _("instruction not allowed in IT block")
672 #define BAD_FPU _("selected FPU does not support instruction")
673
674 static struct hash_control *arm_ops_hsh;
675 static struct hash_control *arm_cond_hsh;
676 static struct hash_control *arm_shift_hsh;
677 static struct hash_control *arm_psr_hsh;
678 static struct hash_control *arm_v7m_psr_hsh;
679 static struct hash_control *arm_reg_hsh;
680 static struct hash_control *arm_reloc_hsh;
681 static struct hash_control *arm_barrier_opt_hsh;
682
683 /* Stuff needed to resolve the label ambiguity
684 As:
685 ...
686 label: <insn>
687 may differ from:
688 ...
689 label:
690 <insn> */
691
692 symbolS * last_label_seen;
693 static int label_is_thumb_function_name = FALSE;
694 \f
695 /* Literal pool structure. Held on a per-section
696 and per-sub-section basis. */
697
698 #define MAX_LITERAL_POOL_SIZE 1024
699 typedef struct literal_pool
700 {
701 expressionS literals [MAX_LITERAL_POOL_SIZE];
702 unsigned int next_free_entry;
703 unsigned int id;
704 symbolS * symbol;
705 segT section;
706 subsegT sub_section;
707 struct literal_pool * next;
708 } literal_pool;
709
710 /* Pointer to a linked list of literal pools. */
711 literal_pool * list_of_pools = NULL;
712
713 /* State variables for IT block handling. */
714 static bfd_boolean current_it_mask = 0;
715 static int current_cc;
716 \f
717 /* Pure syntax. */
718
719 /* This array holds the chars that always start a comment. If the
720 pre-processor is disabled, these aren't very useful. */
721 const char comment_chars[] = "@";
722
723 /* This array holds the chars that only start a comment at the beginning of
724 a line. If the line seems to have the form '# 123 filename'
725 .line and .file directives will appear in the pre-processed output. */
726 /* Note that input_file.c hand checks for '#' at the beginning of the
727 first line of the input file. This is because the compiler outputs
728 #NO_APP at the beginning of its output. */
729 /* Also note that comments like this one will always work. */
730 const char line_comment_chars[] = "#";
731
732 const char line_separator_chars[] = ";";
733
734 /* Chars that can be used to separate the mantissa
735    from the exponent in floating point numbers.  */
736 const char EXP_CHARS[] = "eE";
737
738 /* Chars that mean this number is a floating point constant. */
739 /* As in 0f12.456 */
740 /* or 0d1.2345e12 */
741
742 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
743
744 /* Prefix characters that indicate the start of an immediate
745 value. */
746 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
747
748 /* Separator character handling. */
749
750 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
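/* Note (added for clarity): the input scrubber has already collapsed runs of
   whitespace to single spaces (see the comment in create_register_alias
   below), so skipping at most one space here is sufficient.  */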
751
752 static inline int
753 skip_past_char (char ** str, char c)
754 {
755 if (**str == c)
756 {
757 (*str)++;
758 return SUCCESS;
759 }
760 else
761 return FAIL;
762 }
763 #define skip_past_comma(str) skip_past_char (str, ',')
764
765 /* Arithmetic expressions (possibly involving symbols). */
766
767 /* Return TRUE if anything in the expression is a bignum. */
768
769 static int
770 walk_no_bignums (symbolS * sp)
771 {
772 if (symbol_get_value_expression (sp)->X_op == O_big)
773 return 1;
774
775 if (symbol_get_value_expression (sp)->X_add_symbol)
776 {
777 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
778 || (symbol_get_value_expression (sp)->X_op_symbol
779 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
780 }
781
782 return 0;
783 }
784
785 static int in_my_get_expression = 0;
786
787 /* Third argument to my_get_expression. */
788 #define GE_NO_PREFIX 0
789 #define GE_IMM_PREFIX 1
790 #define GE_OPT_PREFIX 2
791 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
792 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
793 #define GE_OPT_PREFIX_BIG 3
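/* Worked example (added for clarity, not part of the original comments):
   with GE_IMM_PREFIX the operand must begin with a prefix accepted by
   is_immediate_prefix above, e.g. the "#42" in "mov r0, #42"; with
   GE_OPT_PREFIX the '#' (or '$') may be omitted, which is how the
   unified-syntax rule "immediate operands do not require a # prefix" is
   implemented by my_get_expression below.  */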
794
795 static int
796 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
797 {
798 char * save_in;
799 segT seg;
800
801 /* In unified syntax, all prefixes are optional. */
802 if (unified_syntax)
803 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
804 : GE_OPT_PREFIX;
805
806 switch (prefix_mode)
807 {
808 case GE_NO_PREFIX: break;
809 case GE_IMM_PREFIX:
810 if (!is_immediate_prefix (**str))
811 {
812 inst.error = _("immediate expression requires a # prefix");
813 return FAIL;
814 }
815 (*str)++;
816 break;
817 case GE_OPT_PREFIX:
818 case GE_OPT_PREFIX_BIG:
819 if (is_immediate_prefix (**str))
820 (*str)++;
821 break;
822 default: abort ();
823 }
824
825 memset (ep, 0, sizeof (expressionS));
826
827 save_in = input_line_pointer;
828 input_line_pointer = *str;
829 in_my_get_expression = 1;
830 seg = expression (ep);
831 in_my_get_expression = 0;
832
833 if (ep->X_op == O_illegal)
834 {
835 /* We found a bad expression in md_operand(). */
836 *str = input_line_pointer;
837 input_line_pointer = save_in;
838 if (inst.error == NULL)
839 inst.error = _("bad expression");
840 return 1;
841 }
842
843 #ifdef OBJ_AOUT
844 if (seg != absolute_section
845 && seg != text_section
846 && seg != data_section
847 && seg != bss_section
848 && seg != undefined_section)
849 {
850 inst.error = _("bad segment");
851 *str = input_line_pointer;
852 input_line_pointer = save_in;
853 return 1;
854 }
855 #endif
856
857 /* Get rid of any bignums now, so that we don't generate an error for which
858 we can't establish a line number later on. Big numbers are never valid
859 in instructions, which is where this routine is always called. */
860 if (prefix_mode != GE_OPT_PREFIX_BIG
861 && (ep->X_op == O_big
862 || (ep->X_add_symbol
863 && (walk_no_bignums (ep->X_add_symbol)
864 || (ep->X_op_symbol
865 && walk_no_bignums (ep->X_op_symbol))))))
866 {
867 inst.error = _("invalid constant");
868 *str = input_line_pointer;
869 input_line_pointer = save_in;
870 return 1;
871 }
872
873 *str = input_line_pointer;
874 input_line_pointer = save_in;
875 return 0;
876 }
877
878 /* Turn a string in input_line_pointer into a floating point constant
879 of type TYPE, and store the appropriate bytes in *LITP. The number
880 of LITTLENUMS emitted is stored in *SIZEP. An error message is
881 returned, or NULL on OK.
882
883 Note that fp constants aren't represented in the normal way on the ARM.
884 In big endian mode, things are as expected. However, in little endian
885 mode fp constants are big-endian word-wise, and little-endian byte-wise
886 within the words. For example, (double) 1.1 in big endian mode is
887 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
888 the byte sequence 99 99 f1 3f 9a 99 99 99.
889
890 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
891
892 char *
893 md_atof (int type, char * litP, int * sizeP)
894 {
895 int prec;
896 LITTLENUM_TYPE words[MAX_LITTLENUMS];
897 char *t;
898 int i;
899
900 switch (type)
901 {
902 case 'f':
903 case 'F':
904 case 's':
905 case 'S':
906 prec = 2;
907 break;
908
909 case 'd':
910 case 'D':
911 case 'r':
912 case 'R':
913 prec = 4;
914 break;
915
916 case 'x':
917 case 'X':
918 prec = 5;
919 break;
920
921 case 'p':
922 case 'P':
923 prec = 5;
924 break;
925
926 default:
927 *sizeP = 0;
928 return _("Unrecognized or unsupported floating point constant");
929 }
930
931 t = atof_ieee (input_line_pointer, type, words);
932 if (t)
933 input_line_pointer = t;
934 *sizeP = prec * sizeof (LITTLENUM_TYPE);
935
936 if (target_big_endian)
937 {
938 for (i = 0; i < prec; i++)
939 {
940 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
941 litP += sizeof (LITTLENUM_TYPE);
942 }
943 }
944 else
945 {
946 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
947 for (i = prec - 1; i >= 0; i--)
948 {
949 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
950 litP += sizeof (LITTLENUM_TYPE);
951 }
952 else
953 /* For a 4 byte float the order of elements in `words' is 1 0.
954 For an 8 byte float the order is 1 0 3 2. */
955 for (i = 0; i < prec; i += 2)
956 {
957 md_number_to_chars (litP, (valueT) words[i + 1],
958 sizeof (LITTLENUM_TYPE));
959 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
960 (valueT) words[i], sizeof (LITTLENUM_TYPE));
961 litP += 2 * sizeof (LITTLENUM_TYPE);
962 }
963 }
964
965 return NULL;
966 }
967
968 /* We handle all bad expressions here, so that we can report the faulty
969 instruction in the error message. */
970 void
971 md_operand (expressionS * expr)
972 {
973 if (in_my_get_expression)
974 expr->X_op = O_illegal;
975 }
976
977 /* Immediate values. */
978
979 /* Generic immediate-value read function for use in directives.
980 Accepts anything that 'expression' can fold to a constant.
981 *val receives the number. */
982 #ifdef OBJ_ELF
983 static int
984 immediate_for_directive (int *val)
985 {
986 expressionS exp;
987 exp.X_op = O_illegal;
988
989 if (is_immediate_prefix (*input_line_pointer))
990 {
991 input_line_pointer++;
992 expression (&exp);
993 }
994
995 if (exp.X_op != O_constant)
996 {
997 as_bad (_("expected #constant"));
998 ignore_rest_of_line ();
999 return FAIL;
1000 }
1001 *val = exp.X_add_number;
1002 return SUCCESS;
1003 }
1004 #endif
1005
1006 /* Register parsing. */
1007
1008 /* Generic register parser. CCP points to what should be the
1009 beginning of a register name. If it is indeed a valid register
1010 name, advance CCP over it and return the reg_entry structure;
1011 otherwise return NULL. Does not issue diagnostics. */
1012
1013 static struct reg_entry *
1014 arm_reg_parse_multi (char **ccp)
1015 {
1016 char *start = *ccp;
1017 char *p;
1018 struct reg_entry *reg;
1019
1020 #ifdef REGISTER_PREFIX
1021 if (*start != REGISTER_PREFIX)
1022 return NULL;
1023 start++;
1024 #endif
1025 #ifdef OPTIONAL_REGISTER_PREFIX
1026 if (*start == OPTIONAL_REGISTER_PREFIX)
1027 start++;
1028 #endif
1029
1030 p = start;
1031 if (!ISALPHA (*p) || !is_name_beginner (*p))
1032 return NULL;
1033
1034 do
1035 p++;
1036 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1037
1038 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1039
1040 if (!reg)
1041 return NULL;
1042
1043 *ccp = p;
1044 return reg;
1045 }
1046
1047 static int
1048 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1049 enum arm_reg_type type)
1050 {
1051 /* Alternative syntaxes are accepted for a few register classes. */
1052 switch (type)
1053 {
1054 case REG_TYPE_MVF:
1055 case REG_TYPE_MVD:
1056 case REG_TYPE_MVFX:
1057 case REG_TYPE_MVDX:
1058 /* Generic coprocessor register names are allowed for these. */
1059 if (reg && reg->type == REG_TYPE_CN)
1060 return reg->number;
1061 break;
1062
1063 case REG_TYPE_CP:
1064 /* For backward compatibility, a bare number is valid here. */
1065 {
1066 unsigned long processor = strtoul (start, ccp, 10);
1067 if (*ccp != start && processor <= 15)
1068 return processor;
1069 }
1070
1071 case REG_TYPE_MMXWC:
1072 /* WC includes WCG. ??? I'm not sure this is true for all
1073 instructions that take WC registers. */
1074 if (reg && reg->type == REG_TYPE_MMXWCG)
1075 return reg->number;
1076 break;
1077
1078 default:
1079 break;
1080 }
1081
1082 return FAIL;
1083 }
1084
1085 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1086 return value is the register number or FAIL. */
1087
1088 static int
1089 arm_reg_parse (char **ccp, enum arm_reg_type type)
1090 {
1091 char *start = *ccp;
1092 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1093 int ret;
1094
1095 /* Do not allow a scalar (reg+index) to parse as a register. */
1096 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1097 return FAIL;
1098
1099 if (reg && reg->type == type)
1100 return reg->number;
1101
1102 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1103 return ret;
1104
1105 *ccp = start;
1106 return FAIL;
1107 }
1108
1109 /* Parse a Neon type specifier. *STR should point at the leading '.'
1110 character. Does no verification at this stage that the type fits the opcode
1111 properly. E.g.,
1112
1113 .i32.i32.s16
1114 .s32.f32
1115 .u16
1116
1117 Can all be legally parsed by this function.
1118
1119 Fills in neon_type struct pointer with parsed information, and updates STR
1120 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1121 type, FAIL if not. */
1122
1123 static int
1124 parse_neon_type (struct neon_type *type, char **str)
1125 {
1126 char *ptr = *str;
1127
1128 if (type)
1129 type->elems = 0;
1130
1131 while (type->elems < NEON_MAX_TYPE_ELS)
1132 {
1133 enum neon_el_type thistype = NT_untyped;
1134 unsigned thissize = -1u;
1135
1136 if (*ptr != '.')
1137 break;
1138
1139 ptr++;
1140
1141 /* Just a size without an explicit type. */
1142 if (ISDIGIT (*ptr))
1143 goto parsesize;
1144
1145 switch (TOLOWER (*ptr))
1146 {
1147 case 'i': thistype = NT_integer; break;
1148 case 'f': thistype = NT_float; break;
1149 case 'p': thistype = NT_poly; break;
1150 case 's': thistype = NT_signed; break;
1151 case 'u': thistype = NT_unsigned; break;
1152 case 'd':
1153 thistype = NT_float;
1154 thissize = 64;
1155 ptr++;
1156 goto done;
1157 default:
1158 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1159 return FAIL;
1160 }
1161
1162 ptr++;
1163
1164 /* .f is an abbreviation for .f32. */
1165 if (thistype == NT_float && !ISDIGIT (*ptr))
1166 thissize = 32;
1167 else
1168 {
1169 parsesize:
1170 thissize = strtoul (ptr, &ptr, 10);
1171
1172 if (thissize != 8 && thissize != 16 && thissize != 32
1173 && thissize != 64)
1174 {
1175 as_bad (_("bad size %d in type specifier"), thissize);
1176 return FAIL;
1177 }
1178 }
1179
1180 done:
1181 if (type)
1182 {
1183 type->el[type->elems].type = thistype;
1184 type->el[type->elems].size = thissize;
1185 type->elems++;
1186 }
1187 }
1188
1189 /* Empty/missing type is not a successful parse. */
1190 if (type->elems == 0)
1191 return FAIL;
1192
1193 *str = ptr;
1194
1195 return SUCCESS;
1196 }
1197
1198 /* Errors may be set multiple times during parsing or bit encoding
1199 (particularly in the Neon bits), but usually the earliest error which is set
1200 will be the most meaningful. Avoid overwriting it with later (cascading)
1201 errors by calling this function. */
1202
1203 static void
1204 first_error (const char *err)
1205 {
1206 if (!inst.error)
1207 inst.error = err;
1208 }
1209
1210 /* Parse a single type, e.g. ".s32", leading period included. */
1211 static int
1212 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1213 {
1214 char *str = *ccp;
1215 struct neon_type optype;
1216
1217 if (*str == '.')
1218 {
1219 if (parse_neon_type (&optype, &str) == SUCCESS)
1220 {
1221 if (optype.elems == 1)
1222 *vectype = optype.el[0];
1223 else
1224 {
1225 first_error (_("only one type should be specified for operand"));
1226 return FAIL;
1227 }
1228 }
1229 else
1230 {
1231 first_error (_("vector type expected"));
1232 return FAIL;
1233 }
1234 }
1235 else
1236 return FAIL;
1237
1238 *ccp = str;
1239
1240 return SUCCESS;
1241 }
1242
1243 /* Special meanings for indices (which normally have a range of 0-7); these
1244    special values still fit into a 4-bit integer.  */
1245
1246 #define NEON_ALL_LANES 15
1247 #define NEON_INTERLEAVE_LANES 14
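/* Example (illustrative): for a scalar written "d0[2]" the parsed index is 2;
   the all-lanes form "d0[]" (as used by instructions such as VLD1) is
   recorded as NEON_ALL_LANES in parse_typed_reg_or_scalar, and a register
   with no index at all is later treated as NEON_INTERLEAVE_LANES by
   parse_neon_el_struct_list.  */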
1248
1249 /* Parse either a register or a scalar, with an optional type. Return the
1250 register number, and optionally fill in the actual type of the register
1251 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1252 type/index information in *TYPEINFO. */
1253
1254 static int
1255 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1256 enum arm_reg_type *rtype,
1257 struct neon_typed_alias *typeinfo)
1258 {
1259 char *str = *ccp;
1260 struct reg_entry *reg = arm_reg_parse_multi (&str);
1261 struct neon_typed_alias atype;
1262 struct neon_type_el parsetype;
1263
1264 atype.defined = 0;
1265 atype.index = -1;
1266 atype.eltype.type = NT_invtype;
1267 atype.eltype.size = -1;
1268
1269 /* Try alternate syntax for some types of register. Note these are mutually
1270 exclusive with the Neon syntax extensions. */
1271 if (reg == NULL)
1272 {
1273 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1274 if (altreg != FAIL)
1275 *ccp = str;
1276 if (typeinfo)
1277 *typeinfo = atype;
1278 return altreg;
1279 }
1280
1281 /* Undo polymorphism when a set of register types may be accepted. */
1282 if ((type == REG_TYPE_NDQ
1283 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1284 || (type == REG_TYPE_VFSD
1285 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1286 || (type == REG_TYPE_NSDQ
1287 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1288 || reg->type == REG_TYPE_NQ))
1289 || (type == REG_TYPE_MMXWC
1290 && (reg->type == REG_TYPE_MMXWCG)))
1291 type = reg->type;
1292
1293 if (type != reg->type)
1294 return FAIL;
1295
1296 if (reg->neon)
1297 atype = *reg->neon;
1298
1299 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1300 {
1301 if ((atype.defined & NTA_HASTYPE) != 0)
1302 {
1303 first_error (_("can't redefine type for operand"));
1304 return FAIL;
1305 }
1306 atype.defined |= NTA_HASTYPE;
1307 atype.eltype = parsetype;
1308 }
1309
1310 if (skip_past_char (&str, '[') == SUCCESS)
1311 {
1312 if (type != REG_TYPE_VFD)
1313 {
1314 first_error (_("only D registers may be indexed"));
1315 return FAIL;
1316 }
1317
1318 if ((atype.defined & NTA_HASINDEX) != 0)
1319 {
1320 first_error (_("can't change index for operand"));
1321 return FAIL;
1322 }
1323
1324 atype.defined |= NTA_HASINDEX;
1325
1326 if (skip_past_char (&str, ']') == SUCCESS)
1327 atype.index = NEON_ALL_LANES;
1328 else
1329 {
1330 expressionS exp;
1331
1332 my_get_expression (&exp, &str, GE_NO_PREFIX);
1333
1334 if (exp.X_op != O_constant)
1335 {
1336 first_error (_("constant expression required"));
1337 return FAIL;
1338 }
1339
1340 if (skip_past_char (&str, ']') == FAIL)
1341 return FAIL;
1342
1343 atype.index = exp.X_add_number;
1344 }
1345 }
1346
1347 if (typeinfo)
1348 *typeinfo = atype;
1349
1350 if (rtype)
1351 *rtype = type;
1352
1353 *ccp = str;
1354
1355 return reg->number;
1356 }
1357
1358 /* Like arm_reg_parse, but allow the following extra features:
1359 - If RTYPE is non-zero, return the (possibly restricted) type of the
1360 register (e.g. Neon double or quad reg when either has been requested).
1361 - If this is a Neon vector type with additional type information, fill
1362 in the struct pointed to by VECTYPE (if non-NULL).
1363 This function will fault on encountering a scalar. */
1364
1365 static int
1366 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1367 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1368 {
1369 struct neon_typed_alias atype;
1370 char *str = *ccp;
1371 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1372
1373 if (reg == FAIL)
1374 return FAIL;
1375
1376 /* Do not allow a scalar (reg+index) to parse as a register. */
1377 if ((atype.defined & NTA_HASINDEX) != 0)
1378 {
1379 first_error (_("register operand expected, but got scalar"));
1380 return FAIL;
1381 }
1382
1383 if (vectype)
1384 *vectype = atype.eltype;
1385
1386 *ccp = str;
1387
1388 return reg;
1389 }
1390
1391 #define NEON_SCALAR_REG(X) ((X) >> 4)
1392 #define NEON_SCALAR_INDEX(X) ((X) & 15)
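/* Worked example (added for clarity): parse_scalar below encodes a scalar as
   reg * 16 + index, so "d5[2]" becomes 0x52, from which NEON_SCALAR_REG
   recovers 5 and NEON_SCALAR_INDEX recovers 2.  */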
1393
1394 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1395 have enough information to be able to do a good job bounds-checking. So, we
1396 just do easy checks here, and do further checks later. */
1397
1398 static int
1399 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1400 {
1401 int reg;
1402 char *str = *ccp;
1403 struct neon_typed_alias atype;
1404
1405 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1406
1407 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1408 return FAIL;
1409
1410 if (atype.index == NEON_ALL_LANES)
1411 {
1412 first_error (_("scalar must have an index"));
1413 return FAIL;
1414 }
1415 else if (atype.index >= 64 / elsize)
1416 {
1417 first_error (_("scalar index out of range"));
1418 return FAIL;
1419 }
1420
1421 if (type)
1422 *type = atype.eltype;
1423
1424 *ccp = str;
1425
1426 return reg * 16 + atype.index;
1427 }
1428
1429 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
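/* Example (illustrative): "{r0, r2-r5}" yields the bitmask 0x3d (bit 0 for
   r0 plus bits 2-5 for the range); a bare constant expression such as
   "0x0030" is also accepted and used directly as the mask.  */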
1430 static long
1431 parse_reg_list (char ** strp)
1432 {
1433 char * str = * strp;
1434 long range = 0;
1435 int another_range;
1436
1437 /* We come back here if we get ranges concatenated by '+' or '|'. */
1438 do
1439 {
1440 another_range = 0;
1441
1442 if (*str == '{')
1443 {
1444 int in_range = 0;
1445 int cur_reg = -1;
1446
1447 str++;
1448 do
1449 {
1450 int reg;
1451
1452 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1453 {
1454 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1455 return FAIL;
1456 }
1457
1458 if (in_range)
1459 {
1460 int i;
1461
1462 if (reg <= cur_reg)
1463 {
1464 first_error (_("bad range in register list"));
1465 return FAIL;
1466 }
1467
1468 for (i = cur_reg + 1; i < reg; i++)
1469 {
1470 if (range & (1 << i))
1471 as_tsktsk
1472 (_("Warning: duplicated register (r%d) in register list"),
1473 i);
1474 else
1475 range |= 1 << i;
1476 }
1477 in_range = 0;
1478 }
1479
1480 if (range & (1 << reg))
1481 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1482 reg);
1483 else if (reg <= cur_reg)
1484 as_tsktsk (_("Warning: register range not in ascending order"));
1485
1486 range |= 1 << reg;
1487 cur_reg = reg;
1488 }
1489 while (skip_past_comma (&str) != FAIL
1490 || (in_range = 1, *str++ == '-'));
1491 str--;
1492
1493 if (*str++ != '}')
1494 {
1495 first_error (_("missing `}'"));
1496 return FAIL;
1497 }
1498 }
1499 else
1500 {
1501 expressionS expr;
1502
1503 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1504 return FAIL;
1505
1506 if (expr.X_op == O_constant)
1507 {
1508 if (expr.X_add_number
1509 != (expr.X_add_number & 0x0000ffff))
1510 {
1511 inst.error = _("invalid register mask");
1512 return FAIL;
1513 }
1514
1515 if ((range & expr.X_add_number) != 0)
1516 {
1517 int regno = range & expr.X_add_number;
1518
1519 regno &= -regno;
1520 regno = (1 << regno) - 1;
1521 as_tsktsk
1522 (_("Warning: duplicated register (r%d) in register list"),
1523 regno);
1524 }
1525
1526 range |= expr.X_add_number;
1527 }
1528 else
1529 {
1530 if (inst.reloc.type != 0)
1531 {
1532 inst.error = _("expression too complex");
1533 return FAIL;
1534 }
1535
1536 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1537 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1538 inst.reloc.pc_rel = 0;
1539 }
1540 }
1541
1542 if (*str == '|' || *str == '+')
1543 {
1544 str++;
1545 another_range = 1;
1546 }
1547 }
1548 while (another_range);
1549
1550 *strp = str;
1551 return range;
1552 }
1553
1554 /* Types of registers in a list. */
1555
1556 enum reg_list_els
1557 {
1558 REGLIST_VFP_S,
1559 REGLIST_VFP_D,
1560 REGLIST_NEON_D
1561 };
1562
1563 /* Parse a VFP register list. If the string is invalid return FAIL.
1564 Otherwise return the number of registers, and set PBASE to the first
1565 register. Parses registers of type ETYPE.
1566 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1567 - Q registers can be used to specify pairs of D registers
1568 - { } can be omitted from around a singleton register list
1569 FIXME: This is not implemented, as it would require backtracking in
1570 some cases, e.g.:
1571 vtbl.8 d3,d4,d5
1572 This could be done (the meaning isn't really ambiguous), but doesn't
1573 fit in well with the current parsing framework.
1574 - 32 D registers may be used (also true for VFPv3).
1575 FIXME: Types are ignored in these register lists, which is probably a
1576 bug. */
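/* Example (illustrative): "{d0-d3}" returns 4 with *PBASE set to 0;
   "{s4, s5, s6}" returns 3 with *PBASE set to 4.  The listed registers must
   form a single consecutive, ascending range.  */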
1577
1578 static int
1579 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1580 {
1581 char *str = *ccp;
1582 int base_reg;
1583 int new_base;
1584 enum arm_reg_type regtype = 0;
1585 int max_regs = 0;
1586 int count = 0;
1587 int warned = 0;
1588 unsigned long mask = 0;
1589 int i;
1590
1591 if (*str != '{')
1592 {
1593 inst.error = _("expecting {");
1594 return FAIL;
1595 }
1596
1597 str++;
1598
1599 switch (etype)
1600 {
1601 case REGLIST_VFP_S:
1602 regtype = REG_TYPE_VFS;
1603 max_regs = 32;
1604 break;
1605
1606 case REGLIST_VFP_D:
1607 regtype = REG_TYPE_VFD;
1608 break;
1609
1610 case REGLIST_NEON_D:
1611 regtype = REG_TYPE_NDQ;
1612 break;
1613 }
1614
1615 if (etype != REGLIST_VFP_S)
1616 {
1617 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1618 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1619 {
1620 max_regs = 32;
1621 if (thumb_mode)
1622 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1623 fpu_vfp_ext_d32);
1624 else
1625 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1626 fpu_vfp_ext_d32);
1627 }
1628 else
1629 max_regs = 16;
1630 }
1631
1632 base_reg = max_regs;
1633
1634 do
1635 {
1636 int setmask = 1, addregs = 1;
1637
1638 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1639
1640 if (new_base == FAIL)
1641 {
1642 first_error (_(reg_expected_msgs[regtype]));
1643 return FAIL;
1644 }
1645
1646 if (new_base >= max_regs)
1647 {
1648 first_error (_("register out of range in list"));
1649 return FAIL;
1650 }
1651
1652 /* Note: a value of 2 * n is returned for the register Q<n>. */
1653 if (regtype == REG_TYPE_NQ)
1654 {
1655 setmask = 3;
1656 addregs = 2;
1657 }
1658
1659 if (new_base < base_reg)
1660 base_reg = new_base;
1661
1662 if (mask & (setmask << new_base))
1663 {
1664 first_error (_("invalid register list"));
1665 return FAIL;
1666 }
1667
1668 if ((mask >> new_base) != 0 && ! warned)
1669 {
1670 as_tsktsk (_("register list not in ascending order"));
1671 warned = 1;
1672 }
1673
1674 mask |= setmask << new_base;
1675 count += addregs;
1676
1677 if (*str == '-') /* We have the start of a range expression */
1678 {
1679 int high_range;
1680
1681 str++;
1682
1683 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1684 == FAIL)
1685 {
1686 inst.error = gettext (reg_expected_msgs[regtype]);
1687 return FAIL;
1688 }
1689
1690 if (high_range >= max_regs)
1691 {
1692 first_error (_("register out of range in list"));
1693 return FAIL;
1694 }
1695
1696 if (regtype == REG_TYPE_NQ)
1697 high_range = high_range + 1;
1698
1699 if (high_range <= new_base)
1700 {
1701 inst.error = _("register range not in ascending order");
1702 return FAIL;
1703 }
1704
1705 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1706 {
1707 if (mask & (setmask << new_base))
1708 {
1709 inst.error = _("invalid register list");
1710 return FAIL;
1711 }
1712
1713 mask |= setmask << new_base;
1714 count += addregs;
1715 }
1716 }
1717 }
1718 while (skip_past_comma (&str) != FAIL);
1719
1720 str++;
1721
1722 /* Sanity check -- should have raised a parse error above. */
1723 if (count == 0 || count > max_regs)
1724 abort ();
1725
1726 *pbase = base_reg;
1727
1728 /* Final test -- the registers must be consecutive. */
1729 mask >>= base_reg;
1730 for (i = 0; i < count; i++)
1731 {
1732 if ((mask & (1u << i)) == 0)
1733 {
1734 inst.error = _("non-contiguous register range");
1735 return FAIL;
1736 }
1737 }
1738
1739 *ccp = str;
1740
1741 return count;
1742 }
1743
1744 /* True if two alias types are the same. */
1745
1746 static int
1747 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1748 {
1749 if (!a && !b)
1750 return 1;
1751
1752 if (!a || !b)
1753 return 0;
1754
1755 if (a->defined != b->defined)
1756 return 0;
1757
1758 if ((a->defined & NTA_HASTYPE) != 0
1759 && (a->eltype.type != b->eltype.type
1760 || a->eltype.size != b->eltype.size))
1761 return 0;
1762
1763 if ((a->defined & NTA_HASINDEX) != 0
1764 && (a->index != b->index))
1765 return 0;
1766
1767 return 1;
1768 }
1769
1770 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1771 The base register is put in *PBASE.
1772 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1773 the return value.
1774 The register stride (minus one) is put in bit 4 of the return value.
1775 Bits [6:5] encode the list length (minus one).
1776 The type of the list elements is put in *ELTYPE, if non-NULL. */
1777
1778 #define NEON_LANE(X) ((X) & 0xf)
1779 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1780 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
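/* Worked example (added for clarity): the list "{d0[2], d2[2]}" parses as
   base register 0, lane 2, register stride 2 and length 2, so the function
   below returns 2 | ((2 - 1) << 4) | ((2 - 1) << 5) = 0x32, from which the
   macros above recover lane 2, stride 2 and length 2.  */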
1781
1782 static int
1783 parse_neon_el_struct_list (char **str, unsigned *pbase,
1784 struct neon_type_el *eltype)
1785 {
1786 char *ptr = *str;
1787 int base_reg = -1;
1788 int reg_incr = -1;
1789 int count = 0;
1790 int lane = -1;
1791 int leading_brace = 0;
1792 enum arm_reg_type rtype = REG_TYPE_NDQ;
1793 int addregs = 1;
1794 const char *const incr_error = "register stride must be 1 or 2";
1795 const char *const type_error = "mismatched element/structure types in list";
1796 struct neon_typed_alias firsttype;
1797
1798 if (skip_past_char (&ptr, '{') == SUCCESS)
1799 leading_brace = 1;
1800
1801 do
1802 {
1803 struct neon_typed_alias atype;
1804 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1805
1806 if (getreg == FAIL)
1807 {
1808 first_error (_(reg_expected_msgs[rtype]));
1809 return FAIL;
1810 }
1811
1812 if (base_reg == -1)
1813 {
1814 base_reg = getreg;
1815 if (rtype == REG_TYPE_NQ)
1816 {
1817 reg_incr = 1;
1818 addregs = 2;
1819 }
1820 firsttype = atype;
1821 }
1822 else if (reg_incr == -1)
1823 {
1824 reg_incr = getreg - base_reg;
1825 if (reg_incr < 1 || reg_incr > 2)
1826 {
1827 first_error (_(incr_error));
1828 return FAIL;
1829 }
1830 }
1831 else if (getreg != base_reg + reg_incr * count)
1832 {
1833 first_error (_(incr_error));
1834 return FAIL;
1835 }
1836
1837 if (!neon_alias_types_same (&atype, &firsttype))
1838 {
1839 first_error (_(type_error));
1840 return FAIL;
1841 }
1842
1843 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1844 modes. */
1845 if (ptr[0] == '-')
1846 {
1847 struct neon_typed_alias htype;
1848 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1849 if (lane == -1)
1850 lane = NEON_INTERLEAVE_LANES;
1851 else if (lane != NEON_INTERLEAVE_LANES)
1852 {
1853 first_error (_(type_error));
1854 return FAIL;
1855 }
1856 if (reg_incr == -1)
1857 reg_incr = 1;
1858 else if (reg_incr != 1)
1859 {
1860 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1861 return FAIL;
1862 }
1863 ptr++;
1864 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1865 if (hireg == FAIL)
1866 {
1867 first_error (_(reg_expected_msgs[rtype]));
1868 return FAIL;
1869 }
1870 if (!neon_alias_types_same (&htype, &firsttype))
1871 {
1872 first_error (_(type_error));
1873 return FAIL;
1874 }
1875 count += hireg + dregs - getreg;
1876 continue;
1877 }
1878
1879 /* If we're using Q registers, we can't use [] or [n] syntax. */
1880 if (rtype == REG_TYPE_NQ)
1881 {
1882 count += 2;
1883 continue;
1884 }
1885
1886 if ((atype.defined & NTA_HASINDEX) != 0)
1887 {
1888 if (lane == -1)
1889 lane = atype.index;
1890 else if (lane != atype.index)
1891 {
1892 first_error (_(type_error));
1893 return FAIL;
1894 }
1895 }
1896 else if (lane == -1)
1897 lane = NEON_INTERLEAVE_LANES;
1898 else if (lane != NEON_INTERLEAVE_LANES)
1899 {
1900 first_error (_(type_error));
1901 return FAIL;
1902 }
1903 count++;
1904 }
1905 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1906
1907 /* No lane set by [x]. We must be interleaving structures. */
1908 if (lane == -1)
1909 lane = NEON_INTERLEAVE_LANES;
1910
1911 /* Sanity check. */
1912 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1913 || (count > 1 && reg_incr == -1))
1914 {
1915 first_error (_("error parsing element/structure list"));
1916 return FAIL;
1917 }
1918
1919 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1920 {
1921 first_error (_("expected }"));
1922 return FAIL;
1923 }
1924
1925 if (reg_incr == -1)
1926 reg_incr = 1;
1927
1928 if (eltype)
1929 *eltype = firsttype.eltype;
1930
1931 *pbase = base_reg;
1932 *str = ptr;
1933
1934 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1935 }
1936
1937 /* Parse an explicit relocation suffix on an expression. This is
1938 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1939 arm_reloc_hsh contains no entries, so this function can only
1940 succeed if there is no () after the word. Returns -1 on error,
1941 BFD_RELOC_UNUSED if there wasn't any suffix. */
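/* For instance (illustrative, ELF targets only): in a data directive such as
   ".word sym(GOT)", the "(GOT)" suffix is looked up in arm_reloc_hsh and the
   corresponding BFD relocation code is returned.  */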
1942 static int
1943 parse_reloc (char **str)
1944 {
1945 struct reloc_entry *r;
1946 char *p, *q;
1947
1948 if (**str != '(')
1949 return BFD_RELOC_UNUSED;
1950
1951 p = *str + 1;
1952 q = p;
1953
1954 while (*q && *q != ')' && *q != ',')
1955 q++;
1956 if (*q != ')')
1957 return -1;
1958
1959 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1960 return -1;
1961
1962 *str = q + 1;
1963 return r->reloc;
1964 }
1965
1966 /* Directives: register aliases. */
1967
1968 static struct reg_entry *
1969 insert_reg_alias (char *str, int number, int type)
1970 {
1971 struct reg_entry *new;
1972 const char *name;
1973
1974 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1975 {
1976 if (new->builtin)
1977 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1978
1979 /* Only warn about a redefinition if it's not defined as the
1980 same register. */
1981 else if (new->number != number || new->type != type)
1982 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1983
1984 return NULL;
1985 }
1986
1987 name = xstrdup (str);
1988 new = xmalloc (sizeof (struct reg_entry));
1989
1990 new->name = name;
1991 new->number = number;
1992 new->type = type;
1993 new->builtin = FALSE;
1994 new->neon = NULL;
1995
1996 if (hash_insert (arm_reg_hsh, name, (void *) new))
1997 abort ();
1998
1999 return new;
2000 }
2001
2002 static void
2003 insert_neon_reg_alias (char *str, int number, int type,
2004 struct neon_typed_alias *atype)
2005 {
2006 struct reg_entry *reg = insert_reg_alias (str, number, type);
2007
2008 if (!reg)
2009 {
2010 first_error (_("attempt to redefine typed alias"));
2011 return;
2012 }
2013
2014 if (atype)
2015 {
2016 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
2017 *reg->neon = *atype;
2018 }
2019 }
2020
2021 /* Look for the .req directive. This is of the form:
2022
2023 new_register_name .req existing_register_name
2024
2025 If we find one, or if it looks sufficiently like one that we want to
2026 handle any error here, return TRUE. Otherwise return FALSE. */
2027
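/* For instance (illustrative only):

       acc .req r0

   lets "acc", and its all-uppercase and all-lowercase variants, be used
   wherever r0 would be accepted.  */
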
2028 static bfd_boolean
2029 create_register_alias (char * newname, char *p)
2030 {
2031 struct reg_entry *old;
2032 char *oldname, *nbuf;
2033 size_t nlen;
2034
2035 /* The input scrubber ensures that whitespace after the mnemonic is
2036 collapsed to single spaces. */
2037 oldname = p;
2038 if (strncmp (oldname, " .req ", 6) != 0)
2039 return FALSE;
2040
2041 oldname += 6;
2042 if (*oldname == '\0')
2043 return FALSE;
2044
2045 old = hash_find (arm_reg_hsh, oldname);
2046 if (!old)
2047 {
2048 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2049 return TRUE;
2050 }
2051
2052 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2053 the desired alias name, and p points to its end. If not, then
2054 the desired alias name is in the global original_case_string. */
2055 #ifdef TC_CASE_SENSITIVE
2056 nlen = p - newname;
2057 #else
2058 newname = original_case_string;
2059 nlen = strlen (newname);
2060 #endif
2061
2062 nbuf = alloca (nlen + 1);
2063 memcpy (nbuf, newname, nlen);
2064 nbuf[nlen] = '\0';
2065
2066 /* Create aliases under the new name as stated; an all-lowercase
2067 version of the new name; and an all-uppercase version of the new
2068 name. */
2069 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2070 {
2071 for (p = nbuf; *p; p++)
2072 *p = TOUPPER (*p);
2073
2074 if (strncmp (nbuf, newname, nlen))
2075 {
2076 /* If this attempt to create an additional alias fails, do not bother
2077 trying to create the all-lower case alias. We will fail and issue
2078 a second, duplicate error message. This situation arises when the
2079 programmer does something like:
2080 foo .req r0
2081 Foo .req r1
2082 The second .req creates the "Foo" alias but then fails to create
2083 the artificial FOO alias because it has already been created by the
2084 first .req. */
2085 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2086 return TRUE;
2087 }
2088
2089 for (p = nbuf; *p; p++)
2090 *p = TOLOWER (*p);
2091
2092 if (strncmp (nbuf, newname, nlen))
2093 insert_reg_alias (nbuf, old->number, old->type);
2094 }
2095
2096 return TRUE;
2097 }
2098
2099 /* Create a Neon typed/indexed register alias using directives, e.g.:
2100 X .dn d5.s32[1]
2101 Y .qn 6.s16
2102 Z .dn d7
2103 T .dn Z[0]
2104 These typed registers can be used instead of the types specified after the
2105 Neon mnemonic, so long as all operands given have types. Types can also be
2106 specified directly, e.g.:
2107 vadd d0.s32, d1.s32, d2.s32 */
2108
2109 static int
2110 create_neon_reg_alias (char *newname, char *p)
2111 {
2112 enum arm_reg_type basetype;
2113 struct reg_entry *basereg;
2114 struct reg_entry mybasereg;
2115 struct neon_type ntype;
2116 struct neon_typed_alias typeinfo;
2117 char *namebuf, *nameend;
2118 int namelen;
2119
2120 typeinfo.defined = 0;
2121 typeinfo.eltype.type = NT_invtype;
2122 typeinfo.eltype.size = -1;
2123 typeinfo.index = -1;
2124
2125 nameend = p;
2126
2127 if (strncmp (p, " .dn ", 5) == 0)
2128 basetype = REG_TYPE_VFD;
2129 else if (strncmp (p, " .qn ", 5) == 0)
2130 basetype = REG_TYPE_NQ;
2131 else
2132 return 0;
2133
2134 p += 5;
2135
2136 if (*p == '\0')
2137 return 0;
2138
2139 basereg = arm_reg_parse_multi (&p);
2140
2141 if (basereg && basereg->type != basetype)
2142 {
2143 as_bad (_("bad type for register"));
2144 return 0;
2145 }
2146
2147 if (basereg == NULL)
2148 {
2149 expressionS exp;
2150 /* Try parsing as an integer. */
2151 my_get_expression (&exp, &p, GE_NO_PREFIX);
2152 if (exp.X_op != O_constant)
2153 {
2154 as_bad (_("expression must be constant"));
2155 return 0;
2156 }
2157 basereg = &mybasereg;
2158 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2159 : exp.X_add_number;
2160 basereg->neon = 0;
2161 }
2162
2163 if (basereg->neon)
2164 typeinfo = *basereg->neon;
2165
2166 if (parse_neon_type (&ntype, &p) == SUCCESS)
2167 {
2168 /* We got a type. */
2169 if (typeinfo.defined & NTA_HASTYPE)
2170 {
2171 as_bad (_("can't redefine the type of a register alias"));
2172 return 0;
2173 }
2174
2175 typeinfo.defined |= NTA_HASTYPE;
2176 if (ntype.elems != 1)
2177 {
2178 as_bad (_("you must specify a single type only"));
2179 return 0;
2180 }
2181 typeinfo.eltype = ntype.el[0];
2182 }
2183
2184 if (skip_past_char (&p, '[') == SUCCESS)
2185 {
2186 expressionS exp;
2187 /* We got a scalar index. */
2188
2189 if (typeinfo.defined & NTA_HASINDEX)
2190 {
2191 as_bad (_("can't redefine the index of a scalar alias"));
2192 return 0;
2193 }
2194
2195 my_get_expression (&exp, &p, GE_NO_PREFIX);
2196
2197 if (exp.X_op != O_constant)
2198 {
2199 as_bad (_("scalar index must be constant"));
2200 return 0;
2201 }
2202
2203 typeinfo.defined |= NTA_HASINDEX;
2204 typeinfo.index = exp.X_add_number;
2205
2206 if (skip_past_char (&p, ']') == FAIL)
2207 {
2208 as_bad (_("expecting ]"));
2209 return 0;
2210 }
2211 }
2212
2213 namelen = nameend - newname;
2214 namebuf = alloca (namelen + 1);
2215 strncpy (namebuf, newname, namelen);
2216 namebuf[namelen] = '\0';
2217
2218 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2219 typeinfo.defined != 0 ? &typeinfo : NULL);
2220
2221 /* Insert name in all uppercase. */
2222 for (p = namebuf; *p; p++)
2223 *p = TOUPPER (*p);
2224
2225 if (strncmp (namebuf, newname, namelen))
2226 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2227 typeinfo.defined != 0 ? &typeinfo : NULL);
2228
2229 /* Insert name in all lowercase. */
2230 for (p = namebuf; *p; p++)
2231 *p = TOLOWER (*p);
2232
2233 if (strncmp (namebuf, newname, namelen))
2234 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2235 typeinfo.defined != 0 ? &typeinfo : NULL);
2236
2237 return 1;
2238 }
2239
2240 /* Should never be called, as .req goes between the alias and the
2241 register name, not at the beginning of the line. */
2242 static void
2243 s_req (int a ATTRIBUTE_UNUSED)
2244 {
2245 as_bad (_("invalid syntax for .req directive"));
2246 }
2247
2248 static void
2249 s_dn (int a ATTRIBUTE_UNUSED)
2250 {
2251 as_bad (_("invalid syntax for .dn directive"));
2252 }
2253
2254 static void
2255 s_qn (int a ATTRIBUTE_UNUSED)
2256 {
2257 as_bad (_("invalid syntax for .qn directive"));
2258 }
2259
2260 /* The .unreq directive deletes an alias which was previously defined
2261 by .req. For example:
2262
2263 my_alias .req r11
2264 .unreq my_alias */
2265
2266 static void
2267 s_unreq (int a ATTRIBUTE_UNUSED)
2268 {
2269 char * name;
2270 char saved_char;
2271
2272 name = input_line_pointer;
2273
2274 while (*input_line_pointer != 0
2275 && *input_line_pointer != ' '
2276 && *input_line_pointer != '\n')
2277 ++input_line_pointer;
2278
2279 saved_char = *input_line_pointer;
2280 *input_line_pointer = 0;
2281
2282 if (!*name)
2283 as_bad (_("invalid syntax for .unreq directive"));
2284 else
2285 {
2286 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2287
2288 if (!reg)
2289 as_bad (_("unknown register alias '%s'"), name);
2290 else if (reg->builtin)
2291 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2292 name);
2293 else
2294 {
2295 char * p;
2296 char * nbuf;
2297
2298 hash_delete (arm_reg_hsh, name, FALSE);
2299 free ((char *) reg->name);
2300 if (reg->neon)
2301 free (reg->neon);
2302 free (reg);
2303
2304 /* Also locate the all upper case and all lower case versions.
2305 Do not complain if we cannot find one or the other as it
2306 was probably deleted above. */
2307
2308 nbuf = strdup (name);
2309 for (p = nbuf; *p; p++)
2310 *p = TOUPPER (*p);
2311 reg = hash_find (arm_reg_hsh, nbuf);
2312 if (reg)
2313 {
2314 hash_delete (arm_reg_hsh, nbuf, FALSE);
2315 free ((char *) reg->name);
2316 if (reg->neon)
2317 free (reg->neon);
2318 free (reg);
2319 }
2320
2321 for (p = nbuf; *p; p++)
2322 *p = TOLOWER (*p);
2323 reg = hash_find (arm_reg_hsh, nbuf);
2324 if (reg)
2325 {
2326 hash_delete (arm_reg_hsh, nbuf, FALSE);
2327 free ((char *) reg->name);
2328 if (reg->neon)
2329 free (reg->neon);
2330 free (reg);
2331 }
2332
2333 free (nbuf);
2334 }
2335 }
2336
2337 *input_line_pointer = saved_char;
2338 demand_empty_rest_of_line ();
2339 }
2340
2341 /* Directives: Instruction set selection. */
2342
2343 #ifdef OBJ_ELF
2344 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2345 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2346 Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
2347 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2348
2349 static enum mstate mapstate = MAP_UNDEFINED;
2350
2351 void
2352 mapping_state (enum mstate state)
2353 {
2354 symbolS * symbolP;
2355 const char * symname;
2356 int type;
2357
2358 if (mapstate == state)
2359 /* The mapping symbol has already been emitted.
2360 There is nothing else to do. */
2361 return;
2362
2363 mapstate = state;
2364
2365 switch (state)
2366 {
2367 case MAP_DATA:
2368 symname = "$d";
2369 type = BSF_NO_FLAGS;
2370 break;
2371 case MAP_ARM:
2372 symname = "$a";
2373 type = BSF_NO_FLAGS;
2374 break;
2375 case MAP_THUMB:
2376 symname = "$t";
2377 type = BSF_NO_FLAGS;
2378 break;
2379 case MAP_UNDEFINED:
2380 return;
2381 default:
2382 abort ();
2383 }
2384
2385 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2386
2387 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2388 symbol_table_insert (symbolP);
2389 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2390
2391 switch (state)
2392 {
2393 case MAP_ARM:
2394 THUMB_SET_FUNC (symbolP, 0);
2395 ARM_SET_THUMB (symbolP, 0);
2396 ARM_SET_INTERWORK (symbolP, support_interwork);
2397 break;
2398
2399 case MAP_THUMB:
2400 THUMB_SET_FUNC (symbolP, 1);
2401 ARM_SET_THUMB (symbolP, 1);
2402 ARM_SET_INTERWORK (symbolP, support_interwork);
2403 break;
2404
2405 case MAP_DATA:
2406 default:
2407 return;
2408 }
2409 }
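
/* Worked example (illustrative, added editorially): assembling

       .text
       add    r0, r0, r1    @ first ARM instruction  -> "$a" emitted here
       .word  0x12345678    @ inline data            -> "$d" emitted here

   produces local, untyped mapping symbols at the two points where the
   section contents change state, as required by the AAELF rules above.  */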
2410 #else
2411 #define mapping_state(x) /* nothing */
2412 #endif
2413
2414 /* Find the real, Thumb encoded start of a Thumb function. */
2415
2416 static symbolS *
2417 find_real_start (symbolS * symbolP)
2418 {
2419 char * real_start;
2420 const char * name = S_GET_NAME (symbolP);
2421 symbolS * new_target;
2422
2423 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2424 #define STUB_NAME ".real_start_of"
2425
2426 if (name == NULL)
2427 abort ();
2428
2429 /* The compiler may generate BL instructions to local labels because
2430 it needs to perform a branch to a far away location. These labels
2431 do not have a corresponding ".real_start_of" label. We check
2432 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2433 the ".real_start_of" convention for nonlocal branches. */
2434 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2435 return symbolP;
2436
2437 real_start = ACONCAT ((STUB_NAME, name, NULL));
2438 new_target = symbol_find (real_start);
2439
2440 if (new_target == NULL)
2441 {
2442 as_warn (_("Failed to find real start of function: %s\n"), name);
2443 new_target = symbolP;
2444 }
2445
2446 return new_target;
2447 }
2448
2449 static void
2450 opcode_select (int width)
2451 {
2452 switch (width)
2453 {
2454 case 16:
2455 if (! thumb_mode)
2456 {
2457 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2458 as_bad (_("selected processor does not support THUMB opcodes"));
2459
2460 thumb_mode = 1;
2461 /* No need to force the alignment, since we will have been
2462 coming from ARM mode, which is word-aligned. */
2463 record_alignment (now_seg, 1);
2464 }
2465 mapping_state (MAP_THUMB);
2466 break;
2467
2468 case 32:
2469 if (thumb_mode)
2470 {
2471 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2472 as_bad (_("selected processor does not support ARM opcodes"));
2473
2474 thumb_mode = 0;
2475
2476 if (!need_pass_2)
2477 frag_align (2, 0, 0);
2478
2479 record_alignment (now_seg, 1);
2480 }
2481 mapping_state (MAP_ARM);
2482 break;
2483
2484 default:
2485 as_bad (_("invalid instruction size selected (%d)"), width);
2486 }
2487 }
2488
2489 static void
2490 s_arm (int ignore ATTRIBUTE_UNUSED)
2491 {
2492 opcode_select (32);
2493 demand_empty_rest_of_line ();
2494 }
2495
2496 static void
2497 s_thumb (int ignore ATTRIBUTE_UNUSED)
2498 {
2499 opcode_select (16);
2500 demand_empty_rest_of_line ();
2501 }
2502
2503 static void
2504 s_code (int unused ATTRIBUTE_UNUSED)
2505 {
2506 int temp;
2507
2508 temp = get_absolute_expression ();
2509 switch (temp)
2510 {
2511 case 16:
2512 case 32:
2513 opcode_select (temp);
2514 break;
2515
2516 default:
2517 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2518 }
2519 }
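
/* Usage note (illustrative): ".code 32" behaves like ".arm" and
   ".code 16" like ".thumb"; any other operand is rejected above.  */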
2520
2521 static void
2522 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2523 {
2524 /* If we are not already in Thumb mode, switch into it, EVEN if
2525 the target processor does not support Thumb instructions.
2526 This is used by gcc/config/arm/lib1funcs.asm, for example,
2527 to compile interworking support functions even if the
2528 target processor does not support interworking. */
2529 if (! thumb_mode)
2530 {
2531 thumb_mode = 2;
2532 record_alignment (now_seg, 1);
2533 }
2534
2535 demand_empty_rest_of_line ();
2536 }
2537
2538 static void
2539 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2540 {
2541 s_thumb (0);
2542
2543 /* The following label is the name/address of the start of a Thumb function.
2544 We need to know this for the interworking support. */
2545 label_is_thumb_function_name = TRUE;
2546 }
2547
2548 /* Perform a .set directive, but also mark the alias as
2549 being a thumb function. */
2550
2551 static void
2552 s_thumb_set (int equiv)
2553 {
2554 /* XXX the following is a duplicate of the code for s_set() in read.c
2555 We cannot just call that code as we need to get at the symbol that
2556 is created. */
2557 char * name;
2558 char delim;
2559 char * end_name;
2560 symbolS * symbolP;
2561
2562 /* Especial apologies for the random logic:
2563 This just grew, and could be parsed much more simply!
2564 Dean - in haste. */
2565 name = input_line_pointer;
2566 delim = get_symbol_end ();
2567 end_name = input_line_pointer;
2568 *end_name = delim;
2569
2570 if (*input_line_pointer != ',')
2571 {
2572 *end_name = 0;
2573 as_bad (_("expected comma after name \"%s\""), name);
2574 *end_name = delim;
2575 ignore_rest_of_line ();
2576 return;
2577 }
2578
2579 input_line_pointer++;
2580 *end_name = 0;
2581
2582 if (name[0] == '.' && name[1] == '\0')
2583 {
2584 /* XXX - this should not happen to .thumb_set. */
2585 abort ();
2586 }
2587
2588 if ((symbolP = symbol_find (name)) == NULL
2589 && (symbolP = md_undefined_symbol (name)) == NULL)
2590 {
2591 #ifndef NO_LISTING
2592 /* When doing symbol listings, play games with dummy fragments living
2593 outside the normal fragment chain to record the file and line info
2594 for this symbol. */
2595 if (listing & LISTING_SYMBOLS)
2596 {
2597 extern struct list_info_struct * listing_tail;
2598 fragS * dummy_frag = xmalloc (sizeof (fragS));
2599
2600 memset (dummy_frag, 0, sizeof (fragS));
2601 dummy_frag->fr_type = rs_fill;
2602 dummy_frag->line = listing_tail;
2603 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2604 dummy_frag->fr_symbol = symbolP;
2605 }
2606 else
2607 #endif
2608 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2609
2610 #ifdef OBJ_COFF
2611 /* "set" symbols are local unless otherwise specified. */
2612 SF_SET_LOCAL (symbolP);
2613 #endif /* OBJ_COFF */
2614 } /* Make a new symbol. */
2615
2616 symbol_table_insert (symbolP);
2617
2618 * end_name = delim;
2619
2620 if (equiv
2621 && S_IS_DEFINED (symbolP)
2622 && S_GET_SEGMENT (symbolP) != reg_section)
2623 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2624
2625 pseudo_set (symbolP);
2626
2627 demand_empty_rest_of_line ();
2628
2629 /* XXX Now we come to the Thumb specific bit of code. */
2630
2631 THUMB_SET_FUNC (symbolP, 1);
2632 ARM_SET_THUMB (symbolP, 1);
2633 #if defined OBJ_ELF || defined OBJ_COFF
2634 ARM_SET_INTERWORK (symbolP, support_interwork);
2635 #endif
2636 }
2637
2638 /* Directives: Mode selection. */
2639
2640 /* .syntax [unified|divided] - choose the new unified syntax
2641 (same for Arm and Thumb encoding, modulo slight differences in what
2642 can be represented) or the old divergent syntax for each mode. */
2643 static void
2644 s_syntax (int unused ATTRIBUTE_UNUSED)
2645 {
2646 char *name, delim;
2647
2648 name = input_line_pointer;
2649 delim = get_symbol_end ();
2650
2651 if (!strcasecmp (name, "unified"))
2652 unified_syntax = TRUE;
2653 else if (!strcasecmp (name, "divided"))
2654 unified_syntax = FALSE;
2655 else
2656 {
2657 as_bad (_("unrecognized syntax mode \"%s\""), name);
2658 return;
2659 }
2660 *input_line_pointer = delim;
2661 demand_empty_rest_of_line ();
2662 }
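
/* For example (illustrative), a unified-syntax Thumb-2 file typically
   begins with:

       .syntax unified
       .thumb

   whereas omitting the directive leaves the assembler in the old
   divided syntax.  */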
2663
2664 /* Directives: sectioning and alignment. */
2665
2666 /* Same as s_align_ptwo but align 0 => align 2. */
2667
2668 static void
2669 s_align (int unused ATTRIBUTE_UNUSED)
2670 {
2671 int temp;
2672 bfd_boolean fill_p;
2673 long temp_fill;
2674 long max_alignment = 15;
2675
2676 temp = get_absolute_expression ();
2677 if (temp > max_alignment)
2678 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2679 else if (temp < 0)
2680 {
2681 as_bad (_("alignment negative. 0 assumed."));
2682 temp = 0;
2683 }
2684
2685 if (*input_line_pointer == ',')
2686 {
2687 input_line_pointer++;
2688 temp_fill = get_absolute_expression ();
2689 fill_p = TRUE;
2690 }
2691 else
2692 {
2693 fill_p = FALSE;
2694 temp_fill = 0;
2695 }
2696
2697 if (!temp)
2698 temp = 2;
2699
2700 /* Only make a frag if we HAVE to. */
2701 if (temp && !need_pass_2)
2702 {
2703 if (!fill_p && subseg_text_p (now_seg))
2704 frag_align_code (temp, 0);
2705 else
2706 frag_align (temp, (int) temp_fill, 0);
2707 }
2708 demand_empty_rest_of_line ();
2709
2710 record_alignment (now_seg, temp);
2711 }
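
/* Examples (illustrative): ".align 0" aligns to a 4-byte (2^2) boundary,
   while ".align 3, 0xff" aligns to 8 bytes and pads with 0xff bytes
   instead of the usual code-section padding.  */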
2712
2713 static void
2714 s_bss (int ignore ATTRIBUTE_UNUSED)
2715 {
2716 /* We don't support putting frags in the BSS segment, we fake it by
2717 marking in_bss, then looking at s_skip for clues. */
2718 subseg_set (bss_section, 0);
2719 demand_empty_rest_of_line ();
2720 mapping_state (MAP_DATA);
2721 }
2722
2723 static void
2724 s_even (int ignore ATTRIBUTE_UNUSED)
2725 {
2726 /* Never make frag if expect extra pass. */
2727 if (!need_pass_2)
2728 frag_align (1, 0, 0);
2729
2730 record_alignment (now_seg, 1);
2731
2732 demand_empty_rest_of_line ();
2733 }
2734
2735 /* Directives: Literal pools. */
2736
2737 static literal_pool *
2738 find_literal_pool (void)
2739 {
2740 literal_pool * pool;
2741
2742 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2743 {
2744 if (pool->section == now_seg
2745 && pool->sub_section == now_subseg)
2746 break;
2747 }
2748
2749 return pool;
2750 }
2751
2752 static literal_pool *
2753 find_or_make_literal_pool (void)
2754 {
2755 /* Next literal pool ID number. */
2756 static unsigned int latest_pool_num = 1;
2757 literal_pool * pool;
2758
2759 pool = find_literal_pool ();
2760
2761 if (pool == NULL)
2762 {
2763 /* Create a new pool. */
2764 pool = xmalloc (sizeof (* pool));
2765 if (! pool)
2766 return NULL;
2767
2768 pool->next_free_entry = 0;
2769 pool->section = now_seg;
2770 pool->sub_section = now_subseg;
2771 pool->next = list_of_pools;
2772 pool->symbol = NULL;
2773
2774 /* Add it to the list. */
2775 list_of_pools = pool;
2776 }
2777
2778 /* New pools, and emptied pools, will have a NULL symbol. */
2779 if (pool->symbol == NULL)
2780 {
2781 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2782 (valueT) 0, &zero_address_frag);
2783 pool->id = latest_pool_num ++;
2784 }
2785
2786 /* Done. */
2787 return pool;
2788 }
2789
2790 /* Add the literal in the global 'inst'
2791 structure to the relevant literal pool. */
2792
2793 static int
2794 add_to_lit_pool (void)
2795 {
2796 literal_pool * pool;
2797 unsigned int entry;
2798
2799 pool = find_or_make_literal_pool ();
2800
2801 /* Check if this literal value is already in the pool. */
2802 for (entry = 0; entry < pool->next_free_entry; entry ++)
2803 {
2804 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2805 && (inst.reloc.exp.X_op == O_constant)
2806 && (pool->literals[entry].X_add_number
2807 == inst.reloc.exp.X_add_number)
2808 && (pool->literals[entry].X_unsigned
2809 == inst.reloc.exp.X_unsigned))
2810 break;
2811
2812 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2813 && (inst.reloc.exp.X_op == O_symbol)
2814 && (pool->literals[entry].X_add_number
2815 == inst.reloc.exp.X_add_number)
2816 && (pool->literals[entry].X_add_symbol
2817 == inst.reloc.exp.X_add_symbol)
2818 && (pool->literals[entry].X_op_symbol
2819 == inst.reloc.exp.X_op_symbol))
2820 break;
2821 }
2822
2823 /* Do we need to create a new entry? */
2824 if (entry == pool->next_free_entry)
2825 {
2826 if (entry >= MAX_LITERAL_POOL_SIZE)
2827 {
2828 inst.error = _("literal pool overflow");
2829 return FAIL;
2830 }
2831
2832 pool->literals[entry] = inst.reloc.exp;
2833 pool->next_free_entry += 1;
2834 }
2835
2836 inst.reloc.exp.X_op = O_symbol;
2837 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2838 inst.reloc.exp.X_add_symbol = pool->symbol;
2839
2840 return SUCCESS;
2841 }
2842
2843 /* Can't use symbol_new here, so have to create a symbol and then at
2844 a later date assign it a value. That's what these functions do. */
2845
2846 static void
2847 symbol_locate (symbolS * symbolP,
2848 const char * name, /* It is copied, the caller can modify. */
2849 segT segment, /* Segment identifier (SEG_<something>). */
2850 valueT valu, /* Symbol value. */
2851 fragS * frag) /* Associated fragment. */
2852 {
2853 unsigned int name_length;
2854 char * preserved_copy_of_name;
2855
2856 name_length = strlen (name) + 1; /* +1 for \0. */
2857 obstack_grow (&notes, name, name_length);
2858 preserved_copy_of_name = obstack_finish (&notes);
2859
2860 #ifdef tc_canonicalize_symbol_name
2861 preserved_copy_of_name =
2862 tc_canonicalize_symbol_name (preserved_copy_of_name);
2863 #endif
2864
2865 S_SET_NAME (symbolP, preserved_copy_of_name);
2866
2867 S_SET_SEGMENT (symbolP, segment);
2868 S_SET_VALUE (symbolP, valu);
2869 symbol_clear_list_pointers (symbolP);
2870
2871 symbol_set_frag (symbolP, frag);
2872
2873 /* Link to end of symbol chain. */
2874 {
2875 extern int symbol_table_frozen;
2876
2877 if (symbol_table_frozen)
2878 abort ();
2879 }
2880
2881 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2882
2883 obj_symbol_new_hook (symbolP);
2884
2885 #ifdef tc_symbol_new_hook
2886 tc_symbol_new_hook (symbolP);
2887 #endif
2888
2889 #ifdef DEBUG_SYMS
2890 verify_symbol_chain (symbol_rootP, symbol_lastP);
2891 #endif /* DEBUG_SYMS */
2892 }
2893
2894
2895 static void
2896 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2897 {
2898 unsigned int entry;
2899 literal_pool * pool;
2900 char sym_name[20];
2901
2902 pool = find_literal_pool ();
2903 if (pool == NULL
2904 || pool->symbol == NULL
2905 || pool->next_free_entry == 0)
2906 return;
2907
2908 mapping_state (MAP_DATA);
2909
2910 /* Align the pool, since its entries are accessed as words.
2911 Only make a frag if we have to. */
2912 if (!need_pass_2)
2913 frag_align (2, 0, 0);
2914
2915 record_alignment (now_seg, 2);
2916
2917 sprintf (sym_name, "$$lit_\002%x", pool->id);
2918
2919 symbol_locate (pool->symbol, sym_name, now_seg,
2920 (valueT) frag_now_fix (), frag_now);
2921 symbol_table_insert (pool->symbol);
2922
2923 ARM_SET_THUMB (pool->symbol, thumb_mode);
2924
2925 #if defined OBJ_COFF || defined OBJ_ELF
2926 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2927 #endif
2928
2929 for (entry = 0; entry < pool->next_free_entry; entry ++)
2930 /* First output the expression in the instruction to the pool. */
2931 emit_expr (&(pool->literals[entry]), 4); /* .word */
2932
2933 /* Mark the pool as empty. */
2934 pool->next_free_entry = 0;
2935 pool->symbol = NULL;
2936 }
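
/* Typical use of the pool directives (illustrative):

       ldr    r0, =0x12345678   @ constant is deposited in the pool
       ...
       .ltorg                   @ or ".pool": emit the pool here, while
                                @ it is still within reach of the ldr
*/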
2937
2938 #ifdef OBJ_ELF
2939 /* Forward declarations for functions below, in the MD interface
2940 section. */
2941 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2942 static valueT create_unwind_entry (int);
2943 static void start_unwind_section (const segT, int);
2944 static void add_unwind_opcode (valueT, int);
2945 static void flush_pending_unwind (void);
2946
2947 /* Directives: Data. */
2948
2949 static void
2950 s_arm_elf_cons (int nbytes)
2951 {
2952 expressionS exp;
2953
2954 #ifdef md_flush_pending_output
2955 md_flush_pending_output ();
2956 #endif
2957
2958 if (is_it_end_of_statement ())
2959 {
2960 demand_empty_rest_of_line ();
2961 return;
2962 }
2963
2964 #ifdef md_cons_align
2965 md_cons_align (nbytes);
2966 #endif
2967
2968 mapping_state (MAP_DATA);
2969 do
2970 {
2971 int reloc;
2972 char *base = input_line_pointer;
2973
2974 expression (& exp);
2975
2976 if (exp.X_op != O_symbol)
2977 emit_expr (&exp, (unsigned int) nbytes);
2978 else
2979 {
2980 char *before_reloc = input_line_pointer;
2981 reloc = parse_reloc (&input_line_pointer);
2982 if (reloc == -1)
2983 {
2984 as_bad (_("unrecognized relocation suffix"));
2985 ignore_rest_of_line ();
2986 return;
2987 }
2988 else if (reloc == BFD_RELOC_UNUSED)
2989 emit_expr (&exp, (unsigned int) nbytes);
2990 else
2991 {
2992 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2993 int size = bfd_get_reloc_size (howto);
2994
2995 if (reloc == BFD_RELOC_ARM_PLT32)
2996 {
2997 as_bad (_("(plt) is only valid on branch targets"));
2998 reloc = BFD_RELOC_UNUSED;
2999 size = 0;
3000 }
3001
3002 if (size > nbytes)
3003 as_bad (_("%s relocations do not fit in %d bytes"),
3004 howto->name, nbytes);
3005 else
3006 {
3007 /* We've parsed an expression stopping at O_symbol.
3008 But there may be more expression left now that we
3009 have parsed the relocation marker. Parse it again.
3010 XXX Surely there is a cleaner way to do this. */
3011 char *p = input_line_pointer;
3012 int offset;
3013 char *save_buf = alloca (input_line_pointer - base);
3014 memcpy (save_buf, base, input_line_pointer - base);
3015 memmove (base + (input_line_pointer - before_reloc),
3016 base, before_reloc - base);
3017
3018 input_line_pointer = base + (input_line_pointer-before_reloc);
3019 expression (&exp);
3020 memcpy (base, save_buf, p - base);
3021
3022 offset = nbytes - size;
3023 p = frag_more ((int) nbytes);
3024 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3025 size, &exp, 0, reloc);
3026 }
3027 }
3028 }
3029 }
3030 while (*input_line_pointer++ == ',');
3031
3032 /* Put terminator back into stream. */
3033 input_line_pointer --;
3034 demand_empty_rest_of_line ();
3035 }
3036
3037
3038 /* Parse a .rel31 directive. */
3039
3040 static void
3041 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3042 {
3043 expressionS exp;
3044 char *p;
3045 valueT highbit;
3046
3047 highbit = 0;
3048 if (*input_line_pointer == '1')
3049 highbit = 0x80000000;
3050 else if (*input_line_pointer != '0')
3051 as_bad (_("expected 0 or 1"));
3052
3053 input_line_pointer++;
3054 if (*input_line_pointer != ',')
3055 as_bad (_("missing comma"));
3056 input_line_pointer++;
3057
3058 #ifdef md_flush_pending_output
3059 md_flush_pending_output ();
3060 #endif
3061
3062 #ifdef md_cons_align
3063 md_cons_align (4);
3064 #endif
3065
3066 mapping_state (MAP_DATA);
3067
3068 expression (&exp);
3069
3070 p = frag_more (4);
3071 md_number_to_chars (p, highbit, 4);
3072 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3073 BFD_RELOC_ARM_PREL31);
3074
3075 demand_empty_rest_of_line ();
3076 }
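
/* For example (illustrative), ".rel31 1, handler" emits a 32-bit word
   with the top bit set and the low 31 bits carrying a place-relative
   (R_ARM_PREL31) reference to "handler".  */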
3077
3078 /* Directives: AEABI stack-unwind tables. */
3079
3080 /* Parse an unwind_fnstart directive. Simply records the current location. */
3081
3082 static void
3083 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3084 {
3085 demand_empty_rest_of_line ();
3086 /* Mark the start of the function. */
3087 unwind.proc_start = expr_build_dot ();
3088
3089 /* Reset the rest of the unwind info. */
3090 unwind.opcode_count = 0;
3091 unwind.table_entry = NULL;
3092 unwind.personality_routine = NULL;
3093 unwind.personality_index = -1;
3094 unwind.frame_size = 0;
3095 unwind.fp_offset = 0;
3096 unwind.fp_reg = REG_SP;
3097 unwind.fp_used = 0;
3098 unwind.sp_restored = 0;
3099 }
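
/* A typical annotated function looks like this (illustrative sketch):

       .fnstart
       push   {r4, r5, lr}
       .save  {r4, r5, lr}
       sub    sp, sp, #16
       .pad   #16
       ...
       .fnend

   The directives between .fnstart and .fnend accumulate the opcodes that
   create_unwind_entry () later turns into an EHABI table entry.  */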
3100
3101
3102 /* Parse a handlerdata directive. Creates the exception handling table entry
3103 for the function. */
3104
3105 static void
3106 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3107 {
3108 demand_empty_rest_of_line ();
3109 if (unwind.table_entry)
3110 as_bad (_("duplicate .handlerdata directive"));
3111
3112 create_unwind_entry (1);
3113 }
3114
3115 /* Parse an unwind_fnend directive. Generates the index table entry. */
3116
3117 static void
3118 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3119 {
3120 long where;
3121 char *ptr;
3122 valueT val;
3123
3124 demand_empty_rest_of_line ();
3125
3126 /* Add eh table entry. */
3127 if (unwind.table_entry == NULL)
3128 val = create_unwind_entry (0);
3129 else
3130 val = 0;
3131
3132 /* Add index table entry. This is two words. */
3133 start_unwind_section (unwind.saved_seg, 1);
3134 frag_align (2, 0, 0);
3135 record_alignment (now_seg, 2);
3136
3137 ptr = frag_more (8);
3138 where = frag_now_fix () - 8;
3139
3140 /* Self relative offset of the function start. */
3141 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3142 BFD_RELOC_ARM_PREL31);
3143
3144 /* Indicate dependency on EHABI-defined personality routines to the
3145 linker, if it hasn't been done already. */
3146 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3147 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3148 {
3149 static const char *const name[] =
3150 {
3151 "__aeabi_unwind_cpp_pr0",
3152 "__aeabi_unwind_cpp_pr1",
3153 "__aeabi_unwind_cpp_pr2"
3154 };
3155 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3156 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3157 marked_pr_dependency |= 1 << unwind.personality_index;
3158 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3159 = marked_pr_dependency;
3160 }
3161
3162 if (val)
3163 /* Inline exception table entry. */
3164 md_number_to_chars (ptr + 4, val, 4);
3165 else
3166 /* Self relative offset of the table entry. */
3167 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3168 BFD_RELOC_ARM_PREL31);
3169
3170 /* Restore the original section. */
3171 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3172 }
3173
3174
3175 /* Parse an unwind_cantunwind directive. */
3176
3177 static void
3178 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3179 {
3180 demand_empty_rest_of_line ();
3181 if (unwind.personality_routine || unwind.personality_index != -1)
3182 as_bad (_("personality routine specified for cantunwind frame"));
3183
3184 unwind.personality_index = -2;
3185 }
3186
3187
3188 /* Parse a personalityindex directive. */
3189
3190 static void
3191 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3192 {
3193 expressionS exp;
3194
3195 if (unwind.personality_routine || unwind.personality_index != -1)
3196 as_bad (_("duplicate .personalityindex directive"));
3197
3198 expression (&exp);
3199
3200 if (exp.X_op != O_constant
3201 || exp.X_add_number < 0 || exp.X_add_number > 15)
3202 {
3203 as_bad (_("bad personality routine number"));
3204 ignore_rest_of_line ();
3205 return;
3206 }
3207
3208 unwind.personality_index = exp.X_add_number;
3209
3210 demand_empty_rest_of_line ();
3211 }
3212
3213
3214 /* Parse a personality directive. */
3215
3216 static void
3217 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3218 {
3219 char *name, *p, c;
3220
3221 if (unwind.personality_routine || unwind.personality_index != -1)
3222 as_bad (_("duplicate .personality directive"));
3223
3224 name = input_line_pointer;
3225 c = get_symbol_end ();
3226 p = input_line_pointer;
3227 unwind.personality_routine = symbol_find_or_make (name);
3228 *p = c;
3229 demand_empty_rest_of_line ();
3230 }
3231
3232
3233 /* Parse a directive saving core registers. */
3234
3235 static void
3236 s_arm_unwind_save_core (void)
3237 {
3238 valueT op;
3239 long range;
3240 int n;
3241
3242 range = parse_reg_list (&input_line_pointer);
3243 if (range == FAIL)
3244 {
3245 as_bad (_("expected register list"));
3246 ignore_rest_of_line ();
3247 return;
3248 }
3249
3250 demand_empty_rest_of_line ();
3251
3252 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3253 into .unwind_save {..., sp...}. We aren't bothered about the value of
3254 ip because it is clobbered by calls. */
3255 if (unwind.sp_restored && unwind.fp_reg == 12
3256 && (range & 0x3000) == 0x1000)
3257 {
3258 unwind.opcode_count--;
3259 unwind.sp_restored = 0;
3260 range = (range | 0x2000) & ~0x1000;
3261 unwind.pending_offset = 0;
3262 }
3263
3264 /* Pop r4-r15. */
3265 if (range & 0xfff0)
3266 {
3267 /* See if we can use the short opcodes. These pop a block of up to 8
3268 registers starting with r4, plus maybe r14. */
3269 for (n = 0; n < 8; n++)
3270 {
3271 /* Break at the first non-saved register. */
3272 if ((range & (1 << (n + 4))) == 0)
3273 break;
3274 }
3275 /* See if there are any other bits set. */
3276 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3277 {
3278 /* Use the long form. */
3279 op = 0x8000 | ((range >> 4) & 0xfff);
3280 add_unwind_opcode (op, 2);
3281 }
3282 else
3283 {
3284 /* Use the short form. */
3285 if (range & 0x4000)
3286 op = 0xa8; /* Pop r14. */
3287 else
3288 op = 0xa0; /* Do not pop r14. */
3289 op |= (n - 1);
3290 add_unwind_opcode (op, 1);
3291 }
3292 }
3293
3294 /* Pop r0-r3. */
3295 if (range & 0xf)
3296 {
3297 op = 0xb100 | (range & 0xf);
3298 add_unwind_opcode (op, 2);
3299 }
3300
3301 /* Record the number of bytes pushed. */
3302 for (n = 0; n < 16; n++)
3303 {
3304 if (range & (1 << n))
3305 unwind.frame_size += 4;
3306 }
3307 }
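
/* Encoding examples (illustrative, following the EHABI opcode tables):

       .save {r4-r7, lr}   ->  0xAB     (short form: pop r4-r7, r14)
       .save {r4, r6}      ->  0x8005   (long form: pop under mask)
       .save {r0-r3}       ->  0xB10F   (pop r0-r3 under mask)
*/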
3308
3309
3310 /* Parse a directive saving FPA registers. */
3311
3312 static void
3313 s_arm_unwind_save_fpa (int reg)
3314 {
3315 expressionS exp;
3316 int num_regs;
3317 valueT op;
3318
3319 /* Get Number of registers to transfer. */
3320 if (skip_past_comma (&input_line_pointer) != FAIL)
3321 expression (&exp);
3322 else
3323 exp.X_op = O_illegal;
3324
3325 if (exp.X_op != O_constant)
3326 {
3327 as_bad (_("expected , <constant>"));
3328 ignore_rest_of_line ();
3329 return;
3330 }
3331
3332 num_regs = exp.X_add_number;
3333
3334 if (num_regs < 1 || num_regs > 4)
3335 {
3336 as_bad (_("number of registers must be in the range [1:4]"));
3337 ignore_rest_of_line ();
3338 return;
3339 }
3340
3341 demand_empty_rest_of_line ();
3342
3343 if (reg == 4)
3344 {
3345 /* Short form. */
3346 op = 0xb4 | (num_regs - 1);
3347 add_unwind_opcode (op, 1);
3348 }
3349 else
3350 {
3351 /* Long form. */
3352 op = 0xc800 | (reg << 4) | (num_regs - 1);
3353 add_unwind_opcode (op, 2);
3354 }
3355 unwind.frame_size += num_regs * 12;
3356 }
3357
3358
3359 /* Parse a directive saving VFP registers for ARMv6 and above. */
3360
3361 static void
3362 s_arm_unwind_save_vfp_armv6 (void)
3363 {
3364 int count;
3365 unsigned int start;
3366 valueT op;
3367 int num_vfpv3_regs = 0;
3368 int num_regs_below_16;
3369
3370 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3371 if (count == FAIL)
3372 {
3373 as_bad (_("expected register list"));
3374 ignore_rest_of_line ();
3375 return;
3376 }
3377
3378 demand_empty_rest_of_line ();
3379
3380 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3381 than FSTMX/FLDMX-style ones). */
3382
3383 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3384 if (start >= 16)
3385 num_vfpv3_regs = count;
3386 else if (start + count > 16)
3387 num_vfpv3_regs = start + count - 16;
3388
3389 if (num_vfpv3_regs > 0)
3390 {
3391 int start_offset = start > 16 ? start - 16 : 0;
3392 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3393 add_unwind_opcode (op, 2);
3394 }
3395
3396 /* Generate opcode for registers numbered in the range 0 .. 15. */
3397 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3398 assert (num_regs_below_16 + num_vfpv3_regs == count);
3399 if (num_regs_below_16 > 0)
3400 {
3401 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3402 add_unwind_opcode (op, 2);
3403 }
3404
3405 unwind.frame_size += count * 8;
3406 }
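
/* For example (illustrative), ".vsave {d8-d11}" yields opcode 0xC983
   (pop D8-D11, VPUSH/VPOP form) and ".vsave {d16-d17}" yields 0xC801
   (pop D16-D17).  */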
3407
3408
3409 /* Parse a directive saving VFP registers for pre-ARMv6. */
3410
3411 static void
3412 s_arm_unwind_save_vfp (void)
3413 {
3414 int count;
3415 unsigned int reg;
3416 valueT op;
3417
3418 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3419 if (count == FAIL)
3420 {
3421 as_bad (_("expected register list"));
3422 ignore_rest_of_line ();
3423 return;
3424 }
3425
3426 demand_empty_rest_of_line ();
3427
3428 if (reg == 8)
3429 {
3430 /* Short form. */
3431 op = 0xb8 | (count - 1);
3432 add_unwind_opcode (op, 1);
3433 }
3434 else
3435 {
3436 /* Long form. */
3437 op = 0xb300 | (reg << 4) | (count - 1);
3438 add_unwind_opcode (op, 2);
3439 }
3440 unwind.frame_size += count * 8 + 4;
3441 }
3442
3443
3444 /* Parse a directive saving iWMMXt data registers. */
3445
3446 static void
3447 s_arm_unwind_save_mmxwr (void)
3448 {
3449 int reg;
3450 int hi_reg;
3451 int i;
3452 unsigned mask = 0;
3453 valueT op;
3454
3455 if (*input_line_pointer == '{')
3456 input_line_pointer++;
3457
3458 do
3459 {
3460 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3461
3462 if (reg == FAIL)
3463 {
3464 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3465 goto error;
3466 }
3467
3468 if (mask >> reg)
3469 as_tsktsk (_("register list not in ascending order"));
3470 mask |= 1 << reg;
3471
3472 if (*input_line_pointer == '-')
3473 {
3474 input_line_pointer++;
3475 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3476 if (hi_reg == FAIL)
3477 {
3478 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3479 goto error;
3480 }
3481 else if (reg >= hi_reg)
3482 {
3483 as_bad (_("bad register range"));
3484 goto error;
3485 }
3486 for (; reg < hi_reg; reg++)
3487 mask |= 1 << reg;
3488 }
3489 }
3490 while (skip_past_comma (&input_line_pointer) != FAIL);
3491
3492 if (*input_line_pointer == '}')
3493 input_line_pointer++;
3494
3495 demand_empty_rest_of_line ();
3496
3497 /* Generate any deferred opcodes because we're going to be looking at
3498 the list. */
3499 flush_pending_unwind ();
3500
3501 for (i = 0; i < 16; i++)
3502 {
3503 if (mask & (1 << i))
3504 unwind.frame_size += 8;
3505 }
3506
3507 /* Attempt to combine with a previous opcode. We do this because gcc
3508 likes to output separate unwind directives for a single block of
3509 registers. */
3510 if (unwind.opcode_count > 0)
3511 {
3512 i = unwind.opcodes[unwind.opcode_count - 1];
3513 if ((i & 0xf8) == 0xc0)
3514 {
3515 i &= 7;
3516 /* Only merge if the blocks are contiguous. */
3517 if (i < 6)
3518 {
3519 if ((mask & 0xfe00) == (1 << 9))
3520 {
3521 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3522 unwind.opcode_count--;
3523 }
3524 }
3525 else if (i == 6 && unwind.opcode_count >= 2)
3526 {
3527 i = unwind.opcodes[unwind.opcode_count - 2];
3528 reg = i >> 4;
3529 i &= 0xf;
3530
3531 op = 0xffff << (reg - 1);
3532 if (reg > 0
3533 && ((mask & op) == (1u << (reg - 1))))
3534 {
3535 op = (1 << (reg + i + 1)) - 1;
3536 op &= ~((1 << reg) - 1);
3537 mask |= op;
3538 unwind.opcode_count -= 2;
3539 }
3540 }
3541 }
3542 }
3543
3544 hi_reg = 15;
3545 /* We want to generate opcodes in the order the registers have been
3546 saved, ie. descending order. */
3547 for (reg = 15; reg >= -1; reg--)
3548 {
3549 /* Save registers in blocks. */
3550 if (reg < 0
3551 || !(mask & (1 << reg)))
3552 {
3553 /* We found an unsaved reg. Generate opcodes to save the
3554 preceding block. */
3555 if (reg != hi_reg)
3556 {
3557 if (reg == 9)
3558 {
3559 /* Short form. */
3560 op = 0xc0 | (hi_reg - 10);
3561 add_unwind_opcode (op, 1);
3562 }
3563 else
3564 {
3565 /* Long form. */
3566 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3567 add_unwind_opcode (op, 2);
3568 }
3569 }
3570 hi_reg = reg - 1;
3571 }
3572 }
3573
3574 return;
3575 error:
3576 ignore_rest_of_line ();
3577 }
3578
3579 static void
3580 s_arm_unwind_save_mmxwcg (void)
3581 {
3582 int reg;
3583 int hi_reg;
3584 unsigned mask = 0;
3585 valueT op;
3586
3587 if (*input_line_pointer == '{')
3588 input_line_pointer++;
3589
3590 do
3591 {
3592 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3593
3594 if (reg == FAIL)
3595 {
3596 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3597 goto error;
3598 }
3599
3600 reg -= 8;
3601 if (mask >> reg)
3602 as_tsktsk (_("register list not in ascending order"));
3603 mask |= 1 << reg;
3604
3605 if (*input_line_pointer == '-')
3606 {
3607 input_line_pointer++;
3608 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3609 if (hi_reg == FAIL)
3610 {
3611 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
3612 goto error;
3613 }
3614 else if (reg >= hi_reg)
3615 {
3616 as_bad (_("bad register range"));
3617 goto error;
3618 }
3619 for (; reg < hi_reg; reg++)
3620 mask |= 1 << reg;
3621 }
3622 }
3623 while (skip_past_comma (&input_line_pointer) != FAIL);
3624
3625 if (*input_line_pointer == '}')
3626 input_line_pointer++;
3627
3628 demand_empty_rest_of_line ();
3629
3630 /* Generate any deferred opcodes because we're going to be looking at
3631 the list. */
3632 flush_pending_unwind ();
3633
3634 for (reg = 0; reg < 16; reg++)
3635 {
3636 if (mask & (1 << reg))
3637 unwind.frame_size += 4;
3638 }
3639 op = 0xc700 | mask;
3640 add_unwind_opcode (op, 2);
3641 return;
3642 error:
3643 ignore_rest_of_line ();
3644 }
3645
3646
3647 /* Parse an unwind_save directive.
3648 If the argument is non-zero, this is a .vsave directive. */
3649
3650 static void
3651 s_arm_unwind_save (int arch_v6)
3652 {
3653 char *peek;
3654 struct reg_entry *reg;
3655 bfd_boolean had_brace = FALSE;
3656
3657 /* Figure out what sort of save we have. */
3658 peek = input_line_pointer;
3659
3660 if (*peek == '{')
3661 {
3662 had_brace = TRUE;
3663 peek++;
3664 }
3665
3666 reg = arm_reg_parse_multi (&peek);
3667
3668 if (!reg)
3669 {
3670 as_bad (_("register expected"));
3671 ignore_rest_of_line ();
3672 return;
3673 }
3674
3675 switch (reg->type)
3676 {
3677 case REG_TYPE_FN:
3678 if (had_brace)
3679 {
3680 as_bad (_("FPA .unwind_save does not take a register list"));
3681 ignore_rest_of_line ();
3682 return;
3683 }
3684 input_line_pointer = peek;
3685 s_arm_unwind_save_fpa (reg->number);
3686 return;
3687
3688 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3689 case REG_TYPE_VFD:
3690 if (arch_v6)
3691 s_arm_unwind_save_vfp_armv6 ();
3692 else
3693 s_arm_unwind_save_vfp ();
3694 return;
3695 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3696 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3697
3698 default:
3699 as_bad (_(".unwind_save does not support this kind of register"));
3700 ignore_rest_of_line ();
3701 }
3702 }
3703
3704
3705 /* Parse an unwind_movsp directive. */
3706
3707 static void
3708 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3709 {
3710 int reg;
3711 valueT op;
3712 int offset;
3713
3714 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3715 if (reg == FAIL)
3716 {
3717 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
3718 ignore_rest_of_line ();
3719 return;
3720 }
3721
3722 /* Optional constant. */
3723 if (skip_past_comma (&input_line_pointer) != FAIL)
3724 {
3725 if (immediate_for_directive (&offset) == FAIL)
3726 return;
3727 }
3728 else
3729 offset = 0;
3730
3731 demand_empty_rest_of_line ();
3732
3733 if (reg == REG_SP || reg == REG_PC)
3734 {
3735 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3736 return;
3737 }
3738
3739 if (unwind.fp_reg != REG_SP)
3740 as_bad (_("unexpected .unwind_movsp directive"));
3741
3742 /* Generate opcode to restore the value. */
3743 op = 0x90 | reg;
3744 add_unwind_opcode (op, 1);
3745
3746 /* Record the information for later. */
3747 unwind.fp_reg = reg;
3748 unwind.fp_offset = unwind.frame_size - offset;
3749 unwind.sp_restored = 1;
3750 }
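
/* Typical use (illustrative): a prologue that does "mov ip, sp" is
   annotated with ".movsp ip", telling the unwinder that ip now holds
   the stack pointer value and that sp can be restored from it.  */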
3751
3752 /* Parse an unwind_pad directive. */
3753
3754 static void
3755 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3756 {
3757 int offset;
3758
3759 if (immediate_for_directive (&offset) == FAIL)
3760 return;
3761
3762 if (offset & 3)
3763 {
3764 as_bad (_("stack increment must be multiple of 4"));
3765 ignore_rest_of_line ();
3766 return;
3767 }
3768
3769 /* Don't generate any opcodes, just record the details for later. */
3770 unwind.frame_size += offset;
3771 unwind.pending_offset += offset;
3772
3773 demand_empty_rest_of_line ();
3774 }
3775
3776 /* Parse an unwind_setfp directive. */
3777
3778 static void
3779 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3780 {
3781 int sp_reg;
3782 int fp_reg;
3783 int offset;
3784
3785 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3786 if (skip_past_comma (&input_line_pointer) == FAIL)
3787 sp_reg = FAIL;
3788 else
3789 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3790
3791 if (fp_reg == FAIL || sp_reg == FAIL)
3792 {
3793 as_bad (_("expected <reg>, <reg>"));
3794 ignore_rest_of_line ();
3795 return;
3796 }
3797
3798 /* Optional constant. */
3799 if (skip_past_comma (&input_line_pointer) != FAIL)
3800 {
3801 if (immediate_for_directive (&offset) == FAIL)
3802 return;
3803 }
3804 else
3805 offset = 0;
3806
3807 demand_empty_rest_of_line ();
3808
3809 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
3810 {
3811 as_bad (_("register must be either sp or set by a previous"
3812 "unwind_movsp directive"));
3813 return;
3814 }
3815
3816 /* Don't generate any opcodes, just record the information for later. */
3817 unwind.fp_reg = fp_reg;
3818 unwind.fp_used = 1;
3819 if (sp_reg == REG_SP)
3820 unwind.fp_offset = unwind.frame_size - offset;
3821 else
3822 unwind.fp_offset -= offset;
3823 }
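
/* For example (illustrative), the sequence

       add     fp, sp, #8
       .setfp  fp, sp, #8

   records that fp = sp + 8 at this point, so the unwinder can later
   recover sp as fp - 8 instead of tracking every stack adjustment.  */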
3824
3825 /* Parse an unwind_raw directive. */
3826
3827 static void
3828 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3829 {
3830 expressionS exp;
3831 /* This is an arbitrary limit. */
3832 unsigned char op[16];
3833 int count;
3834
3835 expression (&exp);
3836 if (exp.X_op == O_constant
3837 && skip_past_comma (&input_line_pointer) != FAIL)
3838 {
3839 unwind.frame_size += exp.X_add_number;
3840 expression (&exp);
3841 }
3842 else
3843 exp.X_op = O_illegal;
3844
3845 if (exp.X_op != O_constant)
3846 {
3847 as_bad (_("expected <offset>, <opcode>"));
3848 ignore_rest_of_line ();
3849 return;
3850 }
3851
3852 count = 0;
3853
3854 /* Parse the opcode. */
3855 for (;;)
3856 {
3857 if (count >= 16)
3858 {
3859 as_bad (_("unwind opcode too long"));
3860 ignore_rest_of_line ();
          return;  /* Avoid writing past the end of op[] below.  */
3861 }
3862 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3863 {
3864 as_bad (_("invalid unwind opcode"));
3865 ignore_rest_of_line ();
3866 return;
3867 }
3868 op[count++] = exp.X_add_number;
3869
3870 /* Parse the next byte. */
3871 if (skip_past_comma (&input_line_pointer) == FAIL)
3872 break;
3873
3874 expression (&exp);
3875 }
3876
3877 /* Add the opcode bytes in reverse order. */
3878 while (count--)
3879 add_unwind_opcode (op[count], 1);
3880
3881 demand_empty_rest_of_line ();
3882 }
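
/* For example (illustrative), ".unwind_raw 4, 0xb1, 0x01" records a
   4-byte increase in the frame size and inserts the raw EHABI bytes
   0xB1 0x01 ("pop {r0}") into the opcode list verbatim.  */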
3883
3884
3885 /* Parse a .eabi_attribute directive. */
3886
3887 static void
3888 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3889 {
3890 int tag = s_vendor_attribute (OBJ_ATTR_PROC);
3891
3892 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
3893 attributes_set_explicitly[tag] = 1;
3894 }
3895 #endif /* OBJ_ELF */
3896
3897 static void s_arm_arch (int);
3898 static void s_arm_object_arch (int);
3899 static void s_arm_cpu (int);
3900 static void s_arm_fpu (int);
3901
3902 #ifdef TE_PE
3903
3904 static void
3905 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
3906 {
3907 expressionS exp;
3908
3909 do
3910 {
3911 expression (&exp);
3912 if (exp.X_op == O_symbol)
3913 exp.X_op = O_secrel;
3914
3915 emit_expr (&exp, 4);
3916 }
3917 while (*input_line_pointer++ == ',');
3918
3919 input_line_pointer--;
3920 demand_empty_rest_of_line ();
3921 }
3922 #endif /* TE_PE */
3923
3924 /* This table describes all the machine specific pseudo-ops the assembler
3925 has to support. The fields are:
3926 pseudo-op name without dot
3927 function to call to execute this pseudo-op
3928 Integer arg to pass to the function. */
3929
3930 const pseudo_typeS md_pseudo_table[] =
3931 {
3932 /* Never called because '.req' does not start a line. */
3933 { "req", s_req, 0 },
3934 /* Following two are likewise never called. */
3935 { "dn", s_dn, 0 },
3936 { "qn", s_qn, 0 },
3937 { "unreq", s_unreq, 0 },
3938 { "bss", s_bss, 0 },
3939 { "align", s_align, 0 },
3940 { "arm", s_arm, 0 },
3941 { "thumb", s_thumb, 0 },
3942 { "code", s_code, 0 },
3943 { "force_thumb", s_force_thumb, 0 },
3944 { "thumb_func", s_thumb_func, 0 },
3945 { "thumb_set", s_thumb_set, 0 },
3946 { "even", s_even, 0 },
3947 { "ltorg", s_ltorg, 0 },
3948 { "pool", s_ltorg, 0 },
3949 { "syntax", s_syntax, 0 },
3950 { "cpu", s_arm_cpu, 0 },
3951 { "arch", s_arm_arch, 0 },
3952 { "object_arch", s_arm_object_arch, 0 },
3953 { "fpu", s_arm_fpu, 0 },
3954 #ifdef OBJ_ELF
3955 { "word", s_arm_elf_cons, 4 },
3956 { "long", s_arm_elf_cons, 4 },
3957 { "rel31", s_arm_rel31, 0 },
3958 { "fnstart", s_arm_unwind_fnstart, 0 },
3959 { "fnend", s_arm_unwind_fnend, 0 },
3960 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3961 { "personality", s_arm_unwind_personality, 0 },
3962 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3963 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3964 { "save", s_arm_unwind_save, 0 },
3965 { "vsave", s_arm_unwind_save, 1 },
3966 { "movsp", s_arm_unwind_movsp, 0 },
3967 { "pad", s_arm_unwind_pad, 0 },
3968 { "setfp", s_arm_unwind_setfp, 0 },
3969 { "unwind_raw", s_arm_unwind_raw, 0 },
3970 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3971 #else
3972 { "word", cons, 4},
3973
3974 /* These are used for dwarf. */
3975 {"2byte", cons, 2},
3976 {"4byte", cons, 4},
3977 {"8byte", cons, 8},
3978 /* These are used for dwarf2. */
3979 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
3980 { "loc", dwarf2_directive_loc, 0 },
3981 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
3982 #endif
3983 { "extend", float_cons, 'x' },
3984 { "ldouble", float_cons, 'x' },
3985 { "packed", float_cons, 'p' },
3986 #ifdef TE_PE
3987 {"secrel32", pe_directive_secrel, 0},
3988 #endif
3989 { 0, 0, 0 }
3990 };
3991 \f
3992 /* Parser functions used exclusively in instruction operands. */
3993
3994 /* Generic immediate-value read function for use in insn parsing.
3995 STR points to the beginning of the immediate (the leading #);
3996 VAL receives the value; if the value is outside [MIN, MAX]
3997 issue an error. PREFIX_OPT is true if the immediate prefix is
3998 optional. */
3999
4000 static int
4001 parse_immediate (char **str, int *val, int min, int max,
4002 bfd_boolean prefix_opt)
4003 {
4004 expressionS exp;
4005 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4006 if (exp.X_op != O_constant)
4007 {
4008 inst.error = _("constant expression required");
4009 return FAIL;
4010 }
4011
4012 if (exp.X_add_number < min || exp.X_add_number > max)
4013 {
4014 inst.error = _("immediate value out of range");
4015 return FAIL;
4016 }
4017
4018 *val = exp.X_add_number;
4019 return SUCCESS;
4020 }
4021
4022 /* Less-generic immediate-value read function with the possibility of loading a
4023 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4024 instructions. Puts the result directly in inst.operands[i]. */
4025
4026 static int
4027 parse_big_immediate (char **str, int i)
4028 {
4029 expressionS exp;
4030 char *ptr = *str;
4031
4032 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4033
4034 if (exp.X_op == O_constant)
4035 {
4036 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4037 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4038 O_constant. We have to be careful not to break compilation for
4039 32-bit X_add_number, though. */
4040 if ((exp.X_add_number & ~0xffffffffl) != 0)
4041 {
4042 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4043 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4044 inst.operands[i].regisimm = 1;
4045 }
4046 }
4047 else if (exp.X_op == O_big
4048 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
4049 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
4050 {
4051 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4052 /* Bignums have their least significant bits in
4053 generic_bignum[0]. Make sure we put 32 bits in imm and
4054 32 bits in reg, in a (hopefully) portable way. */
4055 assert (parts != 0);
4056 inst.operands[i].imm = 0;
4057 for (j = 0; j < parts; j++, idx++)
4058 inst.operands[i].imm |= generic_bignum[idx]
4059 << (LITTLENUM_NUMBER_OF_BITS * j);
4060 inst.operands[i].reg = 0;
4061 for (j = 0; j < parts; j++, idx++)
4062 inst.operands[i].reg |= generic_bignum[idx]
4063 << (LITTLENUM_NUMBER_OF_BITS * j);
4064 inst.operands[i].regisimm = 1;
4065 }
4066 else
4067 return FAIL;
4068
4069 *str = ptr;
4070
4071 return SUCCESS;
4072 }
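
/* For example (illustrative), "vmov.i64 d0, #0xff0000ff0000ffff" needs a
   full 64-bit immediate; the value ends up with its low 32 bits in
   inst.operands[i].imm, its high 32 bits in .reg, and regisimm set.  */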
4073
4074 /* Returns the pseudo-register number of an FPA immediate constant,
4075 or FAIL if there isn't a valid constant here. */
4076
4077 static int
4078 parse_fpa_immediate (char ** str)
4079 {
4080 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4081 char * save_in;
4082 expressionS exp;
4083 int i;
4084 int j;
4085
4086 /* First try to match exact strings; this guarantees that some
4087 formats will work even for cross assembly. */
4088
4089 for (i = 0; fp_const[i]; i++)
4090 {
4091 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4092 {
4093 char *start = *str;
4094
4095 *str += strlen (fp_const[i]);
4096 if (is_end_of_line[(unsigned char) **str])
4097 return i + 8;
4098 *str = start;
4099 }
4100 }
4101
4102 /* Just because we didn't get a match doesn't mean that the constant
4103 isn't valid, just that it is in a format that we don't
4104 automatically recognize. Try parsing it with the standard
4105 expression routines. */
4106
4107 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4108
4109 /* Look for a raw floating point number. */
4110 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4111 && is_end_of_line[(unsigned char) *save_in])
4112 {
4113 for (i = 0; i < NUM_FLOAT_VALS; i++)
4114 {
4115 for (j = 0; j < MAX_LITTLENUMS; j++)
4116 {
4117 if (words[j] != fp_values[i][j])
4118 break;
4119 }
4120
4121 if (j == MAX_LITTLENUMS)
4122 {
4123 *str = save_in;
4124 return i + 8;
4125 }
4126 }
4127 }
4128
4129 /* Try to parse a more complex expression; this will probably fail
4130 unless the code uses a floating point prefix (e.g. "0f").
4131 save_in = input_line_pointer;
4132 input_line_pointer = *str;
4133 if (expression (&exp) == absolute_section
4134 && exp.X_op == O_big
4135 && exp.X_add_number < 0)
4136 {
4137 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4138 Ditto for 15. */
4139 if (gen_to_words (words, 5, (long) 15) == 0)
4140 {
4141 for (i = 0; i < NUM_FLOAT_VALS; i++)
4142 {
4143 for (j = 0; j < MAX_LITTLENUMS; j++)
4144 {
4145 if (words[j] != fp_values[i][j])
4146 break;
4147 }
4148
4149 if (j == MAX_LITTLENUMS)
4150 {
4151 *str = input_line_pointer;
4152 input_line_pointer = save_in;
4153 return i + 8;
4154 }
4155 }
4156 }
4157 }
4158
4159 *str = input_line_pointer;
4160 input_line_pointer = save_in;
4161 inst.error = _("invalid FPA immediate expression");
4162 return FAIL;
4163 }
4164
4165 /* Returns 1 if a number has "quarter-precision" float format
4166 0baBbbbbbc defgh000 00000000 00000000. */
4167
4168 static int
4169 is_quarter_float (unsigned imm)
4170 {
4171 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4172 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4173 }
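
/* For example (illustrative), 1.0f (0x3F800000) and -0.5f (0xBF000000)
   satisfy this test, whereas 0.1f (0x3DCCCCCD) does not and so cannot be
   encoded as a VFP/Neon floating-point immediate.  */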
4174
4175 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4176 0baBbbbbbc defgh000 00000000 00000000.
4177 The zero and minus-zero cases need special handling, since they can't be
4178 encoded in the "quarter-precision" float format, but can nonetheless be
4179 loaded as integer constants. */
4180
4181 static unsigned
4182 parse_qfloat_immediate (char **ccp, int *immed)
4183 {
4184 char *str = *ccp;
4185 char *fpnum;
4186 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4187 int found_fpchar = 0;
4188
4189 skip_past_char (&str, '#');
4190
4191 /* We must not accidentally parse an integer as a floating-point number. Make
4192 sure that the value we parse is not an integer by checking for special
4193 characters '.' or 'e'.
4194 FIXME: This is a horrible hack, but doing better is tricky because type
4195 information isn't in a very usable state at parse time. */
4196 fpnum = str;
4197 skip_whitespace (fpnum);
4198
4199 if (strncmp (fpnum, "0x", 2) == 0)
4200 return FAIL;
4201 else
4202 {
4203 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4204 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4205 {
4206 found_fpchar = 1;
4207 break;
4208 }
4209
4210 if (!found_fpchar)
4211 return FAIL;
4212 }
4213
4214 if ((str = atof_ieee (str, 's', words)) != NULL)
4215 {
4216 unsigned fpword = 0;
4217 int i;
4218
4219 /* Our FP word must be 32 bits (single-precision FP). */
4220 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4221 {
4222 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4223 fpword |= words[i];
4224 }
4225
4226 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4227 *immed = fpword;
4228 else
4229 return FAIL;
4230
4231 *ccp = str;
4232
4233 return SUCCESS;
4234 }
4235
4236 return FAIL;
4237 }
4238
4239 /* Shift operands. */
4240 enum shift_kind
4241 {
4242 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4243 };
4244
4245 struct asm_shift_name
4246 {
4247 const char *name;
4248 enum shift_kind kind;
4249 };
4250
4251 /* Third argument to parse_shift. */
4252 enum parse_shift_mode
4253 {
4254 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4255 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4256 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4257 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4258 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4259 };
4260
4261 /* Parse a <shift> specifier on an ARM data processing instruction.
4262 This has three forms:
4263
4264 (LSL|LSR|ASL|ASR|ROR) Rs
4265 (LSL|LSR|ASL|ASR|ROR) #imm
4266 RRX
4267
4268 Note that ASL is assimilated to LSL in the instruction encoding, and
4269 RRX to ROR #0 (which cannot be written as such). */
4270
4271 static int
4272 parse_shift (char **str, int i, enum parse_shift_mode mode)
4273 {
4274 const struct asm_shift_name *shift_name;
4275 enum shift_kind shift;
4276 char *s = *str;
4277 char *p = s;
4278 int reg;
4279
4280 for (p = *str; ISALPHA (*p); p++)
4281 ;
4282
4283 if (p == *str)
4284 {
4285 inst.error = _("shift expression expected");
4286 return FAIL;
4287 }
4288
4289 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4290
4291 if (shift_name == NULL)
4292 {
4293 inst.error = _("shift expression expected");
4294 return FAIL;
4295 }
4296
4297 shift = shift_name->kind;
4298
4299 switch (mode)
4300 {
4301 case NO_SHIFT_RESTRICT:
4302 case SHIFT_IMMEDIATE: break;
4303
4304 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4305 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4306 {
4307 inst.error = _("'LSL' or 'ASR' required");
4308 return FAIL;
4309 }
4310 break;
4311
4312 case SHIFT_LSL_IMMEDIATE:
4313 if (shift != SHIFT_LSL)
4314 {
4315 inst.error = _("'LSL' required");
4316 return FAIL;
4317 }
4318 break;
4319
4320 case SHIFT_ASR_IMMEDIATE:
4321 if (shift != SHIFT_ASR)
4322 {
4323 inst.error = _("'ASR' required");
4324 return FAIL;
4325 }
4326 break;
4327
4328 default: abort ();
4329 }
4330
4331 if (shift != SHIFT_RRX)
4332 {
4333 /* Whitespace can appear here if the next thing is a bare digit. */
4334 skip_whitespace (p);
4335
4336 if (mode == NO_SHIFT_RESTRICT
4337 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4338 {
4339 inst.operands[i].imm = reg;
4340 inst.operands[i].immisreg = 1;
4341 }
4342 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4343 return FAIL;
4344 }
4345 inst.operands[i].shift_kind = shift;
4346 inst.operands[i].shifted = 1;
4347 *str = p;
4348 return SUCCESS;
4349 }
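/* Illustrative outcomes of the routine above: "LSL #3" stores SHIFT_LSL in
   inst.operands[i].shift_kind and leaves the constant 3 in inst.reloc.exp;
   "LSL r4" (accepted only in NO_SHIFT_RESTRICT mode) additionally sets
   inst.operands[i].imm = 4 and .immisreg = 1; "RRX" sets shift_kind to
   SHIFT_RRX and no shift amount is parsed at all.  */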
4350
4351 /* Parse a <shifter_operand> for an ARM data processing instruction:
4352
4353 #<immediate>
4354 #<immediate>, <rotate>
4355 <Rm>
4356 <Rm>, <shift>
4357
4358 where <shift> is defined by parse_shift above, and <rotate> is a
4359 multiple of 2 between 0 and 30. Validation of immediate operands
4360 is deferred to md_apply_fix. */
4361
4362 static int
4363 parse_shifter_operand (char **str, int i)
4364 {
4365 int value;
4366 expressionS expr;
4367
4368 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4369 {
4370 inst.operands[i].reg = value;
4371 inst.operands[i].isreg = 1;
4372
4373 /* parse_shift will override this if appropriate */
4374 inst.reloc.exp.X_op = O_constant;
4375 inst.reloc.exp.X_add_number = 0;
4376
4377 if (skip_past_comma (str) == FAIL)
4378 return SUCCESS;
4379
4380 /* Shift operation on register. */
4381 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4382 }
4383
4384 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4385 return FAIL;
4386
4387 if (skip_past_comma (str) == SUCCESS)
4388 {
 4389       /* #x, y -- i.e. explicit rotation by Y.  */
4390 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4391 return FAIL;
4392
4393 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4394 {
4395 inst.error = _("constant expression expected");
4396 return FAIL;
4397 }
4398
4399 value = expr.X_add_number;
4400 if (value < 0 || value > 30 || value % 2 != 0)
4401 {
4402 inst.error = _("invalid rotation");
4403 return FAIL;
4404 }
4405 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4406 {
4407 inst.error = _("invalid constant");
4408 return FAIL;
4409 }
4410
4411 /* Convert to decoded value. md_apply_fix will put it back. */
4412 inst.reloc.exp.X_add_number
4413 = (((inst.reloc.exp.X_add_number << (32 - value))
4414 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
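      /* Worked example: "#4, 2" is the constant 4 with an explicit rotation
         of 2, i.e. 4 ROR 2 == 1, so 1 is what ends up in X_add_number here;
         md_apply_fix later re-derives the 8-bit constant and the rotation
         from that decoded value.  */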
4415 }
4416
4417 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4418 inst.reloc.pc_rel = 0;
4419 return SUCCESS;
4420 }
4421
 4422 /* Group relocation information.  Each entry in the table contains the
 4423    textual name of the relocation, as it may appear in assembler source
 4424    (where it is followed by a colon).
4425 Along with this textual name are the relocation codes to be used if
4426 the corresponding instruction is an ALU instruction (ADD or SUB only),
4427 an LDR, an LDRS, or an LDC. */
4428
4429 struct group_reloc_table_entry
4430 {
4431 const char *name;
4432 int alu_code;
4433 int ldr_code;
4434 int ldrs_code;
4435 int ldc_code;
4436 };
4437
4438 typedef enum
4439 {
4440 /* Varieties of non-ALU group relocation. */
4441
4442 GROUP_LDR,
4443 GROUP_LDRS,
4444 GROUP_LDC
4445 } group_reloc_type;
4446
4447 static struct group_reloc_table_entry group_reloc_table[] =
4448 { /* Program counter relative: */
4449 { "pc_g0_nc",
4450 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4451 0, /* LDR */
4452 0, /* LDRS */
4453 0 }, /* LDC */
4454 { "pc_g0",
4455 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4456 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4457 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4458 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4459 { "pc_g1_nc",
4460 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4461 0, /* LDR */
4462 0, /* LDRS */
4463 0 }, /* LDC */
4464 { "pc_g1",
4465 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4466 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4467 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4468 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4469 { "pc_g2",
4470 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4471 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4472 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4473 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4474 /* Section base relative */
4475 { "sb_g0_nc",
4476 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4477 0, /* LDR */
4478 0, /* LDRS */
4479 0 }, /* LDC */
4480 { "sb_g0",
4481 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4482 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4483 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4484 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4485 { "sb_g1_nc",
4486 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4487 0, /* LDR */
4488 0, /* LDRS */
4489 0 }, /* LDC */
4490 { "sb_g1",
4491 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4492 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4493 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4494 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4495 { "sb_g2",
4496 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4497 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4498 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4499 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
4500
 4501 /* Given the address of a pointer pointing to the textual name of a group
 4502    relocation as it may appear in assembler source, attempt to find its
 4503    details in group_reloc_table.  The pointer will be updated to the character
 4504    after the trailing colon.  On failure, FAIL will be returned; SUCCESS
 4505    otherwise.  On success, *OUT will be updated to point at the relevant
 4506    group_reloc_table entry. */
4507
4508 static int
4509 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4510 {
4511 unsigned int i;
4512 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4513 {
4514 int length = strlen (group_reloc_table[i].name);
4515
4516 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4517 && (*str)[length] == ':')
4518 {
4519 *out = &group_reloc_table[i];
4520 *str += (length + 1);
4521 return SUCCESS;
4522 }
4523 }
4524
4525 return FAIL;
4526 }
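/* For example, if *str points at "pc_g0:(sym)" (where "sym" is just an
   illustrative symbol name), the loop above matches the "pc_g0" entry,
   leaves *str pointing at "(sym)" after the colon, stores the entry in
   *OUT and returns SUCCESS.  */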
4527
4528 /* Parse a <shifter_operand> for an ARM data processing instruction
4529 (as for parse_shifter_operand) where group relocations are allowed:
4530
4531 #<immediate>
4532 #<immediate>, <rotate>
4533 #:<group_reloc>:<expression>
4534 <Rm>
4535 <Rm>, <shift>
4536
4537 where <group_reloc> is one of the strings defined in group_reloc_table.
 4538    The leading '#' characters ("hashes") are optional.
4539
4540 Everything else is as for parse_shifter_operand. */
4541
4542 static parse_operand_result
4543 parse_shifter_operand_group_reloc (char **str, int i)
4544 {
4545 /* Determine if we have the sequence of characters #: or just :
4546 coming next. If we do, then we check for a group relocation.
4547 If we don't, punt the whole lot to parse_shifter_operand. */
4548
4549 if (((*str)[0] == '#' && (*str)[1] == ':')
4550 || (*str)[0] == ':')
4551 {
4552 struct group_reloc_table_entry *entry;
4553
4554 if ((*str)[0] == '#')
4555 (*str) += 2;
4556 else
4557 (*str)++;
4558
4559 /* Try to parse a group relocation. Anything else is an error. */
4560 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4561 {
4562 inst.error = _("unknown group relocation");
4563 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4564 }
4565
4566 /* We now have the group relocation table entry corresponding to
4567 the name in the assembler source. Next, we parse the expression. */
4568 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4569 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4570
4571 /* Record the relocation type (always the ALU variant here). */
4572 inst.reloc.type = entry->alu_code;
4573 assert (inst.reloc.type != 0);
4574
4575 return PARSE_OPERAND_SUCCESS;
4576 }
4577 else
4578 return parse_shifter_operand (str, i) == SUCCESS
4579 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4580
4581 /* Never reached. */
4582 }
4583
4584 /* Parse all forms of an ARM address expression. Information is written
4585 to inst.operands[i] and/or inst.reloc.
4586
4587 Preindexed addressing (.preind=1):
4588
4589 [Rn, #offset] .reg=Rn .reloc.exp=offset
4590 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4591 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4592 .shift_kind=shift .reloc.exp=shift_imm
4593
4594 These three may have a trailing ! which causes .writeback to be set also.
4595
4596 Postindexed addressing (.postind=1, .writeback=1):
4597
4598 [Rn], #offset .reg=Rn .reloc.exp=offset
4599 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4600 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4601 .shift_kind=shift .reloc.exp=shift_imm
4602
4603 Unindexed addressing (.preind=0, .postind=0):
4604
4605 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4606
4607 Other:
4608
4609 [Rn]{!} shorthand for [Rn,#0]{!}
4610 =immediate .isreg=0 .reloc.exp=immediate
4611 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4612
4613 It is the caller's responsibility to check for addressing modes not
4614 supported by the instruction, and to set inst.reloc.type. */
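/* Concrete illustration of the encodings above: "[r3, r5, LSL #2]!" is
   recorded as .reg=3 .imm=5 .immisreg=1 .shift_kind=SHIFT_LSL with the
   constant 2 in .reloc.exp, plus .preind=1 and .writeback=1; a bare
   "[r3]" becomes .reg=3 .preind=1 with a constant offset of zero, as
   described under "Other" above.  */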
4615
4616 static parse_operand_result
4617 parse_address_main (char **str, int i, int group_relocations,
4618 group_reloc_type group_type)
4619 {
4620 char *p = *str;
4621 int reg;
4622
4623 if (skip_past_char (&p, '[') == FAIL)
4624 {
4625 if (skip_past_char (&p, '=') == FAIL)
4626 {
4627 /* bare address - translate to PC-relative offset */
4628 inst.reloc.pc_rel = 1;
4629 inst.operands[i].reg = REG_PC;
4630 inst.operands[i].isreg = 1;
4631 inst.operands[i].preind = 1;
4632 }
4633 /* else a load-constant pseudo op, no special treatment needed here */
4634
4635 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4636 return PARSE_OPERAND_FAIL;
4637
4638 *str = p;
4639 return PARSE_OPERAND_SUCCESS;
4640 }
4641
4642 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4643 {
4644 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4645 return PARSE_OPERAND_FAIL;
4646 }
4647 inst.operands[i].reg = reg;
4648 inst.operands[i].isreg = 1;
4649
4650 if (skip_past_comma (&p) == SUCCESS)
4651 {
4652 inst.operands[i].preind = 1;
4653
4654 if (*p == '+') p++;
4655 else if (*p == '-') p++, inst.operands[i].negative = 1;
4656
4657 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4658 {
4659 inst.operands[i].imm = reg;
4660 inst.operands[i].immisreg = 1;
4661
4662 if (skip_past_comma (&p) == SUCCESS)
4663 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4664 return PARSE_OPERAND_FAIL;
4665 }
4666 else if (skip_past_char (&p, ':') == SUCCESS)
4667 {
4668 /* FIXME: '@' should be used here, but it's filtered out by generic
4669 code before we get to see it here. This may be subject to
4670 change. */
4671 expressionS exp;
4672 my_get_expression (&exp, &p, GE_NO_PREFIX);
4673 if (exp.X_op != O_constant)
4674 {
4675 inst.error = _("alignment must be constant");
4676 return PARSE_OPERAND_FAIL;
4677 }
4678 inst.operands[i].imm = exp.X_add_number << 8;
4679 inst.operands[i].immisalign = 1;
4680 /* Alignments are not pre-indexes. */
4681 inst.operands[i].preind = 0;
4682 }
4683 else
4684 {
4685 if (inst.operands[i].negative)
4686 {
4687 inst.operands[i].negative = 0;
4688 p--;
4689 }
4690
4691 if (group_relocations
4692 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
4693 {
4694 struct group_reloc_table_entry *entry;
4695
4696 /* Skip over the #: or : sequence. */
4697 if (*p == '#')
4698 p += 2;
4699 else
4700 p++;
4701
4702 /* Try to parse a group relocation. Anything else is an
4703 error. */
4704 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
4705 {
4706 inst.error = _("unknown group relocation");
4707 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4708 }
4709
4710 /* We now have the group relocation table entry corresponding to
4711 the name in the assembler source. Next, we parse the
4712 expression. */
4713 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4714 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4715
4716 /* Record the relocation type. */
4717 switch (group_type)
4718 {
4719 case GROUP_LDR:
4720 inst.reloc.type = entry->ldr_code;
4721 break;
4722
4723 case GROUP_LDRS:
4724 inst.reloc.type = entry->ldrs_code;
4725 break;
4726
4727 case GROUP_LDC:
4728 inst.reloc.type = entry->ldc_code;
4729 break;
4730
4731 default:
4732 assert (0);
4733 }
4734
4735 if (inst.reloc.type == 0)
4736 {
4737 inst.error = _("this group relocation is not allowed on this instruction");
4738 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4739 }
4740 }
4741 else
4742 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4743 return PARSE_OPERAND_FAIL;
4744 }
4745 }
4746
4747 if (skip_past_char (&p, ']') == FAIL)
4748 {
4749 inst.error = _("']' expected");
4750 return PARSE_OPERAND_FAIL;
4751 }
4752
4753 if (skip_past_char (&p, '!') == SUCCESS)
4754 inst.operands[i].writeback = 1;
4755
4756 else if (skip_past_comma (&p) == SUCCESS)
4757 {
4758 if (skip_past_char (&p, '{') == SUCCESS)
4759 {
4760 /* [Rn], {expr} - unindexed, with option */
4761 if (parse_immediate (&p, &inst.operands[i].imm,
4762 0, 255, TRUE) == FAIL)
4763 return PARSE_OPERAND_FAIL;
4764
4765 if (skip_past_char (&p, '}') == FAIL)
4766 {
4767 inst.error = _("'}' expected at end of 'option' field");
4768 return PARSE_OPERAND_FAIL;
4769 }
4770 if (inst.operands[i].preind)
4771 {
4772 inst.error = _("cannot combine index with option");
4773 return PARSE_OPERAND_FAIL;
4774 }
4775 *str = p;
4776 return PARSE_OPERAND_SUCCESS;
4777 }
4778 else
4779 {
4780 inst.operands[i].postind = 1;
4781 inst.operands[i].writeback = 1;
4782
4783 if (inst.operands[i].preind)
4784 {
4785 inst.error = _("cannot combine pre- and post-indexing");
4786 return PARSE_OPERAND_FAIL;
4787 }
4788
4789 if (*p == '+') p++;
4790 else if (*p == '-') p++, inst.operands[i].negative = 1;
4791
4792 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4793 {
4794 /* We might be using the immediate for alignment already. If we
4795 are, OR the register number into the low-order bits. */
4796 if (inst.operands[i].immisalign)
4797 inst.operands[i].imm |= reg;
4798 else
4799 inst.operands[i].imm = reg;
4800 inst.operands[i].immisreg = 1;
4801
4802 if (skip_past_comma (&p) == SUCCESS)
4803 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4804 return PARSE_OPERAND_FAIL;
4805 }
4806 else
4807 {
4808 if (inst.operands[i].negative)
4809 {
4810 inst.operands[i].negative = 0;
4811 p--;
4812 }
4813 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4814 return PARSE_OPERAND_FAIL;
4815 }
4816 }
4817 }
4818
4819 /* If at this point neither .preind nor .postind is set, we have a
4820 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4821 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4822 {
4823 inst.operands[i].preind = 1;
4824 inst.reloc.exp.X_op = O_constant;
4825 inst.reloc.exp.X_add_number = 0;
4826 }
4827 *str = p;
4828 return PARSE_OPERAND_SUCCESS;
4829 }
4830
4831 static int
4832 parse_address (char **str, int i)
4833 {
4834 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
4835 ? SUCCESS : FAIL;
4836 }
4837
4838 static parse_operand_result
4839 parse_address_group_reloc (char **str, int i, group_reloc_type type)
4840 {
4841 return parse_address_main (str, i, 1, type);
4842 }
4843
4844 /* Parse an operand for a MOVW or MOVT instruction. */
4845 static int
4846 parse_half (char **str)
4847 {
4848 char * p;
4849
4850 p = *str;
4851 skip_past_char (&p, '#');
4852 if (strncasecmp (p, ":lower16:", 9) == 0)
4853 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4854 else if (strncasecmp (p, ":upper16:", 9) == 0)
4855 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4856
4857 if (inst.reloc.type != BFD_RELOC_UNUSED)
4858 {
4859 p += 9;
4860 skip_whitespace (p);
4861 }
4862
4863 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4864 return FAIL;
4865
4866 if (inst.reloc.type == BFD_RELOC_UNUSED)
4867 {
4868 if (inst.reloc.exp.X_op != O_constant)
4869 {
4870 inst.error = _("constant expression expected");
4871 return FAIL;
4872 }
4873 if (inst.reloc.exp.X_add_number < 0
4874 || inst.reloc.exp.X_add_number > 0xffff)
4875 {
4876 inst.error = _("immediate value out of range");
4877 return FAIL;
4878 }
4879 }
4880 *str = p;
4881 return SUCCESS;
4882 }
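/* Examples: "#:lower16:sym" (with "sym" an illustrative symbol) selects
   BFD_RELOC_ARM_MOVW and leaves the expression in inst.reloc.exp;
   ":upper16:" selects BFD_RELOC_ARM_MOVT; a plain constant such as
   "#0x1234" selects no relocation and is range-checked against 0..0xffff
   above.  */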
4883
4884 /* Miscellaneous. */
4885
4886 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4887 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4888 static int
4889 parse_psr (char **str)
4890 {
4891 char *p;
4892 unsigned long psr_field;
4893 const struct asm_psr *psr;
4894 char *start;
4895
4896 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4897 feature for ease of use and backwards compatibility. */
4898 p = *str;
4899 if (strncasecmp (p, "SPSR", 4) == 0)
4900 psr_field = SPSR_BIT;
4901 else if (strncasecmp (p, "CPSR", 4) == 0)
4902 psr_field = 0;
4903 else
4904 {
4905 start = p;
4906 do
4907 p++;
4908 while (ISALNUM (*p) || *p == '_');
4909
4910 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4911 if (!psr)
4912 return FAIL;
4913
4914 *str = p;
4915 return psr->field;
4916 }
4917
4918 p += 4;
4919 if (*p == '_')
4920 {
4921 /* A suffix follows. */
4922 p++;
4923 start = p;
4924
4925 do
4926 p++;
4927 while (ISALNUM (*p) || *p == '_');
4928
4929 psr = hash_find_n (arm_psr_hsh, start, p - start);
4930 if (!psr)
4931 goto error;
4932
4933 psr_field |= psr->field;
4934 }
4935 else
4936 {
4937 if (ISALNUM (*p))
4938 goto error; /* Garbage after "[CS]PSR". */
4939
4940 psr_field |= (PSR_c | PSR_f);
4941 }
4942 *str = p;
4943 return psr_field;
4944
4945 error:
4946 inst.error = _("flag for {c}psr instruction expected");
4947 return FAIL;
4948 }
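/* Examples: a bare "CPSR" yields PSR_c | PSR_f, and a bare "SPSR" the
   same with SPSR_BIT set; a "_" suffix is instead looked up in
   arm_psr_hsh and the flag bits found there are OR'd in.  Any other
   register name falls through to the v7-M special-register table
   (arm_v7m_psr_hsh).  */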
4949
4950 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4951 value suitable for splatting into the AIF field of the instruction. */
4952
4953 static int
4954 parse_cps_flags (char **str)
4955 {
4956 int val = 0;
4957 int saw_a_flag = 0;
4958 char *s = *str;
4959
4960 for (;;)
4961 switch (*s++)
4962 {
4963 case '\0': case ',':
4964 goto done;
4965
4966 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4967 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4968 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4969
4970 default:
4971 inst.error = _("unrecognized CPS flag");
4972 return FAIL;
4973 }
4974
4975 done:
4976 if (saw_a_flag == 0)
4977 {
4978 inst.error = _("missing CPS flags");
4979 return FAIL;
4980 }
4981
4982 *str = s - 1;
4983 return val;
4984 }
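/* For example, the flag string "if" (as in "CPSIE if") accumulates
   0x2 | 0x1 == 3; an empty flag string is rejected with "missing CPS
   flags".  */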
4985
 4986 /* Parse an endian specifier ("BE" or "LE", case insensitive);
 4987    returns 1 for big-endian, 0 for little-endian, FAIL for an error.  */
 4988
 4989 static int
 4990 parse_endian_specifier (char **str)
 4991 {
 4992   int big_endian;
 4993   char *s = *str;
 4994
 4995   if (strncasecmp (s, "BE", 2) == 0)
 4996     big_endian = 1;
 4997   else if (strncasecmp (s, "LE", 2) == 0)
 4998     big_endian = 0;
 4999   else
 5000     {
 5001       inst.error = _("valid endian specifiers are be or le");
 5002       return FAIL;
 5003     }
5004
5005 if (ISALNUM (s[2]) || s[2] == '_')
5006 {
5007 inst.error = _("valid endian specifiers are be or le");
5008 return FAIL;
5009 }
5010
5011 *str = s + 2;
 5012   return big_endian;
5013 }
5014
 5015 /* Parse a rotation specifier: ROR #0, #8, #16 or #24.  Returns a
 5016    value suitable for poking into the rotate field of an sxt or sxta
 5017    instruction, or FAIL on error.  */
5018
5019 static int
5020 parse_ror (char **str)
5021 {
5022 int rot;
5023 char *s = *str;
5024
5025 if (strncasecmp (s, "ROR", 3) == 0)
5026 s += 3;
5027 else
5028 {
5029 inst.error = _("missing rotation field after comma");
5030 return FAIL;
5031 }
5032
5033 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5034 return FAIL;
5035
5036 switch (rot)
5037 {
5038 case 0: *str = s; return 0x0;
5039 case 8: *str = s; return 0x1;
5040 case 16: *str = s; return 0x2;
5041 case 24: *str = s; return 0x3;
5042
5043 default:
5044 inst.error = _("rotation can only be 0, 8, 16, or 24");
5045 return FAIL;
5046 }
5047 }
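/* E.g. "ROR #16" returns 0x2, ready to be placed in the two-bit rotate
   field of the SXT/SXTA-family encodings.  */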
5048
5049 /* Parse a conditional code (from conds[] below). The value returned is in the
5050 range 0 .. 14, or FAIL. */
5051 static int
5052 parse_cond (char **str)
5053 {
5054 char *q;
5055 const struct asm_cond *c;
5056 int n;
 5057   /* Condition codes are always 2 characters, so collecting up to 3
 5058      is enough: anything longer will fail the hash lookup below.  */
5059 char cond[3];
5060
5061 q = *str;
5062 n = 0;
5063 while (ISALPHA (*q) && n < 3)
5064 {
5065 cond[n] = TOLOWER(*q);
5066 q++;
5067 n++;
5068 }
5069
5070 c = hash_find_n (arm_cond_hsh, cond, n);
5071 if (!c)
5072 {
5073 inst.error = _("condition required");
5074 return FAIL;
5075 }
5076
5077 *str = q;
5078 return c->value;
5079 }
5080
5081 /* Parse an option for a barrier instruction. Returns the encoding for the
5082 option, or FAIL. */
5083 static int
5084 parse_barrier (char **str)
5085 {
5086 char *p, *q;
5087 const struct asm_barrier_opt *o;
5088
5089 p = q = *str;
5090 while (ISALPHA (*q))
5091 q++;
5092
5093 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5094 if (!o)
5095 return FAIL;
5096
5097 *str = q;
5098 return o->value;
5099 }
5100
5101 /* Parse the operands of a table branch instruction. Similar to a memory
5102 operand. */
5103 static int
5104 parse_tb (char **str)
5105 {
5106 char * p = *str;
5107 int reg;
5108
5109 if (skip_past_char (&p, '[') == FAIL)
5110 {
5111 inst.error = _("'[' expected");
5112 return FAIL;
5113 }
5114
5115 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5116 {
5117 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5118 return FAIL;
5119 }
5120 inst.operands[0].reg = reg;
5121
5122 if (skip_past_comma (&p) == FAIL)
5123 {
5124 inst.error = _("',' expected");
5125 return FAIL;
5126 }
5127
5128 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5129 {
5130 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5131 return FAIL;
5132 }
5133 inst.operands[0].imm = reg;
5134
5135 if (skip_past_comma (&p) == SUCCESS)
5136 {
5137 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5138 return FAIL;
5139 if (inst.reloc.exp.X_add_number != 1)
5140 {
5141 inst.error = _("invalid shift");
5142 return FAIL;
5143 }
5144 inst.operands[0].shifted = 1;
5145 }
5146
5147 if (skip_past_char (&p, ']') == FAIL)
5148 {
5149 inst.error = _("']' expected");
5150 return FAIL;
5151 }
5152 *str = p;
5153 return SUCCESS;
5154 }
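/* For example, "[r0, r1]" (the TBB form) records reg=0 and imm=1 in
   inst.operands[0]; "[r0, r1, LSL #1]" (the TBH form) additionally sets
   .shifted, and any LSL amount other than #1 is rejected above.  */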
5155
5156 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5157 information on the types the operands can take and how they are encoded.
 5158    Up to four operands may be read; this function itself sets the
 5159    ".present" field for each operand it reads.
5160 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5161 else returns FAIL. */
5162
5163 static int
5164 parse_neon_mov (char **str, int *which_operand)
5165 {
5166 int i = *which_operand, val;
5167 enum arm_reg_type rtype;
5168 char *ptr = *str;
5169 struct neon_type_el optype;
5170
5171 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5172 {
5173 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5174 inst.operands[i].reg = val;
5175 inst.operands[i].isscalar = 1;
5176 inst.operands[i].vectype = optype;
5177 inst.operands[i++].present = 1;
5178
5179 if (skip_past_comma (&ptr) == FAIL)
5180 goto wanted_comma;
5181
5182 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5183 goto wanted_arm;
5184
5185 inst.operands[i].reg = val;
5186 inst.operands[i].isreg = 1;
5187 inst.operands[i].present = 1;
5188 }
5189 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5190 != FAIL)
5191 {
5192 /* Cases 0, 1, 2, 3, 5 (D only). */
5193 if (skip_past_comma (&ptr) == FAIL)
5194 goto wanted_comma;
5195
5196 inst.operands[i].reg = val;
5197 inst.operands[i].isreg = 1;
5198 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5199 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5200 inst.operands[i].isvec = 1;
5201 inst.operands[i].vectype = optype;
5202 inst.operands[i++].present = 1;
5203
5204 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5205 {
5206 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5207 Case 13: VMOV <Sd>, <Rm> */
5208 inst.operands[i].reg = val;
5209 inst.operands[i].isreg = 1;
5210 inst.operands[i].present = 1;
5211
5212 if (rtype == REG_TYPE_NQ)
5213 {
5214 first_error (_("can't use Neon quad register here"));
5215 return FAIL;
5216 }
5217 else if (rtype != REG_TYPE_VFS)
5218 {
5219 i++;
5220 if (skip_past_comma (&ptr) == FAIL)
5221 goto wanted_comma;
5222 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5223 goto wanted_arm;
5224 inst.operands[i].reg = val;
5225 inst.operands[i].isreg = 1;
5226 inst.operands[i].present = 1;
5227 }
5228 }
5229 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5230 &optype)) != FAIL)
5231 {
5232 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5233 Case 1: VMOV<c><q> <Dd>, <Dm>
5234 Case 8: VMOV.F32 <Sd>, <Sm>
5235 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5236
5237 inst.operands[i].reg = val;
5238 inst.operands[i].isreg = 1;
5239 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5240 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5241 inst.operands[i].isvec = 1;
5242 inst.operands[i].vectype = optype;
5243 inst.operands[i].present = 1;
5244
5245 if (skip_past_comma (&ptr) == SUCCESS)
5246 {
5247 /* Case 15. */
5248 i++;
5249
5250 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5251 goto wanted_arm;
5252
5253 inst.operands[i].reg = val;
5254 inst.operands[i].isreg = 1;
5255 inst.operands[i++].present = 1;
5256
5257 if (skip_past_comma (&ptr) == FAIL)
5258 goto wanted_comma;
5259
5260 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5261 goto wanted_arm;
5262
5263 inst.operands[i].reg = val;
5264 inst.operands[i].isreg = 1;
5265 inst.operands[i++].present = 1;
5266 }
5267 }
5268 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5269 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5270 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5271 Case 10: VMOV.F32 <Sd>, #<imm>
5272 Case 11: VMOV.F64 <Dd>, #<imm> */
5273 inst.operands[i].immisfloat = 1;
5274 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5275 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5276 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5277 ;
5278 else
5279 {
5280 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5281 return FAIL;
5282 }
5283 }
5284 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5285 {
5286 /* Cases 6, 7. */
5287 inst.operands[i].reg = val;
5288 inst.operands[i].isreg = 1;
5289 inst.operands[i++].present = 1;
5290
5291 if (skip_past_comma (&ptr) == FAIL)
5292 goto wanted_comma;
5293
5294 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5295 {
5296 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5297 inst.operands[i].reg = val;
5298 inst.operands[i].isscalar = 1;
5299 inst.operands[i].present = 1;
5300 inst.operands[i].vectype = optype;
5301 }
5302 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5303 {
5304 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5305 inst.operands[i].reg = val;
5306 inst.operands[i].isreg = 1;
5307 inst.operands[i++].present = 1;
5308
5309 if (skip_past_comma (&ptr) == FAIL)
5310 goto wanted_comma;
5311
5312 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5313 == FAIL)
5314 {
5315 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5316 return FAIL;
5317 }
5318
5319 inst.operands[i].reg = val;
5320 inst.operands[i].isreg = 1;
5321 inst.operands[i].isvec = 1;
5322 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5323 inst.operands[i].vectype = optype;
5324 inst.operands[i].present = 1;
5325
5326 if (rtype == REG_TYPE_VFS)
5327 {
5328 /* Case 14. */
5329 i++;
5330 if (skip_past_comma (&ptr) == FAIL)
5331 goto wanted_comma;
5332 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
5333 &optype)) == FAIL)
5334 {
5335 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
5336 return FAIL;
5337 }
5338 inst.operands[i].reg = val;
5339 inst.operands[i].isreg = 1;
5340 inst.operands[i].isvec = 1;
5341 inst.operands[i].issingle = 1;
5342 inst.operands[i].vectype = optype;
5343 inst.operands[i].present = 1;
5344 }
5345 }
5346 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
5347 != FAIL)
5348 {
5349 /* Case 13. */
5350 inst.operands[i].reg = val;
5351 inst.operands[i].isreg = 1;
5352 inst.operands[i].isvec = 1;
5353 inst.operands[i].issingle = 1;
5354 inst.operands[i].vectype = optype;
5355 inst.operands[i++].present = 1;
5356 }
5357 }
5358 else
5359 {
5360 first_error (_("parse error"));
5361 return FAIL;
5362 }
5363
5364 /* Successfully parsed the operands. Update args. */
5365 *which_operand = i;
5366 *str = ptr;
5367 return SUCCESS;
5368
5369 wanted_comma:
5370 first_error (_("expected comma"));
5371 return FAIL;
5372
5373 wanted_arm:
5374 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
5375 return FAIL;
5376 }
5377
5378 /* Matcher codes for parse_operands. */
5379 enum operand_parse_code
5380 {
5381 OP_stop, /* end of line */
5382
5383 OP_RR, /* ARM register */
5384 OP_RRnpc, /* ARM register, not r15 */
5385 OP_RRnpcb, /* ARM register, not r15, in square brackets */
5386 OP_RRw, /* ARM register, not r15, optional trailing ! */
5387 OP_RCP, /* Coprocessor number */
5388 OP_RCN, /* Coprocessor register */
5389 OP_RF, /* FPA register */
5390 OP_RVS, /* VFP single precision register */
5391 OP_RVD, /* VFP double precision register (0..15) */
5392 OP_RND, /* Neon double precision register (0..31) */
5393 OP_RNQ, /* Neon quad precision register */
5394 OP_RVSD, /* VFP single or double precision register */
5395 OP_RNDQ, /* Neon double or quad precision register */
5396 OP_RNSDQ, /* Neon single, double or quad precision register */
5397 OP_RNSC, /* Neon scalar D[X] */
5398 OP_RVC, /* VFP control register */
5399 OP_RMF, /* Maverick F register */
5400 OP_RMD, /* Maverick D register */
5401 OP_RMFX, /* Maverick FX register */
5402 OP_RMDX, /* Maverick DX register */
5403 OP_RMAX, /* Maverick AX register */
5404 OP_RMDS, /* Maverick DSPSC register */
5405 OP_RIWR, /* iWMMXt wR register */
5406 OP_RIWC, /* iWMMXt wC register */
5407 OP_RIWG, /* iWMMXt wCG register */
5408 OP_RXA, /* XScale accumulator register */
5409
5410 OP_REGLST, /* ARM register list */
5411 OP_VRSLST, /* VFP single-precision register list */
5412 OP_VRDLST, /* VFP double-precision register list */
5413 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
5414 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
5415 OP_NSTRLST, /* Neon element/structure list */
5416
5417 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5418 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
5419 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
5420 OP_RR_RNSC, /* ARM reg or Neon scalar. */
5421 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
5422 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
5423 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
5424 OP_VMOV, /* Neon VMOV operands. */
5425 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
5426 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
5427 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
5428
5429 OP_I0, /* immediate zero */
5430 OP_I7, /* immediate value 0 .. 7 */
5431 OP_I15, /* 0 .. 15 */
5432 OP_I16, /* 1 .. 16 */
5433 OP_I16z, /* 0 .. 16 */
5434 OP_I31, /* 0 .. 31 */
5435 OP_I31w, /* 0 .. 31, optional trailing ! */
5436 OP_I32, /* 1 .. 32 */
5437 OP_I32z, /* 0 .. 32 */
5438 OP_I63, /* 0 .. 63 */
5439 OP_I63s, /* -64 .. 63 */
5440 OP_I64, /* 1 .. 64 */
5441 OP_I64z, /* 0 .. 64 */
5442 OP_I255, /* 0 .. 255 */
5443
5444 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
5445 OP_I7b, /* 0 .. 7 */
5446 OP_I15b, /* 0 .. 15 */
5447 OP_I31b, /* 0 .. 31 */
5448
5449 OP_SH, /* shifter operand */
5450 OP_SHG, /* shifter operand with possible group relocation */
5451 OP_ADDR, /* Memory address expression (any mode) */
5452 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
5453 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
5454 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
5455 OP_EXP, /* arbitrary expression */
5456 OP_EXPi, /* same, with optional immediate prefix */
5457 OP_EXPr, /* same, with optional relocation suffix */
5458 OP_HALF, /* 0 .. 65535 or low/high reloc. */
5459
5460 OP_CPSF, /* CPS flags */
5461 OP_ENDI, /* Endianness specifier */
5462 OP_PSR, /* CPSR/SPSR mask for msr */
5463 OP_COND, /* conditional code */
5464 OP_TB, /* Table branch. */
5465
5466 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */
5467 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
5468
5469 OP_RRnpc_I0, /* ARM register or literal 0 */
5470 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
5471 OP_RR_EXi, /* ARM register or expression with imm prefix */
5472 OP_RF_IF, /* FPA register or immediate */
5473 OP_RIWR_RIWC, /* iWMMXt R or C reg */
5474 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
5475
5476 /* Optional operands. */
5477 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
5478 OP_oI31b, /* 0 .. 31 */
5479 OP_oI32b, /* 1 .. 32 */
5480 OP_oIffffb, /* 0 .. 65535 */
5481 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5482
5483 OP_oRR, /* ARM register */
5484 OP_oRRnpc, /* ARM register, not the PC */
5485 OP_oRRw, /* ARM register, not r15, optional trailing ! */
5486 OP_oRND, /* Optional Neon double precision register */
5487 OP_oRNQ, /* Optional Neon quad precision register */
5488 OP_oRNDQ, /* Optional Neon double or quad precision register */
5489 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
5490 OP_oSHll, /* LSL immediate */
5491 OP_oSHar, /* ASR immediate */
5492 OP_oSHllar, /* LSL or ASR immediate */
5493 OP_oROR, /* ROR 0/8/16/24 */
5494 OP_oBARRIER, /* Option argument for a barrier instruction. */
5495
5496 OP_FIRST_OPTIONAL = OP_oI7b
5497 };
5498
5499 /* Generic instruction operand parser. This does no encoding and no
5500 semantic validation; it merely squirrels values away in the inst
5501 structure. Returns SUCCESS or FAIL depending on whether the
5502 specified grammar matched. */
5503 static int
5504 parse_operands (char *str, const unsigned char *pattern)
5505 {
5506 unsigned const char *upat = pattern;
5507 char *backtrack_pos = 0;
5508 const char *backtrack_error = 0;
5509 int i, val, backtrack_index = 0;
5510 enum arm_reg_type rtype;
5511 parse_operand_result result;
5512
5513 #define po_char_or_fail(chr) do { \
5514 if (skip_past_char (&str, chr) == FAIL) \
5515 goto bad_args; \
5516 } while (0)
5517
5518 #define po_reg_or_fail(regtype) do { \
5519 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5520 &inst.operands[i].vectype); \
5521 if (val == FAIL) \
5522 { \
5523 first_error (_(reg_expected_msgs[regtype])); \
5524 goto failure; \
5525 } \
5526 inst.operands[i].reg = val; \
5527 inst.operands[i].isreg = 1; \
5528 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5529 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5530 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5531 || rtype == REG_TYPE_VFD \
5532 || rtype == REG_TYPE_NQ); \
5533 } while (0)
5534
5535 #define po_reg_or_goto(regtype, label) do { \
5536 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5537 &inst.operands[i].vectype); \
5538 if (val == FAIL) \
5539 goto label; \
5540 \
5541 inst.operands[i].reg = val; \
5542 inst.operands[i].isreg = 1; \
5543 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5544 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5545 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5546 || rtype == REG_TYPE_VFD \
5547 || rtype == REG_TYPE_NQ); \
5548 } while (0)
5549
5550 #define po_imm_or_fail(min, max, popt) do { \
5551 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5552 goto failure; \
5553 inst.operands[i].imm = val; \
5554 } while (0)
5555
5556 #define po_scalar_or_goto(elsz, label) do { \
5557 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5558 if (val == FAIL) \
5559 goto label; \
5560 inst.operands[i].reg = val; \
5561 inst.operands[i].isscalar = 1; \
5562 } while (0)
5563
5564 #define po_misc_or_fail(expr) do { \
5565 if (expr) \
5566 goto failure; \
5567 } while (0)
5568
5569 #define po_misc_or_fail_no_backtrack(expr) do { \
5570 result = expr; \
5571 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5572 backtrack_pos = 0; \
5573 if (result != PARSE_OPERAND_SUCCESS) \
5574 goto failure; \
5575 } while (0)
5576
5577 skip_whitespace (str);
5578
5579 for (i = 0; upat[i] != OP_stop; i++)
5580 {
5581 if (upat[i] >= OP_FIRST_OPTIONAL)
5582 {
5583 /* Remember where we are in case we need to backtrack. */
5584 assert (!backtrack_pos);
5585 backtrack_pos = str;
5586 backtrack_error = inst.error;
5587 backtrack_index = i;
5588 }
5589
5590 if (i > 0 && (i > 1 || inst.operands[0].present))
5591 po_char_or_fail (',');
5592
5593 switch (upat[i])
5594 {
5595 /* Registers */
5596 case OP_oRRnpc:
5597 case OP_RRnpc:
5598 case OP_oRR:
5599 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5600 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5601 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5602 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5603 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5604 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5605 case OP_oRND:
5606 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5607 case OP_RVC:
5608 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
5609 break;
5610 /* Also accept generic coprocessor regs for unknown registers. */
5611 coproc_reg:
5612 po_reg_or_fail (REG_TYPE_CN);
5613 break;
5614 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5615 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5616 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5617 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5618 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5619 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5620 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5621 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5622 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5623 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5624 case OP_oRNQ:
5625 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5626 case OP_oRNDQ:
5627 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5628 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5629 case OP_oRNSDQ:
5630 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5631
5632 /* Neon scalar. Using an element size of 8 means that some invalid
5633 scalars are accepted here, so deal with those in later code. */
5634 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5635
5636 /* WARNING: We can expand to two operands here. This has the potential
5637 to totally confuse the backtracking mechanism! It will be OK at
5638 least as long as we don't try to use optional args as well,
5639 though. */
5640 case OP_NILO:
5641 {
5642 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5643 inst.operands[i].present = 1;
5644 i++;
5645 skip_past_comma (&str);
5646 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5647 break;
5648 one_reg_only:
5649 /* Optional register operand was omitted. Unfortunately, it's in
5650 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5651 here (this is a bit grotty). */
5652 inst.operands[i] = inst.operands[i-1];
5653 inst.operands[i-1].present = 0;
5654 break;
5655 try_imm:
5656 /* There's a possibility of getting a 64-bit immediate here, so
5657 we need special handling. */
5658 if (parse_big_immediate (&str, i) == FAIL)
5659 {
5660 inst.error = _("immediate value is out of range");
5661 goto failure;
5662 }
5663 }
5664 break;
5665
5666 case OP_RNDQ_I0:
5667 {
5668 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5669 break;
5670 try_imm0:
5671 po_imm_or_fail (0, 0, TRUE);
5672 }
5673 break;
5674
5675 case OP_RVSD_I0:
5676 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5677 break;
5678
5679 case OP_RR_RNSC:
5680 {
5681 po_scalar_or_goto (8, try_rr);
5682 break;
5683 try_rr:
5684 po_reg_or_fail (REG_TYPE_RN);
5685 }
5686 break;
5687
5688 case OP_RNSDQ_RNSC:
5689 {
5690 po_scalar_or_goto (8, try_nsdq);
5691 break;
5692 try_nsdq:
5693 po_reg_or_fail (REG_TYPE_NSDQ);
5694 }
5695 break;
5696
5697 case OP_RNDQ_RNSC:
5698 {
5699 po_scalar_or_goto (8, try_ndq);
5700 break;
5701 try_ndq:
5702 po_reg_or_fail (REG_TYPE_NDQ);
5703 }
5704 break;
5705
5706 case OP_RND_RNSC:
5707 {
5708 po_scalar_or_goto (8, try_vfd);
5709 break;
5710 try_vfd:
5711 po_reg_or_fail (REG_TYPE_VFD);
5712 }
5713 break;
5714
5715 case OP_VMOV:
5716 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5717 not careful then bad things might happen. */
5718 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5719 break;
5720
5721 case OP_RNDQ_IMVNb:
5722 {
5723 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5724 break;
5725 try_mvnimm:
5726 /* There's a possibility of getting a 64-bit immediate here, so
5727 we need special handling. */
5728 if (parse_big_immediate (&str, i) == FAIL)
5729 {
5730 inst.error = _("immediate value is out of range");
5731 goto failure;
5732 }
5733 }
5734 break;
5735
5736 case OP_RNDQ_I63b:
5737 {
5738 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5739 break;
5740 try_shimm:
5741 po_imm_or_fail (0, 63, TRUE);
5742 }
5743 break;
5744
5745 case OP_RRnpcb:
5746 po_char_or_fail ('[');
5747 po_reg_or_fail (REG_TYPE_RN);
5748 po_char_or_fail (']');
5749 break;
5750
5751 case OP_RRw:
5752 case OP_oRRw:
5753 po_reg_or_fail (REG_TYPE_RN);
5754 if (skip_past_char (&str, '!') == SUCCESS)
5755 inst.operands[i].writeback = 1;
5756 break;
5757
5758 /* Immediates */
5759 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5760 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5761 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5762 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5763 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5764 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5765 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5766 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5767 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5768 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5769 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5770 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5771
5772 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5773 case OP_oI7b:
5774 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5775 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5776 case OP_oI31b:
5777 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5778 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5779 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5780
5781 /* Immediate variants */
5782 case OP_oI255c:
5783 po_char_or_fail ('{');
5784 po_imm_or_fail (0, 255, TRUE);
5785 po_char_or_fail ('}');
5786 break;
5787
5788 case OP_I31w:
5789 /* The expression parser chokes on a trailing !, so we have
5790 to find it first and zap it. */
5791 {
5792 char *s = str;
5793 while (*s && *s != ',')
5794 s++;
5795 if (s[-1] == '!')
5796 {
5797 s[-1] = '\0';
5798 inst.operands[i].writeback = 1;
5799 }
5800 po_imm_or_fail (0, 31, TRUE);
5801 if (str == s - 1)
5802 str = s;
5803 }
5804 break;
5805
5806 /* Expressions */
5807 case OP_EXPi: EXPi:
5808 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5809 GE_OPT_PREFIX));
5810 break;
5811
5812 case OP_EXP:
5813 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5814 GE_NO_PREFIX));
5815 break;
5816
5817 case OP_EXPr: EXPr:
5818 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5819 GE_NO_PREFIX));
5820 if (inst.reloc.exp.X_op == O_symbol)
5821 {
5822 val = parse_reloc (&str);
5823 if (val == -1)
5824 {
5825 inst.error = _("unrecognized relocation suffix");
5826 goto failure;
5827 }
5828 else if (val != BFD_RELOC_UNUSED)
5829 {
5830 inst.operands[i].imm = val;
5831 inst.operands[i].hasreloc = 1;
5832 }
5833 }
5834 break;
5835
5836 /* Operand for MOVW or MOVT. */
5837 case OP_HALF:
5838 po_misc_or_fail (parse_half (&str));
5839 break;
5840
5841 /* Register or expression */
5842 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5843 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5844
5845 /* Register or immediate */
5846 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5847 I0: po_imm_or_fail (0, 0, FALSE); break;
5848
5849 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5850 IF:
5851 if (!is_immediate_prefix (*str))
5852 goto bad_args;
5853 str++;
5854 val = parse_fpa_immediate (&str);
5855 if (val == FAIL)
5856 goto failure;
5857 /* FPA immediates are encoded as registers 8-15.
5858 parse_fpa_immediate has already applied the offset. */
5859 inst.operands[i].reg = val;
5860 inst.operands[i].isreg = 1;
5861 break;
5862
5863 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
5864 I32z: po_imm_or_fail (0, 32, FALSE); break;
5865
5866 /* Two kinds of register */
5867 case OP_RIWR_RIWC:
5868 {
5869 struct reg_entry *rege = arm_reg_parse_multi (&str);
5870 if (!rege
5871 || (rege->type != REG_TYPE_MMXWR
5872 && rege->type != REG_TYPE_MMXWC
5873 && rege->type != REG_TYPE_MMXWCG))
5874 {
5875 inst.error = _("iWMMXt data or control register expected");
5876 goto failure;
5877 }
5878 inst.operands[i].reg = rege->number;
5879 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5880 }
5881 break;
5882
5883 case OP_RIWC_RIWG:
5884 {
5885 struct reg_entry *rege = arm_reg_parse_multi (&str);
5886 if (!rege
5887 || (rege->type != REG_TYPE_MMXWC
5888 && rege->type != REG_TYPE_MMXWCG))
5889 {
5890 inst.error = _("iWMMXt control register expected");
5891 goto failure;
5892 }
5893 inst.operands[i].reg = rege->number;
5894 inst.operands[i].isreg = 1;
5895 }
5896 break;
5897
5898 /* Misc */
5899 case OP_CPSF: val = parse_cps_flags (&str); break;
5900 case OP_ENDI: val = parse_endian_specifier (&str); break;
5901 case OP_oROR: val = parse_ror (&str); break;
5902 case OP_PSR: val = parse_psr (&str); break;
5903 case OP_COND: val = parse_cond (&str); break;
5904 case OP_oBARRIER:val = parse_barrier (&str); break;
5905
5906 case OP_RVC_PSR:
5907 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5908 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5909 break;
5910 try_psr:
5911 val = parse_psr (&str);
5912 break;
5913
5914 case OP_APSR_RR:
5915 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5916 break;
5917 try_apsr:
5918 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
5919 instruction). */
5920 if (strncasecmp (str, "APSR_", 5) == 0)
5921 {
5922 unsigned found = 0;
5923 str += 5;
5924 while (found < 15)
5925 switch (*str++)
5926 {
5927 case 'c': found = (found & 1) ? 16 : found | 1; break;
5928 case 'n': found = (found & 2) ? 16 : found | 2; break;
5929 case 'z': found = (found & 4) ? 16 : found | 4; break;
5930 case 'v': found = (found & 8) ? 16 : found | 8; break;
5931 default: found = 16;
5932 }
5933 if (found != 15)
5934 goto failure;
5935 inst.operands[i].isvec = 1;
5936 }
5937 else
5938 goto failure;
5939 break;
5940
5941 case OP_TB:
5942 po_misc_or_fail (parse_tb (&str));
5943 break;
5944
5945 /* Register lists */
5946 case OP_REGLST:
5947 val = parse_reg_list (&str);
5948 if (*str == '^')
5949 {
5950 inst.operands[1].writeback = 1;
5951 str++;
5952 }
5953 break;
5954
5955 case OP_VRSLST:
5956 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5957 break;
5958
5959 case OP_VRDLST:
5960 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5961 break;
5962
5963 case OP_VRSDLST:
5964 /* Allow Q registers too. */
5965 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5966 REGLIST_NEON_D);
5967 if (val == FAIL)
5968 {
5969 inst.error = NULL;
5970 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5971 REGLIST_VFP_S);
5972 inst.operands[i].issingle = 1;
5973 }
5974 break;
5975
5976 case OP_NRDLST:
5977 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5978 REGLIST_NEON_D);
5979 break;
5980
5981 case OP_NSTRLST:
5982 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5983 &inst.operands[i].vectype);
5984 break;
5985
5986 /* Addressing modes */
5987 case OP_ADDR:
5988 po_misc_or_fail (parse_address (&str, i));
5989 break;
5990
5991 case OP_ADDRGLDR:
5992 po_misc_or_fail_no_backtrack (
5993 parse_address_group_reloc (&str, i, GROUP_LDR));
5994 break;
5995
5996 case OP_ADDRGLDRS:
5997 po_misc_or_fail_no_backtrack (
5998 parse_address_group_reloc (&str, i, GROUP_LDRS));
5999 break;
6000
6001 case OP_ADDRGLDC:
6002 po_misc_or_fail_no_backtrack (
6003 parse_address_group_reloc (&str, i, GROUP_LDC));
6004 break;
6005
6006 case OP_SH:
6007 po_misc_or_fail (parse_shifter_operand (&str, i));
6008 break;
6009
6010 case OP_SHG:
6011 po_misc_or_fail_no_backtrack (
6012 parse_shifter_operand_group_reloc (&str, i));
6013 break;
6014
6015 case OP_oSHll:
6016 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6017 break;
6018
6019 case OP_oSHar:
6020 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6021 break;
6022
6023 case OP_oSHllar:
6024 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6025 break;
6026
6027 default:
6028 as_fatal (_("unhandled operand code %d"), upat[i]);
6029 }
6030
6031 /* Various value-based sanity checks and shared operations. We
6032 do not signal immediate failures for the register constraints;
6033 this allows a syntax error to take precedence. */
6034 switch (upat[i])
6035 {
6036 case OP_oRRnpc:
6037 case OP_RRnpc:
6038 case OP_RRnpcb:
6039 case OP_RRw:
6040 case OP_oRRw:
6041 case OP_RRnpc_I0:
6042 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6043 inst.error = BAD_PC;
6044 break;
6045
6046 case OP_CPSF:
6047 case OP_ENDI:
6048 case OP_oROR:
6049 case OP_PSR:
6050 case OP_RVC_PSR:
6051 case OP_COND:
6052 case OP_oBARRIER:
6053 case OP_REGLST:
6054 case OP_VRSLST:
6055 case OP_VRDLST:
6056 case OP_VRSDLST:
6057 case OP_NRDLST:
6058 case OP_NSTRLST:
6059 if (val == FAIL)
6060 goto failure;
6061 inst.operands[i].imm = val;
6062 break;
6063
6064 default:
6065 break;
6066 }
6067
6068 /* If we get here, this operand was successfully parsed. */
6069 inst.operands[i].present = 1;
6070 continue;
6071
6072 bad_args:
6073 inst.error = BAD_ARGS;
6074
6075 failure:
6076 if (!backtrack_pos)
6077 {
6078 /* The parse routine should already have set inst.error, but set a
6079 default here just in case. */
6080 if (!inst.error)
6081 inst.error = _("syntax error");
6082 return FAIL;
6083 }
6084
6085 /* Do not backtrack over a trailing optional argument that
6086 absorbed some text. We will only fail again, with the
6087 'garbage following instruction' error message, which is
6088 probably less helpful than the current one. */
6089 if (backtrack_index == i && backtrack_pos != str
6090 && upat[i+1] == OP_stop)
6091 {
6092 if (!inst.error)
6093 inst.error = _("syntax error");
6094 return FAIL;
6095 }
6096
6097 /* Try again, skipping the optional argument at backtrack_pos. */
6098 str = backtrack_pos;
6099 inst.error = backtrack_error;
6100 inst.operands[backtrack_index].present = 0;
6101 i = backtrack_index;
6102 backtrack_pos = 0;
6103 }
6104
6105 /* Check that we have parsed all the arguments. */
6106 if (*str != '\0' && !inst.error)
6107 inst.error = _("garbage following instruction");
6108
6109 return inst.error ? FAIL : SUCCESS;
6110 }
6111
6112 #undef po_char_or_fail
6113 #undef po_reg_or_fail
6114 #undef po_reg_or_goto
6115 #undef po_imm_or_fail
 6116 #undef po_scalar_or_goto
6117 \f
6118 /* Shorthand macro for instruction encoding functions issuing errors. */
6119 #define constraint(expr, err) do { \
6120 if (expr) \
6121 { \
6122 inst.error = err; \
6123 return; \
6124 } \
6125 } while (0)
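/* Typical (illustrative) use inside an encoding function:

     constraint (inst.operands[0].reg == REG_PC, BAD_PC);

   records the error text and returns from the caller when the condition
   holds.  */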
6126
6127 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
6128 instructions are unpredictable if these registers are used. This
6129 is the BadReg predicate in ARM's Thumb-2 documentation. */
6130 #define reject_bad_reg(reg) \
6131 do \
6132 if (reg == REG_SP || reg == REG_PC) \
6133 { \
6134 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
6135 return; \
6136 } \
6137 while (0)
6138
6139 /* Functions for operand encoding. ARM, then Thumb. */
6140
 6141 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
6142
6143 /* If VAL can be encoded in the immediate field of an ARM instruction,
6144 return the encoded form. Otherwise, return FAIL. */
6145
6146 static unsigned int
6147 encode_arm_immediate (unsigned int val)
6148 {
6149 unsigned int a, i;
6150
6151 for (i = 0; i < 32; i += 2)
6152 if ((a = rotate_left (val, i)) <= 0xff)
6153 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6154
6155 return FAIL;
6156 }
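/* Worked example: VAL == 0xff000000 is representable.  At i == 8,
   rotate_left (VAL, 8) == 0xff, so the function returns
   0xff | (8 << 7) == 0x4ff: the 8-bit constant in bits 7:0 and the
   rotation amount, halved, in bits 11:8 (0xff rotated right by 8
   reproduces 0xff000000).  Note that rotate_left must also cope with
   i == 0, which is why the macro above masks its shift counts.  */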
6157
6158 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6159 return the encoded form. Otherwise, return FAIL. */
6160 static unsigned int
6161 encode_thumb32_immediate (unsigned int val)
6162 {
6163 unsigned int a, i;
6164
6165 if (val <= 0xff)
6166 return val;
6167
6168 for (i = 1; i <= 24; i++)
6169 {
6170 a = val >> i;
6171 if ((val & ~(0xff << i)) == 0)
6172 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6173 }
6174
6175 a = val & 0xff;
6176 if (val == ((a << 16) | a))
6177 return 0x100 | a;
6178 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6179 return 0x300 | a;
6180
6181 a = val & 0xff00;
6182 if (val == ((a << 16) | a))
6183 return 0x200 | (a >> 8);
6184
6185 return FAIL;
6186 }
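/* Worked examples: 0x00ab00ab matches the (a << 16) | a replication test
   and encodes as 0x100 | 0xab; 0xab00ab00 matches the 0xff00-based test
   and encodes as 0x200 | 0xab.  Values with no such structure, and not
   expressible as a single shifted byte, yield FAIL.  */
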
6187 /* Encode a VFP SP or DP register number into inst.instruction. */
6188
6189 static void
6190 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6191 {
6192 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6193 && reg > 15)
6194 {
6195 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
6196 {
6197 if (thumb_mode)
6198 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6199 fpu_vfp_ext_d32);
6200 else
6201 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6202 fpu_vfp_ext_d32);
6203 }
6204 else
6205 {
6206 first_error (_("D register out of range for selected VFP version"));
6207 return;
6208 }
6209 }
6210
6211 switch (pos)
6212 {
6213 case VFP_REG_Sd:
6214 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6215 break;
6216
6217 case VFP_REG_Sn:
6218 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6219 break;
6220
6221 case VFP_REG_Sm:
6222 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6223 break;
6224
6225 case VFP_REG_Dd:
6226 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6227 break;
6228
6229 case VFP_REG_Dn:
6230 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6231 break;
6232
6233 case VFP_REG_Dm:
6234 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6235 break;
6236
6237 default:
6238 abort ();
6239 }
6240 }
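
/* As a worked example: for VFP_REG_Sd, s5 places 5 >> 1 == 2 in bits
15:12 and 5 & 1 == 1 in bit 22; for VFP_REG_Dd, d17 places 17 & 15 == 1
in bits 15:12 and 17 >> 4 == 1 in bit 22 (the latter only once the d32
extension check above has passed).  */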
6241
6242 /* Encode a <shift> in an ARM-format instruction. The immediate,
6243 if any, is handled by md_apply_fix. */
6244 static void
6245 encode_arm_shift (int i)
6246 {
6247 if (inst.operands[i].shift_kind == SHIFT_RRX)
6248 inst.instruction |= SHIFT_ROR << 5;
6249 else
6250 {
6251 inst.instruction |= inst.operands[i].shift_kind << 5;
6252 if (inst.operands[i].immisreg)
6253 {
6254 inst.instruction |= SHIFT_BY_REG;
6255 inst.instruction |= inst.operands[i].imm << 8;
6256 }
6257 else
6258 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6259 }
6260 }
6261
6262 static void
6263 encode_arm_shifter_operand (int i)
6264 {
6265 if (inst.operands[i].isreg)
6266 {
6267 inst.instruction |= inst.operands[i].reg;
6268 encode_arm_shift (i);
6269 }
6270 else
6271 inst.instruction |= INST_IMMEDIATE;
6272 }
6273
6274 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6275 static void
6276 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
6277 {
6278 assert (inst.operands[i].isreg);
6279 inst.instruction |= inst.operands[i].reg << 16;
6280
6281 if (inst.operands[i].preind)
6282 {
6283 if (is_t)
6284 {
6285 inst.error = _("instruction does not accept preindexed addressing");
6286 return;
6287 }
6288 inst.instruction |= PRE_INDEX;
6289 if (inst.operands[i].writeback)
6290 inst.instruction |= WRITE_BACK;
6291
6292 }
6293 else if (inst.operands[i].postind)
6294 {
6295 assert (inst.operands[i].writeback);
6296 if (is_t)
6297 inst.instruction |= WRITE_BACK;
6298 }
6299 else /* unindexed - only for coprocessor */
6300 {
6301 inst.error = _("instruction does not accept unindexed addressing");
6302 return;
6303 }
6304
6305 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
6306 && (((inst.instruction & 0x000f0000) >> 16)
6307 == ((inst.instruction & 0x0000f000) >> 12)))
6308 as_warn ((inst.instruction & LOAD_BIT)
6309 ? _("destination register same as write-back base")
6310 : _("source register same as write-back base"));
6311 }
6312
6313 /* inst.operands[i] was set up by parse_address. Encode it into an
6314 ARM-format mode 2 load or store instruction. If is_t is true,
6315 reject forms that cannot be used with a T instruction (i.e. not
6316 post-indexed). */
6317 static void
6318 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6319 {
6320 encode_arm_addr_mode_common (i, is_t);
6321
6322 if (inst.operands[i].immisreg)
6323 {
6324 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
6325 inst.instruction |= inst.operands[i].imm;
6326 if (!inst.operands[i].negative)
6327 inst.instruction |= INDEX_UP;
6328 if (inst.operands[i].shifted)
6329 {
6330 if (inst.operands[i].shift_kind == SHIFT_RRX)
6331 inst.instruction |= SHIFT_ROR << 5;
6332 else
6333 {
6334 inst.instruction |= inst.operands[i].shift_kind << 5;
6335 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6336 }
6337 }
6338 }
6339 else /* immediate offset in inst.reloc */
6340 {
6341 if (inst.reloc.type == BFD_RELOC_UNUSED)
6342 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6343 }
6344 }
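
/* As a worked example: a plain immediate offset such as "[r1, #4]" is
left in inst.reloc and resolved later through BFD_RELOC_ARM_OFFSET_IMM,
whereas a register offset such as "[r1, r2, lsl #2]" is encoded here,
with INST_IMMEDIATE set (for loads and stores that bit actually selects
a register offset) and the shift amount fixed up via
BFD_RELOC_ARM_SHIFT_IMM.  */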
6345
6346 /* inst.operands[i] was set up by parse_address. Encode it into an
6347 ARM-format mode 3 load or store instruction. Reject forms that
6348 cannot be used with such instructions. If is_t is true, reject
6349 forms that cannot be used with a T instruction (i.e. not
6350 post-indexed). */
6351 static void
6352 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6353 {
6354 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6355 {
6356 inst.error = _("instruction does not accept scaled register index");
6357 return;
6358 }
6359
6360 encode_arm_addr_mode_common (i, is_t);
6361
6362 if (inst.operands[i].immisreg)
6363 {
6364 inst.instruction |= inst.operands[i].imm;
6365 if (!inst.operands[i].negative)
6366 inst.instruction |= INDEX_UP;
6367 }
6368 else /* immediate offset in inst.reloc */
6369 {
6370 inst.instruction |= HWOFFSET_IMM;
6371 if (inst.reloc.type == BFD_RELOC_UNUSED)
6372 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6373 }
6374 }
6375
6376 /* inst.operands[i] was set up by parse_address. Encode it into an
6377 ARM-format instruction. Reject all forms which cannot be encoded
6378 into a coprocessor load/store instruction. If wb_ok is false,
6379 reject use of writeback; if unind_ok is false, reject use of
6380 unindexed addressing. If reloc_override is not 0, use it instead
6381 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6382 (in which case it is preserved). */
6383
6384 static int
6385 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
6386 {
6387 inst.instruction |= inst.operands[i].reg << 16;
6388
6389 assert (!(inst.operands[i].preind && inst.operands[i].postind));
6390
6391 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
6392 {
6393 assert (!inst.operands[i].writeback);
6394 if (!unind_ok)
6395 {
6396 inst.error = _("instruction does not support unindexed addressing");
6397 return FAIL;
6398 }
6399 inst.instruction |= inst.operands[i].imm;
6400 inst.instruction |= INDEX_UP;
6401 return SUCCESS;
6402 }
6403
6404 if (inst.operands[i].preind)
6405 inst.instruction |= PRE_INDEX;
6406
6407 if (inst.operands[i].writeback)
6408 {
6409 if (inst.operands[i].reg == REG_PC)
6410 {
6411 inst.error = _("pc may not be used with write-back");
6412 return FAIL;
6413 }
6414 if (!wb_ok)
6415 {
6416 inst.error = _("instruction does not support writeback");
6417 return FAIL;
6418 }
6419 inst.instruction |= WRITE_BACK;
6420 }
6421
6422 if (reloc_override)
6423 inst.reloc.type = reloc_override;
6424 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
6425 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
6426 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
6427 {
6428 if (thumb_mode)
6429 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
6430 else
6431 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
6432 }
6433
6434 return SUCCESS;
6435 }
6436
6437 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6438 Determine whether it can be performed with a move instruction; if
6439 it can, convert inst.instruction to that move instruction and
6440 return 1; if it can't, convert inst.instruction to a literal-pool
6441 load and return 0. If this is not a valid thing to do in the
6442 current context, set inst.error and return 1.
6443
6444 inst.operands[i] describes the destination register. */
6445
6446 static int
6447 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
6448 {
6449 unsigned long tbit;
6450
6451 if (thumb_p)
6452 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
6453 else
6454 tbit = LOAD_BIT;
6455
6456 if ((inst.instruction & tbit) == 0)
6457 {
6458 inst.error = _("invalid pseudo operation");
6459 return 1;
6460 }
6461 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
6462 {
6463 inst.error = _("constant expression expected");
6464 return 1;
6465 }
6466 if (inst.reloc.exp.X_op == O_constant)
6467 {
6468 if (thumb_p)
6469 {
6470 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
6471 {
6472 /* This can be done with a mov(1) instruction. */
6473 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
6474 inst.instruction |= inst.reloc.exp.X_add_number;
6475 return 1;
6476 }
6477 }
6478 else
6479 {
6480 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
6481 if (value != FAIL)
6482 {
6483 /* This can be done with a mov instruction. */
6484 inst.instruction &= LITERAL_MASK;
6485 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
6486 inst.instruction |= value & 0xfff;
6487 return 1;
6488 }
6489
6490 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
6491 if (value != FAIL)
6492 {
6493 /* This can be done with a mvn instruction. */
6494 inst.instruction &= LITERAL_MASK;
6495 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
6496 inst.instruction |= value & 0xfff;
6497 return 1;
6498 }
6499 }
6500 }
6501
6502 if (add_to_lit_pool () == FAIL)
6503 {
6504 inst.error = _("literal pool insertion failed");
6505 return 1;
6506 }
6507 inst.operands[1].reg = REG_PC;
6508 inst.operands[1].isreg = 1;
6509 inst.operands[1].preind = 1;
6510 inst.reloc.pc_rel = 1;
6511 inst.reloc.type = (thumb_p
6512 ? BFD_RELOC_ARM_THUMB_OFFSET
6513 : (mode_3
6514 ? BFD_RELOC_ARM_HWLITERAL
6515 : BFD_RELOC_ARM_LITERAL));
6516 return 0;
6517 }
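
/* As a worked example: in ARM code "ldr r0, =0xff000000" is rewritten
as "mov r0, #0xff000000" because the constant fits the rotated-immediate
form, and "ldr r0, =0x00ffffff" as the equivalent mvn; a constant such
as 0x12345678 fits neither, so it goes into the literal pool and is
loaded PC-relative instead.  */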
6518
6519 /* Functions for instruction encoding, sorted by sub-architecture.
6520 First some generics; their names are taken from the conventional
6521 bit positions for register arguments in ARM format instructions. */
6522
6523 static void
6524 do_noargs (void)
6525 {
6526 }
6527
6528 static void
6529 do_rd (void)
6530 {
6531 inst.instruction |= inst.operands[0].reg << 12;
6532 }
6533
6534 static void
6535 do_rd_rm (void)
6536 {
6537 inst.instruction |= inst.operands[0].reg << 12;
6538 inst.instruction |= inst.operands[1].reg;
6539 }
6540
6541 static void
6542 do_rd_rn (void)
6543 {
6544 inst.instruction |= inst.operands[0].reg << 12;
6545 inst.instruction |= inst.operands[1].reg << 16;
6546 }
6547
6548 static void
6549 do_rn_rd (void)
6550 {
6551 inst.instruction |= inst.operands[0].reg << 16;
6552 inst.instruction |= inst.operands[1].reg << 12;
6553 }
6554
6555 static void
6556 do_rd_rm_rn (void)
6557 {
6558 unsigned Rn = inst.operands[2].reg;
6559 /* Enforce restrictions on SWP instruction. */
6560 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6561 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6562 _("Rn must not overlap other operands"));
6563 inst.instruction |= inst.operands[0].reg << 12;
6564 inst.instruction |= inst.operands[1].reg;
6565 inst.instruction |= Rn << 16;
6566 }
6567
6568 static void
6569 do_rd_rn_rm (void)
6570 {
6571 inst.instruction |= inst.operands[0].reg << 12;
6572 inst.instruction |= inst.operands[1].reg << 16;
6573 inst.instruction |= inst.operands[2].reg;
6574 }
6575
6576 static void
6577 do_rm_rd_rn (void)
6578 {
6579 inst.instruction |= inst.operands[0].reg;
6580 inst.instruction |= inst.operands[1].reg << 12;
6581 inst.instruction |= inst.operands[2].reg << 16;
6582 }
6583
6584 static void
6585 do_imm0 (void)
6586 {
6587 inst.instruction |= inst.operands[0].imm;
6588 }
6589
6590 static void
6591 do_rd_cpaddr (void)
6592 {
6593 inst.instruction |= inst.operands[0].reg << 12;
6594 encode_arm_cp_address (1, TRUE, TRUE, 0);
6595 }
6596
6597 /* ARM instructions, in alphabetical order by function name (except
6598 that wrapper functions appear immediately after the function they
6599 wrap). */
6600
6601 /* This is a pseudo-op of the form "adr rd, label" to be converted
6602 into a relative address of the form "add rd, pc, #label-.-8". */
6603
6604 static void
6605 do_adr (void)
6606 {
6607 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6608
6609 /* Frag hacking will turn this into a sub instruction if the offset turns
6610 out to be negative. */
6611 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6612 inst.reloc.pc_rel = 1;
6613 inst.reloc.exp.X_add_number -= 8;
6614 }
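
/* The -8 above reflects the ARM pipeline: the PC value read by the
instruction is the address of the instruction plus 8, so the fixup sees
the distance from that PC to the label.  */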
6615
6616 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6617 into a relative address of the form:
6618 add rd, pc, #low(label-.-8)
6619 add rd, rd, #high(label-.-8)  */
6620
6621 static void
6622 do_adrl (void)
6623 {
6624 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6625
6626 /* Frag hacking will turn this into a sub instruction if the offset turns
6627 out to be negative. */
6628 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6629 inst.reloc.pc_rel = 1;
6630 inst.size = INSN_SIZE * 2;
6631 inst.reloc.exp.X_add_number -= 8;
6632 }
6633
6634 static void
6635 do_arit (void)
6636 {
6637 if (!inst.operands[1].present)
6638 inst.operands[1].reg = inst.operands[0].reg;
6639 inst.instruction |= inst.operands[0].reg << 12;
6640 inst.instruction |= inst.operands[1].reg << 16;
6641 encode_arm_shifter_operand (2);
6642 }
6643
6644 static void
6645 do_barrier (void)
6646 {
6647 if (inst.operands[0].present)
6648 {
6649 constraint ((inst.instruction & 0xf0) != 0x40
6650 && inst.operands[0].imm != 0xf,
6651 _("bad barrier type"));
6652 inst.instruction |= inst.operands[0].imm;
6653 }
6654 else
6655 inst.instruction |= 0xf;
6656 }
6657
6658 static void
6659 do_bfc (void)
6660 {
6661 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6662 constraint (msb > 32, _("bit-field extends past end of register"));
6663 /* The instruction encoding stores the LSB and MSB,
6664 not the LSB and width. */
6665 inst.instruction |= inst.operands[0].reg << 12;
6666 inst.instruction |= inst.operands[1].imm << 7;
6667 inst.instruction |= (msb - 1) << 16;
6668 }
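
/* As a worked example: "bfc r0, #8, #4" arrives with lsb == 8 and
width == 4, so msb == 12 and the encoding stores lsb 8 in bits 11:7 and
msb - 1 == 11 in bits 20:16.  */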
6669
6670 static void
6671 do_bfi (void)
6672 {
6673 unsigned int msb;
6674
6675 /* #0 in second position is alternative syntax for bfc, which is
6676 the same instruction but with REG_PC in the Rm field. */
6677 if (!inst.operands[1].isreg)
6678 inst.operands[1].reg = REG_PC;
6679
6680 msb = inst.operands[2].imm + inst.operands[3].imm;
6681 constraint (msb > 32, _("bit-field extends past end of register"));
6682 /* The instruction encoding stores the LSB and MSB,
6683 not the LSB and width. */
6684 inst.instruction |= inst.operands[0].reg << 12;
6685 inst.instruction |= inst.operands[1].reg;
6686 inst.instruction |= inst.operands[2].imm << 7;
6687 inst.instruction |= (msb - 1) << 16;
6688 }
6689
6690 static void
6691 do_bfx (void)
6692 {
6693 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6694 _("bit-field extends past end of register"));
6695 inst.instruction |= inst.operands[0].reg << 12;
6696 inst.instruction |= inst.operands[1].reg;
6697 inst.instruction |= inst.operands[2].imm << 7;
6698 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6699 }
6700
6701 /* ARM V5 breakpoint instruction (argument parse)
6702 BKPT <16 bit unsigned immediate>
6703 Instruction is not conditional.
6704 The bit pattern given in insns[] has the COND_ALWAYS condition,
6705 and it is an error if the caller tried to override that. */
6706
6707 static void
6708 do_bkpt (void)
6709 {
6710 /* Top 12 of 16 bits to bits 19:8. */
6711 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6712
6713 /* Bottom 4 of 16 bits to bits 3:0. */
6714 inst.instruction |= inst.operands[0].imm & 0xf;
6715 }
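
/* As a worked example: "bkpt 0x1234" splits the immediate as
(0x1230 << 4) | 0x4, so 0x123 lands in bits 19:8 and 0x4 in bits
3:0.  */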
6716
6717 static void
6718 encode_branch (int default_reloc)
6719 {
6720 if (inst.operands[0].hasreloc)
6721 {
6722 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6723 _("the only suffix valid here is '(plt)'"));
6724 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6725 }
6726 else
6727 {
6728 inst.reloc.type = default_reloc;
6729 }
6730 inst.reloc.pc_rel = 1;
6731 }
6732
6733 static void
6734 do_branch (void)
6735 {
6736 #ifdef OBJ_ELF
6737 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6738 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6739 else
6740 #endif
6741 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6742 }
6743
6744 static void
6745 do_bl (void)
6746 {
6747 #ifdef OBJ_ELF
6748 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6749 {
6750 if (inst.cond == COND_ALWAYS)
6751 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6752 else
6753 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6754 }
6755 else
6756 #endif
6757 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6758 }
6759
6760 /* ARM V5 branch-link-exchange instruction (argument parse)
6761 BLX <target_addr> i.e. BLX(1)
6762 BLX{<condition>} <Rm> i.e. BLX(2)
6763 Unfortunately, there are two different opcodes for this mnemonic.
6764 So, the insns[].value is not used, and the code here zaps values
6765 into inst.instruction.
6766 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6767
6768 static void
6769 do_blx (void)
6770 {
6771 if (inst.operands[0].isreg)
6772 {
6773 /* Arg is a register; the opcode provided by insns[] is correct.
6774 It is not illegal to do "blx pc", just useless. */
6775 if (inst.operands[0].reg == REG_PC)
6776 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6777
6778 inst.instruction |= inst.operands[0].reg;
6779 }
6780 else
6781 {
6782 /* Arg is an address; this instruction cannot be executed
6783 conditionally, and the opcode must be adjusted. */
6784 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6785 inst.instruction = 0xfa000000;
6786 #ifdef OBJ_ELF
6787 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6788 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6789 else
6790 #endif
6791 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6792 }
6793 }
6794
6795 static void
6796 do_bx (void)
6797 {
6798 bfd_boolean want_reloc;
6799
6800 if (inst.operands[0].reg == REG_PC)
6801 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6802
6803 inst.instruction |= inst.operands[0].reg;
6804 /* Output R_ARM_V4BX relocations if this is an EABI object that looks
6805 like it is for ARMv4t or earlier. */
6806 want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
6807 if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
6808 want_reloc = TRUE;
6809
6810 #ifdef OBJ_ELF
6811 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
6812 #endif
6813 want_reloc = FALSE;
6814
6815 if (want_reloc)
6816 inst.reloc.type = BFD_RELOC_ARM_V4BX;
6817 }
6818
6819
6820 /* ARM v5TEJ. Jump to Jazelle code. */
6821
6822 static void
6823 do_bxj (void)
6824 {
6825 if (inst.operands[0].reg == REG_PC)
6826 as_tsktsk (_("use of r15 in bxj is not really useful"));
6827
6828 inst.instruction |= inst.operands[0].reg;
6829 }
6830
6831 /* Co-processor data operation:
6832 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6833 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6834 static void
6835 do_cdp (void)
6836 {
6837 inst.instruction |= inst.operands[0].reg << 8;
6838 inst.instruction |= inst.operands[1].imm << 20;
6839 inst.instruction |= inst.operands[2].reg << 12;
6840 inst.instruction |= inst.operands[3].reg << 16;
6841 inst.instruction |= inst.operands[4].reg;
6842 inst.instruction |= inst.operands[5].imm << 5;
6843 }
6844
6845 static void
6846 do_cmp (void)
6847 {
6848 inst.instruction |= inst.operands[0].reg << 16;
6849 encode_arm_shifter_operand (1);
6850 }
6851
6852 /* Transfer between coprocessor and ARM registers.
6853 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6854 MRC2
6855 MCR{cond}
6856 MCR2
6857
6858 No special properties. */
6859
6860 static void
6861 do_co_reg (void)
6862 {
6863 unsigned Rd;
6864
6865 Rd = inst.operands[2].reg;
6866 if (thumb_mode)
6867 {
6868 if (inst.instruction == 0xee000010
6869 || inst.instruction == 0xfe000010)
6870 /* MCR, MCR2 */
6871 reject_bad_reg (Rd);
6872 else
6873 /* MRC, MRC2 */
6874 constraint (Rd == REG_SP, BAD_SP);
6875 }
6876 else
6877 {
6878 /* MCR */
6879 if (inst.instruction == 0xe000010)
6880 constraint (Rd == REG_PC, BAD_PC);
6881 }
6882
6883
6884 inst.instruction |= inst.operands[0].reg << 8;
6885 inst.instruction |= inst.operands[1].imm << 21;
6886 inst.instruction |= Rd << 12;
6887 inst.instruction |= inst.operands[3].reg << 16;
6888 inst.instruction |= inst.operands[4].reg;
6889 inst.instruction |= inst.operands[5].imm << 5;
6890 }
6891
6892 /* Transfer between coprocessor register and pair of ARM registers.
6893 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6894 MCRR2
6895 MRRC{cond}
6896 MRRC2
6897
6898 Two XScale instructions are special cases of these:
6899
6900 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6901 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6902
6903 Result unpredictable if Rd or Rn is R15. */
6904
6905 static void
6906 do_co_reg2c (void)
6907 {
6908 unsigned Rd, Rn;
6909
6910 Rd = inst.operands[2].reg;
6911 Rn = inst.operands[3].reg;
6912
6913 if (thumb_mode)
6914 {
6915 reject_bad_reg (Rd);
6916 reject_bad_reg (Rn);
6917 }
6918 else
6919 {
6920 constraint (Rd == REG_PC, BAD_PC);
6921 constraint (Rn == REG_PC, BAD_PC);
6922 }
6923
6924 inst.instruction |= inst.operands[0].reg << 8;
6925 inst.instruction |= inst.operands[1].imm << 4;
6926 inst.instruction |= Rd << 12;
6927 inst.instruction |= Rn << 16;
6928 inst.instruction |= inst.operands[4].reg;
6929 }
6930
6931 static void
6932 do_cpsi (void)
6933 {
6934 inst.instruction |= inst.operands[0].imm << 6;
6935 if (inst.operands[1].present)
6936 {
6937 inst.instruction |= CPSI_MMOD;
6938 inst.instruction |= inst.operands[1].imm;
6939 }
6940 }
6941
6942 static void
6943 do_dbg (void)
6944 {
6945 inst.instruction |= inst.operands[0].imm;
6946 }
6947
6948 static void
6949 do_it (void)
6950 {
6951 /* There is no IT instruction in ARM mode. We
6952 process it but do not generate code for it. */
6953 inst.size = 0;
6954 }
6955
6956 static void
6957 do_ldmstm (void)
6958 {
6959 int base_reg = inst.operands[0].reg;
6960 int range = inst.operands[1].imm;
6961
6962 inst.instruction |= base_reg << 16;
6963 inst.instruction |= range;
6964
6965 if (inst.operands[1].writeback)
6966 inst.instruction |= LDM_TYPE_2_OR_3;
6967
6968 if (inst.operands[0].writeback)
6969 {
6970 inst.instruction |= WRITE_BACK;
6971 /* Check for unpredictable uses of writeback. */
6972 if (inst.instruction & LOAD_BIT)
6973 {
6974 /* Not allowed in LDM type 2. */
6975 if ((inst.instruction & LDM_TYPE_2_OR_3)
6976 && ((range & (1 << REG_PC)) == 0))
6977 as_warn (_("writeback of base register is UNPREDICTABLE"));
6978 /* Only allowed if base reg not in list for other types. */
6979 else if (range & (1 << base_reg))
6980 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6981 }
6982 else /* STM. */
6983 {
6984 /* Not allowed for type 2. */
6985 if (inst.instruction & LDM_TYPE_2_OR_3)
6986 as_warn (_("writeback of base register is UNPREDICTABLE"));
6987 /* Only allowed if base reg not in list, or first in list. */
6988 else if ((range & (1 << base_reg))
6989 && (range & ((1 << base_reg) - 1)))
6990 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6991 }
6992 }
6993 }
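
/* As a worked example: "ldmia r0!, {r1, r2, pc}" arrives with the
register list as the bit mask (1 << 1) | (1 << 2) | (1 << 15) == 0x8006
in operands[1].imm, r0 goes into bits 19:16, and the '!' sets
WRITE_BACK; the warnings above fire only for the UNPREDICTABLE
base-register/writeback combinations.  */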
6994
6995 /* ARMv5TE load-consecutive (argument parse)
6996 Mode is like LDRH.
6997
6998 LDRccD R, mode
6999 STRccD R, mode. */
7000
7001 static void
7002 do_ldrd (void)
7003 {
7004 constraint (inst.operands[0].reg % 2 != 0,
7005 _("first destination register must be even"));
7006 constraint (inst.operands[1].present
7007 && inst.operands[1].reg != inst.operands[0].reg + 1,
7008 _("can only load two consecutive registers"));
7009 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7010 constraint (!inst.operands[2].isreg, _("'[' expected"));
7011
7012 if (!inst.operands[1].present)
7013 inst.operands[1].reg = inst.operands[0].reg + 1;
7014
7015 if (inst.instruction & LOAD_BIT)
7016 {
7017 /* encode_arm_addr_mode_3 will diagnose overlap between the base
7018 register and the first register written; we have to diagnose
7019 overlap between the base and the second register written here. */
7020
7021 if (inst.operands[2].reg == inst.operands[1].reg
7022 && (inst.operands[2].writeback || inst.operands[2].postind))
7023 as_warn (_("base register written back, and overlaps "
7024 "second destination register"));
7025
7026 /* For an index-register load, the index register must not overlap the
7027 destination (even if not write-back). */
7028 else if (inst.operands[2].immisreg
7029 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
7030 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
7031 as_warn (_("index register overlaps destination register"));
7032 }
7033
7034 inst.instruction |= inst.operands[0].reg << 12;
7035 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
7036 }
7037
7038 static void
7039 do_ldrex (void)
7040 {
7041 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
7042 || inst.operands[1].postind || inst.operands[1].writeback
7043 || inst.operands[1].immisreg || inst.operands[1].shifted
7044 || inst.operands[1].negative
7045 /* This can arise if the programmer has written
7046 strex rN, rM, foo
7047 or if they have mistakenly used a register name as the last
7048 operand, e.g.:
7049 strex rN, rM, rX
7050 It is very difficult to distinguish between these two cases
7051 because "rX" might actually be a label, i.e. the register
7052 name has been occluded by a symbol of the same name. So we
7053 just generate a general 'bad addressing mode' type error
7054 message and leave it up to the programmer to discover the
7055 true cause and fix their mistake. */
7056 || (inst.operands[1].reg == REG_PC),
7057 BAD_ADDR_MODE);
7058
7059 constraint (inst.reloc.exp.X_op != O_constant
7060 || inst.reloc.exp.X_add_number != 0,
7061 _("offset must be zero in ARM encoding"));
7062
7063 inst.instruction |= inst.operands[0].reg << 12;
7064 inst.instruction |= inst.operands[1].reg << 16;
7065 inst.reloc.type = BFD_RELOC_UNUSED;
7066 }
7067
7068 static void
7069 do_ldrexd (void)
7070 {
7071 constraint (inst.operands[0].reg % 2 != 0,
7072 _("even register required"));
7073 constraint (inst.operands[1].present
7074 && inst.operands[1].reg != inst.operands[0].reg + 1,
7075 _("can only load two consecutive registers"));
7076 /* If op 1 were present and equal to PC, this function wouldn't
7077 have been called in the first place. */
7078 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7079
7080 inst.instruction |= inst.operands[0].reg << 12;
7081 inst.instruction |= inst.operands[2].reg << 16;
7082 }
7083
7084 static void
7085 do_ldst (void)
7086 {
7087 inst.instruction |= inst.operands[0].reg << 12;
7088 if (!inst.operands[1].isreg)
7089 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7090 return;
7091 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7092 }
7093
7094 static void
7095 do_ldstt (void)
7096 {
7097 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7098 reject [Rn,...]. */
7099 if (inst.operands[1].preind)
7100 {
7101 constraint (inst.reloc.exp.X_op != O_constant
7102 || inst.reloc.exp.X_add_number != 0,
7103 _("this instruction requires a post-indexed address"));
7104
7105 inst.operands[1].preind = 0;
7106 inst.operands[1].postind = 1;
7107 inst.operands[1].writeback = 1;
7108 }
7109 inst.instruction |= inst.operands[0].reg << 12;
7110 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7111 }
7112
7113 /* Halfword and signed-byte load/store operations. */
7114
7115 static void
7116 do_ldstv4 (void)
7117 {
7118 inst.instruction |= inst.operands[0].reg << 12;
7119 if (!inst.operands[1].isreg)
7120 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7121 return;
7122 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7123 }
7124
7125 static void
7126 do_ldsttv4 (void)
7127 {
7128 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7129 reject [Rn,...]. */
7130 if (inst.operands[1].preind)
7131 {
7132 constraint (inst.reloc.exp.X_op != O_constant
7133 || inst.reloc.exp.X_add_number != 0,
7134 _("this instruction requires a post-indexed address"));
7135
7136 inst.operands[1].preind = 0;
7137 inst.operands[1].postind = 1;
7138 inst.operands[1].writeback = 1;
7139 }
7140 inst.instruction |= inst.operands[0].reg << 12;
7141 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7142 }
7143
7144 /* Co-processor register load/store.
7145 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7146 static void
7147 do_lstc (void)
7148 {
7149 inst.instruction |= inst.operands[0].reg << 8;
7150 inst.instruction |= inst.operands[1].reg << 12;
7151 encode_arm_cp_address (2, TRUE, TRUE, 0);
7152 }
7153
7154 static void
7155 do_mlas (void)
7156 {
7157 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7158 if (inst.operands[0].reg == inst.operands[1].reg
7159 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
7160 && !(inst.instruction & 0x00400000))
7161 as_tsktsk (_("Rd and Rm should be different in mla"));
7162
7163 inst.instruction |= inst.operands[0].reg << 16;
7164 inst.instruction |= inst.operands[1].reg;
7165 inst.instruction |= inst.operands[2].reg << 8;
7166 inst.instruction |= inst.operands[3].reg << 12;
7167 }
7168
7169 static void
7170 do_mov (void)
7171 {
7172 inst.instruction |= inst.operands[0].reg << 12;
7173 encode_arm_shifter_operand (1);
7174 }
7175
7176 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7177 static void
7178 do_mov16 (void)
7179 {
7180 bfd_vma imm;
7181 bfd_boolean top;
7182
7183 top = (inst.instruction & 0x00400000) != 0;
7184 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7185 _(":lower16: not allowed in this instruction"));
7186 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7187 _(":upper16: not allowed in this instruction"));
7188 inst.instruction |= inst.operands[0].reg << 12;
7189 if (inst.reloc.type == BFD_RELOC_UNUSED)
7190 {
7191 imm = inst.reloc.exp.X_add_number;
7192 /* The value is in two pieces: 0:11, 16:19. */
7193 inst.instruction |= (imm & 0x00000fff);
7194 inst.instruction |= (imm & 0x0000f000) << 4;
7195 }
7196 }
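
/* As a worked example: "movw r0, #0x1234" with a plain constant splits
the value so that 0x234 fills bits 11:0 and 0x1 fills bits 19:16,
matching the imm4:imm12 layout of MOVW/MOVT.  */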
7197
7198 static void do_vfp_nsyn_opcode (const char *);
7199
7200 static int
7201 do_vfp_nsyn_mrs (void)
7202 {
7203 if (inst.operands[0].isvec)
7204 {
7205 if (inst.operands[1].reg != 1)
7206 first_error (_("operand 1 must be FPSCR"));
7207 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
7208 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
7209 do_vfp_nsyn_opcode ("fmstat");
7210 }
7211 else if (inst.operands[1].isvec)
7212 do_vfp_nsyn_opcode ("fmrx");
7213 else
7214 return FAIL;
7215
7216 return SUCCESS;
7217 }
7218
7219 static int
7220 do_vfp_nsyn_msr (void)
7221 {
7222 if (inst.operands[0].isvec)
7223 do_vfp_nsyn_opcode ("fmxr");
7224 else
7225 return FAIL;
7226
7227 return SUCCESS;
7228 }
7229
7230 static void
7231 do_mrs (void)
7232 {
7233 if (do_vfp_nsyn_mrs () == SUCCESS)
7234 return;
7235
7236 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7237 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
7238 != (PSR_c|PSR_f),
7239 _("'CPSR' or 'SPSR' expected"));
7240 inst.instruction |= inst.operands[0].reg << 12;
7241 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
7242 }
7243
7244 /* Two possible forms:
7245 "{C|S}PSR_<field>, Rm",
7246 "{C|S}PSR_f, #expression". */
7247
7248 static void
7249 do_msr (void)
7250 {
7251 if (do_vfp_nsyn_msr () == SUCCESS)
7252 return;
7253
7254 inst.instruction |= inst.operands[0].imm;
7255 if (inst.operands[1].isreg)
7256 inst.instruction |= inst.operands[1].reg;
7257 else
7258 {
7259 inst.instruction |= INST_IMMEDIATE;
7260 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7261 inst.reloc.pc_rel = 0;
7262 }
7263 }
7264
7265 static void
7266 do_mul (void)
7267 {
7268 if (!inst.operands[2].present)
7269 inst.operands[2].reg = inst.operands[0].reg;
7270 inst.instruction |= inst.operands[0].reg << 16;
7271 inst.instruction |= inst.operands[1].reg;
7272 inst.instruction |= inst.operands[2].reg << 8;
7273
7274 if (inst.operands[0].reg == inst.operands[1].reg
7275 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7276 as_tsktsk (_("Rd and Rm should be different in mul"));
7277 }
7278
7279 /* Long Multiply Parser
7280 UMULL RdLo, RdHi, Rm, Rs
7281 SMULL RdLo, RdHi, Rm, Rs
7282 UMLAL RdLo, RdHi, Rm, Rs
7283 SMLAL RdLo, RdHi, Rm, Rs. */
7284
7285 static void
7286 do_mull (void)
7287 {
7288 inst.instruction |= inst.operands[0].reg << 12;
7289 inst.instruction |= inst.operands[1].reg << 16;
7290 inst.instruction |= inst.operands[2].reg;
7291 inst.instruction |= inst.operands[3].reg << 8;
7292
7293 /* rdhi and rdlo must be different. */
7294 if (inst.operands[0].reg == inst.operands[1].reg)
7295 as_tsktsk (_("rdhi and rdlo must be different"));
7296
7297 /* rdhi, rdlo and rm must all be different before armv6. */
7298 if ((inst.operands[0].reg == inst.operands[2].reg
7299 || inst.operands[1].reg == inst.operands[2].reg)
7300 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7301 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7302 }
7303
7304 static void
7305 do_nop (void)
7306 {
7307 if (inst.operands[0].present)
7308 {
7309 /* Architectural NOP hints are CPSR sets with no bits selected. */
7310 inst.instruction &= 0xf0000000;
7311 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
7312 }
7313 }
7314
7315 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7316 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7317 Condition defaults to COND_ALWAYS.
7318 Error if Rd, Rn or Rm are R15. */
7319
7320 static void
7321 do_pkhbt (void)
7322 {
7323 inst.instruction |= inst.operands[0].reg << 12;
7324 inst.instruction |= inst.operands[1].reg << 16;
7325 inst.instruction |= inst.operands[2].reg;
7326 if (inst.operands[3].present)
7327 encode_arm_shift (3);
7328 }
7329
7330 /* ARM V6 PKHTB (Argument Parse). */
7331
7332 static void
7333 do_pkhtb (void)
7334 {
7335 if (!inst.operands[3].present)
7336 {
7337 /* If the shift specifier is omitted, turn the instruction
7338 into pkhbt rd, rm, rn. */
7339 inst.instruction &= 0xfff00010;
7340 inst.instruction |= inst.operands[0].reg << 12;
7341 inst.instruction |= inst.operands[1].reg;
7342 inst.instruction |= inst.operands[2].reg << 16;
7343 }
7344 else
7345 {
7346 inst.instruction |= inst.operands[0].reg << 12;
7347 inst.instruction |= inst.operands[1].reg << 16;
7348 inst.instruction |= inst.operands[2].reg;
7349 encode_arm_shift (3);
7350 }
7351 }
7352
7353 /* ARMv5TE: Preload-Cache
7354
7355 PLD <addr_mode>
7356
7357 Syntactically, like LDR with B=1, W=0, L=1. */
7358
7359 static void
7360 do_pld (void)
7361 {
7362 constraint (!inst.operands[0].isreg,
7363 _("'[' expected after PLD mnemonic"));
7364 constraint (inst.operands[0].postind,
7365 _("post-indexed expression used in preload instruction"));
7366 constraint (inst.operands[0].writeback,
7367 _("writeback used in preload instruction"));
7368 constraint (!inst.operands[0].preind,
7369 _("unindexed addressing used in preload instruction"));
7370 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7371 }
7372
7373 /* ARMv7: PLI <addr_mode> */
7374 static void
7375 do_pli (void)
7376 {
7377 constraint (!inst.operands[0].isreg,
7378 _("'[' expected after PLI mnemonic"));
7379 constraint (inst.operands[0].postind,
7380 _("post-indexed expression used in preload instruction"));
7381 constraint (inst.operands[0].writeback,
7382 _("writeback used in preload instruction"));
7383 constraint (!inst.operands[0].preind,
7384 _("unindexed addressing used in preload instruction"));
7385 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7386 inst.instruction &= ~PRE_INDEX;
7387 }
7388
7389 static void
7390 do_push_pop (void)
7391 {
7392 inst.operands[1] = inst.operands[0];
7393 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7394 inst.operands[0].isreg = 1;
7395 inst.operands[0].writeback = 1;
7396 inst.operands[0].reg = REG_SP;
7397 do_ldmstm ();
7398 }
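
/* In effect "push {r4, lr}" is treated as if it had been written
"stmdb sp!, {r4, lr}": operand 0 becomes sp with writeback and the
original register list moves to operand 1 before do_ldmstm encodes
it.  */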
7399
7400 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7401 word at the specified address and the following word
7402 respectively.
7403 Unconditionally executed.
7404 Error if Rn is R15. */
7405
7406 static void
7407 do_rfe (void)
7408 {
7409 inst.instruction |= inst.operands[0].reg << 16;
7410 if (inst.operands[0].writeback)
7411 inst.instruction |= WRITE_BACK;
7412 }
7413
7414 /* ARM V6 ssat (argument parse). */
7415
7416 static void
7417 do_ssat (void)
7418 {
7419 inst.instruction |= inst.operands[0].reg << 12;
7420 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7421 inst.instruction |= inst.operands[2].reg;
7422
7423 if (inst.operands[3].present)
7424 encode_arm_shift (3);
7425 }
7426
7427 /* ARM V6 usat (argument parse). */
7428
7429 static void
7430 do_usat (void)
7431 {
7432 inst.instruction |= inst.operands[0].reg << 12;
7433 inst.instruction |= inst.operands[1].imm << 16;
7434 inst.instruction |= inst.operands[2].reg;
7435
7436 if (inst.operands[3].present)
7437 encode_arm_shift (3);
7438 }
7439
7440 /* ARM V6 ssat16 (argument parse). */
7441
7442 static void
7443 do_ssat16 (void)
7444 {
7445 inst.instruction |= inst.operands[0].reg << 12;
7446 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7447 inst.instruction |= inst.operands[2].reg;
7448 }
7449
7450 static void
7451 do_usat16 (void)
7452 {
7453 inst.instruction |= inst.operands[0].reg << 12;
7454 inst.instruction |= inst.operands[1].imm << 16;
7455 inst.instruction |= inst.operands[2].reg;
7456 }
7457
7458 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7459 preserving the other bits.
7460
7461 setend <endian_specifier>, where <endian_specifier> is either
7462 BE or LE. */
7463
7464 static void
7465 do_setend (void)
7466 {
7467 if (inst.operands[0].imm)
7468 inst.instruction |= 0x200;
7469 }
7470
7471 static void
7472 do_shift (void)
7473 {
7474 unsigned int Rm = (inst.operands[1].present
7475 ? inst.operands[1].reg
7476 : inst.operands[0].reg);
7477
7478 inst.instruction |= inst.operands[0].reg << 12;
7479 inst.instruction |= Rm;
7480 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7481 {
7482 inst.instruction |= inst.operands[2].reg << 8;
7483 inst.instruction |= SHIFT_BY_REG;
7484 }
7485 else
7486 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7487 }
7488
7489 static void
7490 do_smc (void)
7491 {
7492 inst.reloc.type = BFD_RELOC_ARM_SMC;
7493 inst.reloc.pc_rel = 0;
7494 }
7495
7496 static void
7497 do_swi (void)
7498 {
7499 inst.reloc.type = BFD_RELOC_ARM_SWI;
7500 inst.reloc.pc_rel = 0;
7501 }
7502
7503 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7504 SMLAxy{cond} Rd,Rm,Rs,Rn
7505 SMLAWy{cond} Rd,Rm,Rs,Rn
7506 Error if any register is R15. */
7507
7508 static void
7509 do_smla (void)
7510 {
7511 inst.instruction |= inst.operands[0].reg << 16;
7512 inst.instruction |= inst.operands[1].reg;
7513 inst.instruction |= inst.operands[2].reg << 8;
7514 inst.instruction |= inst.operands[3].reg << 12;
7515 }
7516
7517 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7518 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7519 Error if any register is R15.
7520 Warning if Rdlo == Rdhi. */
7521
7522 static void
7523 do_smlal (void)
7524 {
7525 inst.instruction |= inst.operands[0].reg << 12;
7526 inst.instruction |= inst.operands[1].reg << 16;
7527 inst.instruction |= inst.operands[2].reg;
7528 inst.instruction |= inst.operands[3].reg << 8;
7529
7530 if (inst.operands[0].reg == inst.operands[1].reg)
7531 as_tsktsk (_("rdhi and rdlo must be different"));
7532 }
7533
7534 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7535 SMULxy{cond} Rd,Rm,Rs
7536 Error if any register is R15. */
7537
7538 static void
7539 do_smul (void)
7540 {
7541 inst.instruction |= inst.operands[0].reg << 16;
7542 inst.instruction |= inst.operands[1].reg;
7543 inst.instruction |= inst.operands[2].reg << 8;
7544 }
7545
7546 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7547 the same for both ARM and Thumb-2. */
7548
7549 static void
7550 do_srs (void)
7551 {
7552 int reg;
7553
7554 if (inst.operands[0].present)
7555 {
7556 reg = inst.operands[0].reg;
7557 constraint (reg != REG_SP, _("SRS base register must be r13"));
7558 }
7559 else
7560 reg = REG_SP;
7561
7562 inst.instruction |= reg << 16;
7563 inst.instruction |= inst.operands[1].imm;
7564 if (inst.operands[0].writeback || inst.operands[1].writeback)
7565 inst.instruction |= WRITE_BACK;
7566 }
7567
7568 /* ARM V6 strex (argument parse). */
7569
7570 static void
7571 do_strex (void)
7572 {
7573 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
7574 || inst.operands[2].postind || inst.operands[2].writeback
7575 || inst.operands[2].immisreg || inst.operands[2].shifted
7576 || inst.operands[2].negative
7577 /* See comment in do_ldrex(). */
7578 || (inst.operands[2].reg == REG_PC),
7579 BAD_ADDR_MODE);
7580
7581 constraint (inst.operands[0].reg == inst.operands[1].reg
7582 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
7583
7584 constraint (inst.reloc.exp.X_op != O_constant
7585 || inst.reloc.exp.X_add_number != 0,
7586 _("offset must be zero in ARM encoding"));
7587
7588 inst.instruction |= inst.operands[0].reg << 12;
7589 inst.instruction |= inst.operands[1].reg;
7590 inst.instruction |= inst.operands[2].reg << 16;
7591 inst.reloc.type = BFD_RELOC_UNUSED;
7592 }
7593
7594 static void
7595 do_strexd (void)
7596 {
7597 constraint (inst.operands[1].reg % 2 != 0,
7598 _("even register required"));
7599 constraint (inst.operands[2].present
7600 && inst.operands[2].reg != inst.operands[1].reg + 1,
7601 _("can only store two consecutive registers"));
7602 /* If op 2 were present and equal to PC, this function wouldn't
7603 have been called in the first place. */
7604 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
7605
7606 constraint (inst.operands[0].reg == inst.operands[1].reg
7607 || inst.operands[0].reg == inst.operands[1].reg + 1
7608 || inst.operands[0].reg == inst.operands[3].reg,
7609 BAD_OVERLAP);
7610
7611 inst.instruction |= inst.operands[0].reg << 12;
7612 inst.instruction |= inst.operands[1].reg;
7613 inst.instruction |= inst.operands[3].reg << 16;
7614 }
7615
7616 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7617 extends it to 32-bits, and adds the result to a value in another
7618 register. You can specify a rotation by 0, 8, 16, or 24 bits
7619 before extracting the 16-bit value.
7620 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7621 Condition defaults to COND_ALWAYS.
7622 Error if any register uses R15. */
7623
7624 static void
7625 do_sxtah (void)
7626 {
7627 inst.instruction |= inst.operands[0].reg << 12;
7628 inst.instruction |= inst.operands[1].reg << 16;
7629 inst.instruction |= inst.operands[2].reg;
7630 inst.instruction |= inst.operands[3].imm << 10;
7631 }
7632
7633 /* ARM V6 SXTH.
7634
7635 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7636 Condition defaults to COND_ALWAYS.
7637 Error if any register uses R15. */
7638
7639 static void
7640 do_sxth (void)
7641 {
7642 inst.instruction |= inst.operands[0].reg << 12;
7643 inst.instruction |= inst.operands[1].reg;
7644 inst.instruction |= inst.operands[2].imm << 10;
7645 }
7646 \f
7647 /* VFP instructions. In a logical order: SP variant first, monad
7648 before dyad, arithmetic then move then load/store. */
7649
7650 static void
7651 do_vfp_sp_monadic (void)
7652 {
7653 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7654 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7655 }
7656
7657 static void
7658 do_vfp_sp_dyadic (void)
7659 {
7660 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7661 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7662 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7663 }
7664
7665 static void
7666 do_vfp_sp_compare_z (void)
7667 {
7668 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7669 }
7670
7671 static void
7672 do_vfp_dp_sp_cvt (void)
7673 {
7674 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7675 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7676 }
7677
7678 static void
7679 do_vfp_sp_dp_cvt (void)
7680 {
7681 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7682 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7683 }
7684
7685 static void
7686 do_vfp_reg_from_sp (void)
7687 {
7688 inst.instruction |= inst.operands[0].reg << 12;
7689 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7690 }
7691
7692 static void
7693 do_vfp_reg2_from_sp2 (void)
7694 {
7695 constraint (inst.operands[2].imm != 2,
7696 _("only two consecutive VFP SP registers allowed here"));
7697 inst.instruction |= inst.operands[0].reg << 12;
7698 inst.instruction |= inst.operands[1].reg << 16;
7699 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7700 }
7701
7702 static void
7703 do_vfp_sp_from_reg (void)
7704 {
7705 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
7706 inst.instruction |= inst.operands[1].reg << 12;
7707 }
7708
7709 static void
7710 do_vfp_sp2_from_reg2 (void)
7711 {
7712 constraint (inst.operands[0].imm != 2,
7713 _("only two consecutive VFP SP registers allowed here"));
7714 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
7715 inst.instruction |= inst.operands[1].reg << 12;
7716 inst.instruction |= inst.operands[2].reg << 16;
7717 }
7718
7719 static void
7720 do_vfp_sp_ldst (void)
7721 {
7722 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7723 encode_arm_cp_address (1, FALSE, TRUE, 0);
7724 }
7725
7726 static void
7727 do_vfp_dp_ldst (void)
7728 {
7729 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7730 encode_arm_cp_address (1, FALSE, TRUE, 0);
7731 }
7732
7733
7734 static void
7735 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7736 {
7737 if (inst.operands[0].writeback)
7738 inst.instruction |= WRITE_BACK;
7739 else
7740 constraint (ldstm_type != VFP_LDSTMIA,
7741 _("this addressing mode requires base-register writeback"));
7742 inst.instruction |= inst.operands[0].reg << 16;
7743 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7744 inst.instruction |= inst.operands[1].imm;
7745 }
7746
7747 static void
7748 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7749 {
7750 int count;
7751
7752 if (inst.operands[0].writeback)
7753 inst.instruction |= WRITE_BACK;
7754 else
7755 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7756 _("this addressing mode requires base-register writeback"));
7757
7758 inst.instruction |= inst.operands[0].reg << 16;
7759 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7760
7761 count = inst.operands[1].imm << 1;
7762 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7763 count += 1;
7764
7765 inst.instruction |= count;
7766 }
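
/* As a worked example (assuming the register-list parser stores the
register count in operands[1].imm): "fldmiax r0, {d1-d3}" transfers
three registers and so encodes a count of 3 * 2 + 1 == 7.  */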
7767
7768 static void
7769 do_vfp_sp_ldstmia (void)
7770 {
7771 vfp_sp_ldstm (VFP_LDSTMIA);
7772 }
7773
7774 static void
7775 do_vfp_sp_ldstmdb (void)
7776 {
7777 vfp_sp_ldstm (VFP_LDSTMDB);
7778 }
7779
7780 static void
7781 do_vfp_dp_ldstmia (void)
7782 {
7783 vfp_dp_ldstm (VFP_LDSTMIA);
7784 }
7785
7786 static void
7787 do_vfp_dp_ldstmdb (void)
7788 {
7789 vfp_dp_ldstm (VFP_LDSTMDB);
7790 }
7791
7792 static void
7793 do_vfp_xp_ldstmia (void)
7794 {
7795 vfp_dp_ldstm (VFP_LDSTMIAX);
7796 }
7797
7798 static void
7799 do_vfp_xp_ldstmdb (void)
7800 {
7801 vfp_dp_ldstm (VFP_LDSTMDBX);
7802 }
7803
7804 static void
7805 do_vfp_dp_rd_rm (void)
7806 {
7807 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7808 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7809 }
7810
7811 static void
7812 do_vfp_dp_rn_rd (void)
7813 {
7814 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7815 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7816 }
7817
7818 static void
7819 do_vfp_dp_rd_rn (void)
7820 {
7821 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7822 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7823 }
7824
7825 static void
7826 do_vfp_dp_rd_rn_rm (void)
7827 {
7828 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7829 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7830 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7831 }
7832
7833 static void
7834 do_vfp_dp_rd (void)
7835 {
7836 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7837 }
7838
7839 static void
7840 do_vfp_dp_rm_rd_rn (void)
7841 {
7842 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7843 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7844 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7845 }
7846
7847 /* VFPv3 instructions. */
7848 static void
7849 do_vfp_sp_const (void)
7850 {
7851 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7852 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7853 inst.instruction |= (inst.operands[1].imm & 0x0f);
7854 }
7855
7856 static void
7857 do_vfp_dp_const (void)
7858 {
7859 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7860 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7861 inst.instruction |= (inst.operands[1].imm & 0x0f);
7862 }
7863
7864 static void
7865 vfp_conv (int srcsize)
7866 {
7867 unsigned immbits = srcsize - inst.operands[1].imm;
7868 inst.instruction |= (immbits & 1) << 5;
7869 inst.instruction |= (immbits >> 1);
7870 }
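
/* As a worked example (taking operands[1].imm to be the number of
fraction bits): a 32-bit conversion with #16 gives immbits == 16, so
bit 5 receives 0 and the low bits receive 16 >> 1 == 8.  */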
7871
7872 static void
7873 do_vfp_sp_conv_16 (void)
7874 {
7875 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7876 vfp_conv (16);
7877 }
7878
7879 static void
7880 do_vfp_dp_conv_16 (void)
7881 {
7882 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7883 vfp_conv (16);
7884 }
7885
7886 static void
7887 do_vfp_sp_conv_32 (void)
7888 {
7889 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7890 vfp_conv (32);
7891 }
7892
7893 static void
7894 do_vfp_dp_conv_32 (void)
7895 {
7896 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7897 vfp_conv (32);
7898 }
7899 \f
7900 /* FPA instructions. Also in a logical order. */
7901
7902 static void
7903 do_fpa_cmp (void)
7904 {
7905 inst.instruction |= inst.operands[0].reg << 16;
7906 inst.instruction |= inst.operands[1].reg;
7907 }
7908
7909 static void
7910 do_fpa_ldmstm (void)
7911 {
7912 inst.instruction |= inst.operands[0].reg << 12;
7913 switch (inst.operands[1].imm)
7914 {
7915 case 1: inst.instruction |= CP_T_X; break;
7916 case 2: inst.instruction |= CP_T_Y; break;
7917 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7918 case 4: break;
7919 default: abort ();
7920 }
7921
7922 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7923 {
7924 /* The instruction specified "ea" or "fd", so we can only accept
7925 [Rn]{!}. The instruction does not really support stacking or
7926 unstacking, so we have to emulate these by setting appropriate
7927 bits and offsets. */
7928 constraint (inst.reloc.exp.X_op != O_constant
7929 || inst.reloc.exp.X_add_number != 0,
7930 _("this instruction does not support indexing"));
7931
7932 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7933 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7934
7935 if (!(inst.instruction & INDEX_UP))
7936 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7937
7938 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7939 {
7940 inst.operands[2].preind = 0;
7941 inst.operands[2].postind = 1;
7942 }
7943 }
7944
7945 encode_arm_cp_address (2, TRUE, TRUE, 0);
7946 }
7947 \f
7948 /* iWMMXt instructions: strictly in alphabetical order. */
7949
7950 static void
7951 do_iwmmxt_tandorc (void)
7952 {
7953 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7954 }
7955
7956 static void
7957 do_iwmmxt_textrc (void)
7958 {
7959 inst.instruction |= inst.operands[0].reg << 12;
7960 inst.instruction |= inst.operands[1].imm;
7961 }
7962
7963 static void
7964 do_iwmmxt_textrm (void)
7965 {
7966 inst.instruction |= inst.operands[0].reg << 12;
7967 inst.instruction |= inst.operands[1].reg << 16;
7968 inst.instruction |= inst.operands[2].imm;
7969 }
7970
7971 static void
7972 do_iwmmxt_tinsr (void)
7973 {
7974 inst.instruction |= inst.operands[0].reg << 16;
7975 inst.instruction |= inst.operands[1].reg << 12;
7976 inst.instruction |= inst.operands[2].imm;
7977 }
7978
7979 static void
7980 do_iwmmxt_tmia (void)
7981 {
7982 inst.instruction |= inst.operands[0].reg << 5;
7983 inst.instruction |= inst.operands[1].reg;
7984 inst.instruction |= inst.operands[2].reg << 12;
7985 }
7986
7987 static void
7988 do_iwmmxt_waligni (void)
7989 {
7990 inst.instruction |= inst.operands[0].reg << 12;
7991 inst.instruction |= inst.operands[1].reg << 16;
7992 inst.instruction |= inst.operands[2].reg;
7993 inst.instruction |= inst.operands[3].imm << 20;
7994 }
7995
7996 static void
7997 do_iwmmxt_wmerge (void)
7998 {
7999 inst.instruction |= inst.operands[0].reg << 12;
8000 inst.instruction |= inst.operands[1].reg << 16;
8001 inst.instruction |= inst.operands[2].reg;
8002 inst.instruction |= inst.operands[3].imm << 21;
8003 }
8004
8005 static void
8006 do_iwmmxt_wmov (void)
8007 {
8008 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
8009 inst.instruction |= inst.operands[0].reg << 12;
8010 inst.instruction |= inst.operands[1].reg << 16;
8011 inst.instruction |= inst.operands[1].reg;
8012 }
8013
8014 static void
8015 do_iwmmxt_wldstbh (void)
8016 {
8017 int reloc;
8018 inst.instruction |= inst.operands[0].reg << 12;
8019 if (thumb_mode)
8020 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
8021 else
8022 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
8023 encode_arm_cp_address (1, TRUE, FALSE, reloc);
8024 }
8025
8026 static void
8027 do_iwmmxt_wldstw (void)
8028 {
8029 /* RIWR_RIWC clears .isreg for a control register. */
8030 if (!inst.operands[0].isreg)
8031 {
8032 constraint (inst.cond != COND_ALWAYS, BAD_COND);
8033 inst.instruction |= 0xf0000000;
8034 }
8035
8036 inst.instruction |= inst.operands[0].reg << 12;
8037 encode_arm_cp_address (1, TRUE, TRUE, 0);
8038 }
8039
8040 static void
8041 do_iwmmxt_wldstd (void)
8042 {
8043 inst.instruction |= inst.operands[0].reg << 12;
8044 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
8045 && inst.operands[1].immisreg)
8046 {
8047 inst.instruction &= ~0x1a000ff;
8048 inst.instruction |= (0xf << 28);
8049 if (inst.operands[1].preind)
8050 inst.instruction |= PRE_INDEX;
8051 if (!inst.operands[1].negative)
8052 inst.instruction |= INDEX_UP;
8053 if (inst.operands[1].writeback)
8054 inst.instruction |= WRITE_BACK;
8055 inst.instruction |= inst.operands[1].reg << 16;
8056 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8057 inst.instruction |= inst.operands[1].imm;
8058 }
8059 else
8060 encode_arm_cp_address (1, TRUE, FALSE, 0);
8061 }
8062
8063 static void
8064 do_iwmmxt_wshufh (void)
8065 {
8066 inst.instruction |= inst.operands[0].reg << 12;
8067 inst.instruction |= inst.operands[1].reg << 16;
8068 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
8069 inst.instruction |= (inst.operands[2].imm & 0x0f);
8070 }
8071
8072 static void
8073 do_iwmmxt_wzero (void)
8074 {
8075 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8076 inst.instruction |= inst.operands[0].reg;
8077 inst.instruction |= inst.operands[0].reg << 12;
8078 inst.instruction |= inst.operands[0].reg << 16;
8079 }
8080
8081 static void
8082 do_iwmmxt_wrwrwr_or_imm5 (void)
8083 {
8084 if (inst.operands[2].isreg)
8085 do_rd_rn_rm ();
8086 else {
8087 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
8088 _("immediate operand requires iWMMXt2"));
8089 do_rd_rn ();
8090 if (inst.operands[2].imm == 0)
8091 {
8092 switch ((inst.instruction >> 20) & 0xf)
8093 {
8094 case 4:
8095 case 5:
8096 case 6:
8097 case 7:
8098 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8099 inst.operands[2].imm = 16;
8100 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
8101 break;
8102 case 8:
8103 case 9:
8104 case 10:
8105 case 11:
8106 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8107 inst.operands[2].imm = 32;
8108 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
8109 break;
8110 case 12:
8111 case 13:
8112 case 14:
8113 case 15:
8114 {
8115 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8116 unsigned long wrn;
8117 wrn = (inst.instruction >> 16) & 0xf;
8118 inst.instruction &= 0xff0fff0f;
8119 inst.instruction |= wrn;
8120 /* Bail out here; the instruction is now assembled. */
8121 return;
8122 }
8123 }
8124 }
8125 /* Map 32 -> 0, etc. */
8126 inst.operands[2].imm &= 0x1f;
8127 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
8128 }
8129 }
8130 \f
8131 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8132 operations first, then control, shift, and load/store. */
8133
8134 /* Insns like "foo X,Y,Z". */
8135
8136 static void
8137 do_mav_triple (void)
8138 {
8139 inst.instruction |= inst.operands[0].reg << 16;
8140 inst.instruction |= inst.operands[1].reg;
8141 inst.instruction |= inst.operands[2].reg << 12;
8142 }
8143
8144 /* Insns like "foo W,X,Y,Z",
8145 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8146
8147 static void
8148 do_mav_quad (void)
8149 {
8150 inst.instruction |= inst.operands[0].reg << 5;
8151 inst.instruction |= inst.operands[1].reg << 12;
8152 inst.instruction |= inst.operands[2].reg << 16;
8153 inst.instruction |= inst.operands[3].reg;
8154 }
8155
8156 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8157 static void
8158 do_mav_dspsc (void)
8159 {
8160 inst.instruction |= inst.operands[1].reg << 12;
8161 }
8162
8163 /* Maverick shift immediate instructions.
8164 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8165 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8166
8167 static void
8168 do_mav_shift (void)
8169 {
8170 int imm = inst.operands[2].imm;
8171
8172 inst.instruction |= inst.operands[0].reg << 12;
8173 inst.instruction |= inst.operands[1].reg << 16;
8174
8175 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8176 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8177 Bit 4 should be 0. */
8178 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8179
8180 inst.instruction |= imm;
8181 }
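
/* Worked example: a shift amount of #37 (0x25) gives (0x25 & 0xf) = 0x5
   in insn bits 3:0 and (0x25 & 0x70) << 1 = 0x40, i.e. the value 2 in
   insn bits 7:5, so the low byte of the encoding is 0x45 with bit 4 left
   clear, exactly as the layout comment above describes.  */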
8182 \f
8183 /* XScale instructions. Also sorted arithmetic before move. */
8184
8185 /* Xscale multiply-accumulate (argument parse)
8186 MIAcc acc0,Rm,Rs
8187 MIAPHcc acc0,Rm,Rs
8188 MIAxycc acc0,Rm,Rs. */
8189
8190 static void
8191 do_xsc_mia (void)
8192 {
8193 inst.instruction |= inst.operands[1].reg;
8194 inst.instruction |= inst.operands[2].reg << 12;
8195 }
8196
8197 /* Xscale move-accumulator-register (argument parse)
8198
8199 MARcc acc0,RdLo,RdHi. */
8200
8201 static void
8202 do_xsc_mar (void)
8203 {
8204 inst.instruction |= inst.operands[1].reg << 12;
8205 inst.instruction |= inst.operands[2].reg << 16;
8206 }
8207
8208 /* Xscale move-register-accumulator (argument parse)
8209
8210 MRAcc RdLo,RdHi,acc0. */
8211
8212 static void
8213 do_xsc_mra (void)
8214 {
8215 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8216 inst.instruction |= inst.operands[0].reg << 12;
8217 inst.instruction |= inst.operands[1].reg << 16;
8218 }
8219 \f
8220 /* Encoding functions relevant only to Thumb. */
8221
8222 /* inst.operands[i] is a shifted-register operand; encode
8223 it into inst.instruction in the format used by Thumb32. */
8224
8225 static void
8226 encode_thumb32_shifted_operand (int i)
8227 {
8228 unsigned int value = inst.reloc.exp.X_add_number;
8229 unsigned int shift = inst.operands[i].shift_kind;
8230
8231 constraint (inst.operands[i].immisreg,
8232 _("shift by register not allowed in thumb mode"));
8233 inst.instruction |= inst.operands[i].reg;
8234 if (shift == SHIFT_RRX)
8235 inst.instruction |= SHIFT_ROR << 4;
8236 else
8237 {
8238 constraint (inst.reloc.exp.X_op != O_constant,
8239 _("expression too complex"));
8240
8241 constraint (value > 32
8242 || (value == 32 && (shift == SHIFT_LSL
8243 || shift == SHIFT_ROR)),
8244 _("shift expression is too large"));
8245
8246 if (value == 0)
8247 shift = SHIFT_LSL;
8248 else if (value == 32)
8249 value = 0;
8250
8251 inst.instruction |= shift << 4;
8252 inst.instruction |= (value & 0x1c) << 10;
8253 inst.instruction |= (value & 0x03) << 6;
8254 }
8255 }
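
/* For instance, an operand of "r2, lsl #5" puts r2 in bits 3:0, the
   shift type in bits 5:4, and splits the amount 5 into imm3 = 1
   (bits 14:12, from value & 0x1c) and imm2 = 1 (bits 7:6, from
   value & 0x03).  "rrx" comes out as ROR with a zero amount, and
   "asr #32" is accepted and likewise encoded with a zero amount, per
   the value == 32 case above.  */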
8256
8257
8258 /* inst.operands[i] was set up by parse_address. Encode it into a
8259 Thumb32 format load or store instruction. Reject forms that cannot
8260 be used with such instructions. If is_t is true, reject forms that
8261 cannot be used with a T instruction; if is_d is true, reject forms
8262 that cannot be used with a D instruction. */
8263
8264 static void
8265 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
8266 {
8267 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
8268
8269 constraint (!inst.operands[i].isreg,
8270 _("Instruction does not support =N addresses"));
8271
8272 inst.instruction |= inst.operands[i].reg << 16;
8273 if (inst.operands[i].immisreg)
8274 {
8275 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
8276 constraint (is_t || is_d, _("cannot use register index with this instruction"));
8277 constraint (inst.operands[i].negative,
8278 _("Thumb does not support negative register indexing"));
8279 constraint (inst.operands[i].postind,
8280 _("Thumb does not support register post-indexing"));
8281 constraint (inst.operands[i].writeback,
8282 _("Thumb does not support register indexing with writeback"));
8283 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
8284 _("Thumb supports only LSL in shifted register indexing"));
8285
8286 inst.instruction |= inst.operands[i].imm;
8287 if (inst.operands[i].shifted)
8288 {
8289 constraint (inst.reloc.exp.X_op != O_constant,
8290 _("expression too complex"));
8291 constraint (inst.reloc.exp.X_add_number < 0
8292 || inst.reloc.exp.X_add_number > 3,
8293 _("shift out of range"));
8294 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8295 }
8296 inst.reloc.type = BFD_RELOC_UNUSED;
8297 }
8298 else if (inst.operands[i].preind)
8299 {
8300 constraint (is_pc && inst.operands[i].writeback,
8301 _("cannot use writeback with PC-relative addressing"));
8302 constraint (is_t && inst.operands[i].writeback,
8303 _("cannot use writeback with this instruction"));
8304
8305 if (is_d)
8306 {
8307 inst.instruction |= 0x01000000;
8308 if (inst.operands[i].writeback)
8309 inst.instruction |= 0x00200000;
8310 }
8311 else
8312 {
8313 inst.instruction |= 0x00000c00;
8314 if (inst.operands[i].writeback)
8315 inst.instruction |= 0x00000100;
8316 }
8317 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8318 }
8319 else if (inst.operands[i].postind)
8320 {
8321 assert (inst.operands[i].writeback);
8322 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
8323 constraint (is_t, _("cannot use post-indexing with this instruction"));
8324
8325 if (is_d)
8326 inst.instruction |= 0x00200000;
8327 else
8328 inst.instruction |= 0x00000900;
8329 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8330 }
8331 else /* unindexed - only for coprocessor */
8332 inst.error = _("instruction does not accept unindexed addressing");
8333 }
8334
8335 /* Table of Thumb instructions which exist in both 16- and 32-bit
8336 encodings (the latter only in post-V6T2 cores). The index is the
8337 value used in the insns table below. When there is more than one
8338 possible 16-bit encoding for the instruction, this table always
8339 holds variant (1).
8340 Also contains several pseudo-instructions used during relaxation. */
8341 #define T16_32_TAB \
8342 X(adc, 4140, eb400000), \
8343 X(adcs, 4140, eb500000), \
8344 X(add, 1c00, eb000000), \
8345 X(adds, 1c00, eb100000), \
8346 X(addi, 0000, f1000000), \
8347 X(addis, 0000, f1100000), \
8348 X(add_pc,000f, f20f0000), \
8349 X(add_sp,000d, f10d0000), \
8350 X(adr, 000f, f20f0000), \
8351 X(and, 4000, ea000000), \
8352 X(ands, 4000, ea100000), \
8353 X(asr, 1000, fa40f000), \
8354 X(asrs, 1000, fa50f000), \
8355 X(b, e000, f000b000), \
8356 X(bcond, d000, f0008000), \
8357 X(bic, 4380, ea200000), \
8358 X(bics, 4380, ea300000), \
8359 X(cmn, 42c0, eb100f00), \
8360 X(cmp, 2800, ebb00f00), \
8361 X(cpsie, b660, f3af8400), \
8362 X(cpsid, b670, f3af8600), \
8363 X(cpy, 4600, ea4f0000), \
8364 X(dec_sp,80dd, f1ad0d00), \
8365 X(eor, 4040, ea800000), \
8366 X(eors, 4040, ea900000), \
8367 X(inc_sp,00dd, f10d0d00), \
8368 X(ldmia, c800, e8900000), \
8369 X(ldr, 6800, f8500000), \
8370 X(ldrb, 7800, f8100000), \
8371 X(ldrh, 8800, f8300000), \
8372 X(ldrsb, 5600, f9100000), \
8373 X(ldrsh, 5e00, f9300000), \
8374 X(ldr_pc,4800, f85f0000), \
8375 X(ldr_pc2,4800, f85f0000), \
8376 X(ldr_sp,9800, f85d0000), \
8377 X(lsl, 0000, fa00f000), \
8378 X(lsls, 0000, fa10f000), \
8379 X(lsr, 0800, fa20f000), \
8380 X(lsrs, 0800, fa30f000), \
8381 X(mov, 2000, ea4f0000), \
8382 X(movs, 2000, ea5f0000), \
8383 X(mul, 4340, fb00f000), \
8384 X(muls, 4340, ffffffff), /* no 32b muls */ \
8385 X(mvn, 43c0, ea6f0000), \
8386 X(mvns, 43c0, ea7f0000), \
8387 X(neg, 4240, f1c00000), /* rsb #0 */ \
8388 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8389 X(orr, 4300, ea400000), \
8390 X(orrs, 4300, ea500000), \
8391 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8392 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8393 X(rev, ba00, fa90f080), \
8394 X(rev16, ba40, fa90f090), \
8395 X(revsh, bac0, fa90f0b0), \
8396 X(ror, 41c0, fa60f000), \
8397 X(rors, 41c0, fa70f000), \
8398 X(sbc, 4180, eb600000), \
8399 X(sbcs, 4180, eb700000), \
8400 X(stmia, c000, e8800000), \
8401 X(str, 6000, f8400000), \
8402 X(strb, 7000, f8000000), \
8403 X(strh, 8000, f8200000), \
8404 X(str_sp,9000, f84d0000), \
8405 X(sub, 1e00, eba00000), \
8406 X(subs, 1e00, ebb00000), \
8407 X(subi, 8000, f1a00000), \
8408 X(subis, 8000, f1b00000), \
8409 X(sxtb, b240, fa4ff080), \
8410 X(sxth, b200, fa0ff080), \
8411 X(tst, 4200, ea100f00), \
8412 X(uxtb, b2c0, fa5ff080), \
8413 X(uxth, b280, fa1ff080), \
8414 X(nop, bf00, f3af8000), \
8415 X(yield, bf10, f3af8001), \
8416 X(wfe, bf20, f3af8002), \
8417 X(wfi, bf30, f3af8003), \
8418 X(sev, bf40, f3af8004),
8419
8420 /* To catch errors in encoding functions, the codes are all offset by
8421 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8422 as 16-bit instructions. */
8423 #define X(a,b,c) T_MNEM_##a
8424 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8425 #undef X
8426
8427 #define X(a,b,c) 0x##b
8428 static const unsigned short thumb_op16[] = { T16_32_TAB };
8429 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8430 #undef X
8431
8432 #define X(a,b,c) 0x##c
8433 static const unsigned int thumb_op32[] = { T16_32_TAB };
8434 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8435 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8436 #undef X
8437 #undef T16_32_TAB
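
/* Sketch of how the lookup macros above behave: THUMB_OP16 (T_MNEM_adds)
   yields 0x1c00 and THUMB_OP32 (T_MNEM_adds) yields 0xeb100000, straight
   from the table rows; THUMB_SETS_FLAGS (T_MNEM_adds) is nonzero because
   bit 20 is set in the 32-bit encoding, while THUMB_SETS_FLAGS
   (T_MNEM_add) is zero.  */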
8438
8439 /* Thumb instruction encoders, in alphabetical order. */
8440
8441 /* ADDW or SUBW. */
8442 static void
8443 do_t_add_sub_w (void)
8444 {
8445 int Rd, Rn;
8446
8447 Rd = inst.operands[0].reg;
8448 Rn = inst.operands[1].reg;
8449
8450 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this is the
8451 SP-{plus,minus}-immediate form of the instruction. */
8452 reject_bad_reg (Rd);
8453
8454 inst.instruction |= (Rn << 16) | (Rd << 8);
8455 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8456 }
8457
8458 /* Parse an add or subtract instruction. We get here with inst.instruction
8459 equalling any of T_MNEM_add, T_MNEM_adds, T_MNEM_sub, or T_MNEM_subs. */
8460
8461 static void
8462 do_t_add_sub (void)
8463 {
8464 int Rd, Rs, Rn;
8465
8466 Rd = inst.operands[0].reg;
8467 Rs = (inst.operands[1].present
8468 ? inst.operands[1].reg /* Rd, Rs, foo */
8469 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8470
8471 if (unified_syntax)
8472 {
8473 bfd_boolean flags;
8474 bfd_boolean narrow;
8475 int opcode;
8476
8477 flags = (inst.instruction == T_MNEM_adds
8478 || inst.instruction == T_MNEM_subs);
8479 if (flags)
8480 narrow = (current_it_mask == 0);
8481 else
8482 narrow = (current_it_mask != 0);
8483 if (!inst.operands[2].isreg)
8484 {
8485 int add;
8486
8487 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
8488
8489 add = (inst.instruction == T_MNEM_add
8490 || inst.instruction == T_MNEM_adds);
8491 opcode = 0;
8492 if (inst.size_req != 4)
8493 {
8494 /* Attempt to use a narrow opcode, with relaxation if
8495 appropriate. */
8496 if (Rd == REG_SP && Rs == REG_SP && !flags)
8497 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
8498 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
8499 opcode = T_MNEM_add_sp;
8500 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
8501 opcode = T_MNEM_add_pc;
8502 else if (Rd <= 7 && Rs <= 7 && narrow)
8503 {
8504 if (flags)
8505 opcode = add ? T_MNEM_addis : T_MNEM_subis;
8506 else
8507 opcode = add ? T_MNEM_addi : T_MNEM_subi;
8508 }
8509 if (opcode)
8510 {
8511 inst.instruction = THUMB_OP16 (opcode);
8512 inst.instruction |= (Rd << 4) | Rs;
8513 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8514 if (inst.size_req != 2)
8515 inst.relax = opcode;
8516 }
8517 else
8518 constraint (inst.size_req == 2, BAD_HIREG);
8519 }
8520 if (inst.size_req == 4
8521 || (inst.size_req != 2 && !opcode))
8522 {
8523 if (Rd == REG_PC)
8524 {
8525 constraint (add, BAD_PC);
8526 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
8527 _("only SUBS PC, LR, #const allowed"));
8528 constraint (inst.reloc.exp.X_op != O_constant,
8529 _("expression too complex"));
8530 constraint (inst.reloc.exp.X_add_number < 0
8531 || inst.reloc.exp.X_add_number > 0xff,
8532 _("immediate value out of range"));
8533 inst.instruction = T2_SUBS_PC_LR
8534 | inst.reloc.exp.X_add_number;
8535 inst.reloc.type = BFD_RELOC_UNUSED;
8536 return;
8537 }
8538 else if (Rs == REG_PC)
8539 {
8540 /* Always use addw/subw. */
8541 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
8542 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8543 }
8544 else
8545 {
8546 inst.instruction = THUMB_OP32 (inst.instruction);
8547 inst.instruction = (inst.instruction & 0xe1ffffff)
8548 | 0x10000000;
8549 if (flags)
8550 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8551 else
8552 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
8553 }
8554 inst.instruction |= Rd << 8;
8555 inst.instruction |= Rs << 16;
8556 }
8557 }
8558 else
8559 {
8560 Rn = inst.operands[2].reg;
8561 /* See if we can do this with a 16-bit instruction. */
8562 if (!inst.operands[2].shifted && inst.size_req != 4)
8563 {
8564 if (Rd > 7 || Rs > 7 || Rn > 7)
8565 narrow = FALSE;
8566
8567 if (narrow)
8568 {
8569 inst.instruction = ((inst.instruction == T_MNEM_adds
8570 || inst.instruction == T_MNEM_add)
8571 ? T_OPCODE_ADD_R3
8572 : T_OPCODE_SUB_R3);
8573 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8574 return;
8575 }
8576
8577 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
8578 {
8579 /* Thumb-1 cores (except v6-M) require at least one high
8580 register in a narrow non-flag-setting add. */
8581 if (Rd > 7 || Rn > 7
8582 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
8583 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
8584 {
8585 if (Rd == Rn)
8586 {
8587 Rn = Rs;
8588 Rs = Rd;
8589 }
8590 inst.instruction = T_OPCODE_ADD_HI;
8591 inst.instruction |= (Rd & 8) << 4;
8592 inst.instruction |= (Rd & 7);
8593 inst.instruction |= Rn << 3;
8594 return;
8595 }
8596 }
8597 }
8598
8599 constraint (Rd == REG_PC, BAD_PC);
8600 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
8601 constraint (Rs == REG_PC, BAD_PC);
8602 reject_bad_reg (Rn);
8603
8604 /* If we get here, it can't be done in 16 bits. */
8605 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
8606 _("shift must be constant"));
8607 inst.instruction = THUMB_OP32 (inst.instruction);
8608 inst.instruction |= Rd << 8;
8609 inst.instruction |= Rs << 16;
8610 encode_thumb32_shifted_operand (2);
8611 }
8612 }
8613 else
8614 {
8615 constraint (inst.instruction == T_MNEM_adds
8616 || inst.instruction == T_MNEM_subs,
8617 BAD_THUMB32);
8618
8619 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
8620 {
8621 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
8622 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
8623 BAD_HIREG);
8624
8625 inst.instruction = (inst.instruction == T_MNEM_add
8626 ? 0x0000 : 0x8000);
8627 inst.instruction |= (Rd << 4) | Rs;
8628 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8629 return;
8630 }
8631
8632 Rn = inst.operands[2].reg;
8633 constraint (inst.operands[2].shifted, _("unshifted register required"));
8634
8635 /* We now have Rd, Rs, and Rn set to registers. */
8636 if (Rd > 7 || Rs > 7 || Rn > 7)
8637 {
8638 /* Can't do this for SUB. */
8639 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
8640 inst.instruction = T_OPCODE_ADD_HI;
8641 inst.instruction |= (Rd & 8) << 4;
8642 inst.instruction |= (Rd & 7);
8643 if (Rs == Rd)
8644 inst.instruction |= Rn << 3;
8645 else if (Rn == Rd)
8646 inst.instruction |= Rs << 3;
8647 else
8648 constraint (1, _("dest must overlap one source register"));
8649 }
8650 else
8651 {
8652 inst.instruction = (inst.instruction == T_MNEM_add
8653 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
8654 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8655 }
8656 }
8657 }
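
/* Two examples of the selection logic above, assuming unified syntax and
   no width suffix: "add r1, sp, #8" matches the Rd <= 7 / Rs == SP case,
   so it is emitted as the 16-bit T_MNEM_add_sp form with inst.relax set,
   leaving the relaxation pass free to widen it later; "adds r8, r9, #1"
   cannot use any narrow opcode (Rd > 7), so it falls through to the
   32-bit path and gets BFD_RELOC_ARM_T32_IMMEDIATE.  */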
8658
8659 static void
8660 do_t_adr (void)
8661 {
8662 unsigned Rd;
8663
8664 Rd = inst.operands[0].reg;
8665 reject_bad_reg (Rd);
8666
8667 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
8668 {
8669 /* Defer to section relaxation. */
8670 inst.relax = inst.instruction;
8671 inst.instruction = THUMB_OP16 (inst.instruction);
8672 inst.instruction |= Rd << 4;
8673 }
8674 else if (unified_syntax && inst.size_req != 2)
8675 {
8676 /* Generate a 32-bit opcode. */
8677 inst.instruction = THUMB_OP32 (inst.instruction);
8678 inst.instruction |= Rd << 8;
8679 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
8680 inst.reloc.pc_rel = 1;
8681 }
8682 else
8683 {
8684 /* Generate a 16-bit opcode. */
8685 inst.instruction = THUMB_OP16 (inst.instruction);
8686 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8687 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
8688 inst.reloc.pc_rel = 1;
8689
8690 inst.instruction |= Rd << 4;
8691 }
8692 }
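
/* Under unified syntax the three branches above give, for example:
   "adr r3, label" (no width suffix) the relaxable 16-bit form with
   inst.relax set; "adr r9, label" the 32-bit form with
   BFD_RELOC_ARM_T32_ADD_PC12; and "adr.n r3, label" the fixed 16-bit
   form with BFD_RELOC_ARM_THUMB_ADD and the -4 PC adjustment.  */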
8693
8694 /* Arithmetic instructions for which there is just one 16-bit
8695 instruction encoding, and it allows only two low registers.
8696 For maximal compatibility with ARM syntax, we allow three register
8697 operands even when Thumb-32 instructions are not available, as long
8698 as the first two are identical. For instance, both "sbc r0,r1" and
8699 "sbc r0,r0,r1" are allowed. */
8700 static void
8701 do_t_arit3 (void)
8702 {
8703 int Rd, Rs, Rn;
8704
8705 Rd = inst.operands[0].reg;
8706 Rs = (inst.operands[1].present
8707 ? inst.operands[1].reg /* Rd, Rs, foo */
8708 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8709 Rn = inst.operands[2].reg;
8710
8711 reject_bad_reg (Rd);
8712 reject_bad_reg (Rs);
8713 if (inst.operands[2].isreg)
8714 reject_bad_reg (Rn);
8715
8716 if (unified_syntax)
8717 {
8718 if (!inst.operands[2].isreg)
8719 {
8720 /* For an immediate, we always generate a 32-bit opcode;
8721 section relaxation will shrink it later if possible. */
8722 inst.instruction = THUMB_OP32 (inst.instruction);
8723 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8724 inst.instruction |= Rd << 8;
8725 inst.instruction |= Rs << 16;
8726 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8727 }
8728 else
8729 {
8730 bfd_boolean narrow;
8731
8732 /* See if we can do this with a 16-bit instruction. */
8733 if (THUMB_SETS_FLAGS (inst.instruction))
8734 narrow = current_it_mask == 0;
8735 else
8736 narrow = current_it_mask != 0;
8737
8738 if (Rd > 7 || Rn > 7 || Rs > 7)
8739 narrow = FALSE;
8740 if (inst.operands[2].shifted)
8741 narrow = FALSE;
8742 if (inst.size_req == 4)
8743 narrow = FALSE;
8744
8745 if (narrow
8746 && Rd == Rs)
8747 {
8748 inst.instruction = THUMB_OP16 (inst.instruction);
8749 inst.instruction |= Rd;
8750 inst.instruction |= Rn << 3;
8751 return;
8752 }
8753
8754 /* If we get here, it can't be done in 16 bits. */
8755 constraint (inst.operands[2].shifted
8756 && inst.operands[2].immisreg,
8757 _("shift must be constant"));
8758 inst.instruction = THUMB_OP32 (inst.instruction);
8759 inst.instruction |= Rd << 8;
8760 inst.instruction |= Rs << 16;
8761 encode_thumb32_shifted_operand (2);
8762 }
8763 }
8764 else
8765 {
8766 /* On its face this is a lie - the instruction does set the
8767 flags. However, the only supported mnemonic in this mode
8768 says it doesn't. */
8769 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8770
8771 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8772 _("unshifted register required"));
8773 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8774 constraint (Rd != Rs,
8775 _("dest and source1 must be the same register"));
8776
8777 inst.instruction = THUMB_OP16 (inst.instruction);
8778 inst.instruction |= Rd;
8779 inst.instruction |= Rn << 3;
8780 }
8781 }
8782
8783 /* Similarly, but for instructions where the arithmetic operation is
8784 commutative, so we can allow either of them to be different from
8785 the destination operand in a 16-bit instruction. For instance, all
8786 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8787 accepted. */
8788 static void
8789 do_t_arit3c (void)
8790 {
8791 int Rd, Rs, Rn;
8792
8793 Rd = inst.operands[0].reg;
8794 Rs = (inst.operands[1].present
8795 ? inst.operands[1].reg /* Rd, Rs, foo */
8796 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8797 Rn = inst.operands[2].reg;
8798
8799 reject_bad_reg (Rd);
8800 reject_bad_reg (Rs);
8801 if (inst.operands[2].isreg)
8802 reject_bad_reg (Rn);
8803
8804 if (unified_syntax)
8805 {
8806 if (!inst.operands[2].isreg)
8807 {
8808 /* For an immediate, we always generate a 32-bit opcode;
8809 section relaxation will shrink it later if possible. */
8810 inst.instruction = THUMB_OP32 (inst.instruction);
8811 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8812 inst.instruction |= Rd << 8;
8813 inst.instruction |= Rs << 16;
8814 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8815 }
8816 else
8817 {
8818 bfd_boolean narrow;
8819
8820 /* See if we can do this with a 16-bit instruction. */
8821 if (THUMB_SETS_FLAGS (inst.instruction))
8822 narrow = current_it_mask == 0;
8823 else
8824 narrow = current_it_mask != 0;
8825
8826 if (Rd > 7 || Rn > 7 || Rs > 7)
8827 narrow = FALSE;
8828 if (inst.operands[2].shifted)
8829 narrow = FALSE;
8830 if (inst.size_req == 4)
8831 narrow = FALSE;
8832
8833 if (narrow)
8834 {
8835 if (Rd == Rs)
8836 {
8837 inst.instruction = THUMB_OP16 (inst.instruction);
8838 inst.instruction |= Rd;
8839 inst.instruction |= Rn << 3;
8840 return;
8841 }
8842 if (Rd == Rn)
8843 {
8844 inst.instruction = THUMB_OP16 (inst.instruction);
8845 inst.instruction |= Rd;
8846 inst.instruction |= Rs << 3;
8847 return;
8848 }
8849 }
8850
8851 /* If we get here, it can't be done in 16 bits. */
8852 constraint (inst.operands[2].shifted
8853 && inst.operands[2].immisreg,
8854 _("shift must be constant"));
8855 inst.instruction = THUMB_OP32 (inst.instruction);
8856 inst.instruction |= Rd << 8;
8857 inst.instruction |= Rs << 16;
8858 encode_thumb32_shifted_operand (2);
8859 }
8860 }
8861 else
8862 {
8863 /* On its face this is a lie - the instruction does set the
8864 flags. However, the only supported mnemonic in this mode
8865 says it doesn't. */
8866 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8867
8868 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8869 _("unshifted register required"));
8870 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8871
8872 inst.instruction = THUMB_OP16 (inst.instruction);
8873 inst.instruction |= Rd;
8874
8875 if (Rd == Rs)
8876 inst.instruction |= Rn << 3;
8877 else if (Rd == Rn)
8878 inst.instruction |= Rs << 3;
8879 else
8880 constraint (1, _("dest must overlap one source register"));
8881 }
8882 }
8883
8884 static void
8885 do_t_barrier (void)
8886 {
8887 if (inst.operands[0].present)
8888 {
8889 constraint ((inst.instruction & 0xf0) != 0x40
8890 && inst.operands[0].imm != 0xf,
8891 _("bad barrier type"));
8892 inst.instruction |= inst.operands[0].imm;
8893 }
8894 else
8895 inst.instruction |= 0xf;
8896 }
8897
8898 static void
8899 do_t_bfc (void)
8900 {
8901 unsigned Rd;
8902 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8903 constraint (msb > 32, _("bit-field extends past end of register"));
8904 /* The instruction encoding stores the LSB and MSB,
8905 not the LSB and width. */
8906 Rd = inst.operands[0].reg;
8907 reject_bad_reg (Rd);
8908 inst.instruction |= Rd << 8;
8909 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8910 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8911 inst.instruction |= msb - 1;
8912 }
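
/* Worked example: "bfc r2, #4, #8" has lsb = 4 and width = 8, so
   msb = 12 and the encoded msb field is 11; the lsb is split into
   imm3 = 1 (bits 14:12, from imm & 0x1c) and imm2 = 0 (bits 7:6), with
   r2 placed in bits 11:8.  */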
8913
8914 static void
8915 do_t_bfi (void)
8916 {
8917 int Rd, Rn;
8918 unsigned int msb;
8919
8920 Rd = inst.operands[0].reg;
8921 reject_bad_reg (Rd);
8922
8923 /* #0 in second position is alternative syntax for bfc, which is
8924 the same instruction but with REG_PC in the Rm field. */
8925 if (!inst.operands[1].isreg)
8926 Rn = REG_PC;
8927 else
8928 {
8929 Rn = inst.operands[1].reg;
8930 reject_bad_reg (Rn);
8931 }
8932
8933 msb = inst.operands[2].imm + inst.operands[3].imm;
8934 constraint (msb > 32, _("bit-field extends past end of register"));
8935 /* The instruction encoding stores the LSB and MSB,
8936 not the LSB and width. */
8937 inst.instruction |= Rd << 8;
8938 inst.instruction |= Rn << 16;
8939 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8940 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8941 inst.instruction |= msb - 1;
8942 }
8943
8944 static void
8945 do_t_bfx (void)
8946 {
8947 unsigned Rd, Rn;
8948
8949 Rd = inst.operands[0].reg;
8950 Rn = inst.operands[1].reg;
8951
8952 reject_bad_reg (Rd);
8953 reject_bad_reg (Rn);
8954
8955 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8956 _("bit-field extends past end of register"));
8957 inst.instruction |= Rd << 8;
8958 inst.instruction |= Rn << 16;
8959 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8960 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8961 inst.instruction |= inst.operands[3].imm - 1;
8962 }
8963
8964 /* ARM V5 Thumb BLX (argument parse)
8965 BLX <target_addr> which is BLX(1)
8966 BLX <Rm> which is BLX(2)
8967 Unfortunately, there are two different opcodes for this mnemonic.
8968 So, the insns[].value is not used, and the code here zaps values
8969 into inst.instruction.
8970
8971 ??? How to take advantage of the additional two bits of displacement
8972 available in Thumb32 mode? Need new relocation? */
8973
8974 static void
8975 do_t_blx (void)
8976 {
8977 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8978 if (inst.operands[0].isreg)
8979 {
8980 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8981 /* We have a register, so this is BLX(2). */
8982 inst.instruction |= inst.operands[0].reg << 3;
8983 }
8984 else
8985 {
8986 /* No register. This must be BLX(1). */
8987 inst.instruction = 0xf000e800;
8988 #ifdef OBJ_ELF
8989 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8990 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8991 else
8992 #endif
8993 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8994 inst.reloc.pc_rel = 1;
8995 }
8996 }
8997
8998 static void
8999 do_t_branch (void)
9000 {
9001 int opcode;
9002 int cond;
9003
9004 if (current_it_mask)
9005 {
9006 /* Conditional branches inside IT blocks are encoded as unconditional
9007 branches. */
9008 cond = COND_ALWAYS;
9009 /* A branch must be the last instruction in an IT block. */
9010 constraint (current_it_mask != 0x10, BAD_BRANCH);
9011 }
9012 else
9013 cond = inst.cond;
9014
9015 if (cond != COND_ALWAYS)
9016 opcode = T_MNEM_bcond;
9017 else
9018 opcode = inst.instruction;
9019
9020 if (unified_syntax && inst.size_req == 4)
9021 {
9022 inst.instruction = THUMB_OP32 (opcode);
9023 if (cond == COND_ALWAYS)
9024 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
9025 else
9026 {
9027 assert (cond != 0xF);
9028 inst.instruction |= cond << 22;
9029 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
9030 }
9031 }
9032 else
9033 {
9034 inst.instruction = THUMB_OP16 (opcode);
9035 if (cond == COND_ALWAYS)
9036 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
9037 else
9038 {
9039 inst.instruction |= cond << 8;
9040 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
9041 }
9042 /* Allow section relaxation. */
9043 if (unified_syntax && inst.size_req != 2)
9044 inst.relax = opcode;
9045 }
9046
9047 inst.reloc.pc_rel = 1;
9048 }
9049
9050 static void
9051 do_t_bkpt (void)
9052 {
9053 constraint (inst.cond != COND_ALWAYS,
9054 _("instruction is always unconditional"));
9055 if (inst.operands[0].present)
9056 {
9057 constraint (inst.operands[0].imm > 255,
9058 _("immediate value out of range"));
9059 inst.instruction |= inst.operands[0].imm;
9060 }
9061 }
9062
9063 static void
9064 do_t_branch23 (void)
9065 {
9066 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9067 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
9068 inst.reloc.pc_rel = 1;
9069
9070 /* If the destination of the branch is a defined symbol which does not have
9071 the THUMB_FUNC attribute, then we must be calling a function which has
9072 the (interfacearm) attribute. We look for the Thumb entry point to that
9073 function and change the branch to refer to that function instead. */
9074 if ( inst.reloc.exp.X_op == O_symbol
9075 && inst.reloc.exp.X_add_symbol != NULL
9076 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
9077 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
9078 inst.reloc.exp.X_add_symbol =
9079 find_real_start (inst.reloc.exp.X_add_symbol);
9080 }
9081
9082 static void
9083 do_t_bx (void)
9084 {
9085 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9086 inst.instruction |= inst.operands[0].reg << 3;
9087 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9088 should cause the alignment to be checked once it is known. This is
9089 because BX PC only works if the instruction is word aligned. */
9090 }
9091
9092 static void
9093 do_t_bxj (void)
9094 {
9095 int Rm;
9096
9097 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9098 Rm = inst.operands[0].reg;
9099 reject_bad_reg (Rm);
9100 inst.instruction |= Rm << 16;
9101 }
9102
9103 static void
9104 do_t_clz (void)
9105 {
9106 unsigned Rd;
9107 unsigned Rm;
9108
9109 Rd = inst.operands[0].reg;
9110 Rm = inst.operands[1].reg;
9111
9112 reject_bad_reg (Rd);
9113 reject_bad_reg (Rm);
9114
9115 inst.instruction |= Rd << 8;
9116 inst.instruction |= Rm << 16;
9117 inst.instruction |= Rm;
9118 }
9119
9120 static void
9121 do_t_cps (void)
9122 {
9123 constraint (current_it_mask, BAD_NOT_IT);
9124 inst.instruction |= inst.operands[0].imm;
9125 }
9126
9127 static void
9128 do_t_cpsi (void)
9129 {
9130 constraint (current_it_mask, BAD_NOT_IT);
9131 if (unified_syntax
9132 && (inst.operands[1].present || inst.size_req == 4)
9133 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
9134 {
9135 unsigned int imod = (inst.instruction & 0x0030) >> 4;
9136 inst.instruction = 0xf3af8000;
9137 inst.instruction |= imod << 9;
9138 inst.instruction |= inst.operands[0].imm << 5;
9139 if (inst.operands[1].present)
9140 inst.instruction |= 0x100 | inst.operands[1].imm;
9141 }
9142 else
9143 {
9144 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
9145 && (inst.operands[0].imm & 4),
9146 _("selected processor does not support 'A' form "
9147 "of this instruction"));
9148 constraint (inst.operands[1].present || inst.size_req == 4,
9149 _("Thumb does not support the 2-argument "
9150 "form of this instruction"));
9151 inst.instruction |= inst.operands[0].imm;
9152 }
9153 }
9154
9155 /* THUMB CPY instruction (argument parse). */
9156
9157 static void
9158 do_t_cpy (void)
9159 {
9160 if (inst.size_req == 4)
9161 {
9162 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9163 inst.instruction |= inst.operands[0].reg << 8;
9164 inst.instruction |= inst.operands[1].reg;
9165 }
9166 else
9167 {
9168 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9169 inst.instruction |= (inst.operands[0].reg & 0x7);
9170 inst.instruction |= inst.operands[1].reg << 3;
9171 }
9172 }
9173
9174 static void
9175 do_t_cbz (void)
9176 {
9177 constraint (current_it_mask, BAD_NOT_IT);
9178 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9179 inst.instruction |= inst.operands[0].reg;
9180 inst.reloc.pc_rel = 1;
9181 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9182 }
9183
9184 static void
9185 do_t_dbg (void)
9186 {
9187 inst.instruction |= inst.operands[0].imm;
9188 }
9189
9190 static void
9191 do_t_div (void)
9192 {
9193 unsigned Rd, Rn, Rm;
9194
9195 Rd = inst.operands[0].reg;
9196 Rn = (inst.operands[1].present
9197 ? inst.operands[1].reg : Rd);
9198 Rm = inst.operands[2].reg;
9199
9200 reject_bad_reg (Rd);
9201 reject_bad_reg (Rn);
9202 reject_bad_reg (Rm);
9203
9204 inst.instruction |= Rd << 8;
9205 inst.instruction |= Rn << 16;
9206 inst.instruction |= Rm;
9207 }
9208
9209 static void
9210 do_t_hint (void)
9211 {
9212 if (unified_syntax && inst.size_req == 4)
9213 inst.instruction = THUMB_OP32 (inst.instruction);
9214 else
9215 inst.instruction = THUMB_OP16 (inst.instruction);
9216 }
9217
9218 static void
9219 do_t_it (void)
9220 {
9221 unsigned int cond = inst.operands[0].imm;
9222
9223 constraint (current_it_mask, BAD_NOT_IT);
9224 current_it_mask = (inst.instruction & 0xf) | 0x10;
9225 current_cc = cond;
9226
9227 /* If the condition is a negative condition, invert the mask. */
9228 if ((cond & 0x1) == 0x0)
9229 {
9230 unsigned int mask = inst.instruction & 0x000f;
9231
9232 if ((mask & 0x7) == 0)
9233 /* no conversion needed */;
9234 else if ((mask & 0x3) == 0)
9235 mask ^= 0x8;
9236 else if ((mask & 0x1) == 0)
9237 mask ^= 0xC;
9238 else
9239 mask ^= 0xE;
9240
9241 inst.instruction &= 0xfff0;
9242 inst.instruction |= mask;
9243 }
9244
9245 inst.instruction |= cond << 4;
9246 }
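
/* Worked example of the inversion above, assuming the operand parser has
   left the T/E pattern in bits 3:0 encoded as if the condition were odd:
   for "ITE EQ" the incoming mask is 0b0100; EQ (0b0000) has bit 0 clear
   and (mask & 3) == 0, so the mask is XORed with 0x8 to give 0b1100, and
   the final opcode is 0xbf00 | (cond << 4) | mask = 0xbf0c.  */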
9247
9248 /* Helper function used for both push/pop and ldm/stm. */
9249 static void
9250 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
9251 {
9252 bfd_boolean load;
9253
9254 load = (inst.instruction & (1 << 20)) != 0;
9255
9256 if (mask & (1 << 13))
9257 inst.error = _("SP not allowed in register list");
9258 if (load)
9259 {
9260 if (mask & (1 << 14)
9261 && mask & (1 << 15))
9262 inst.error = _("LR and PC should not both be in register list");
9263
9264 if ((mask & (1 << base)) != 0
9265 && writeback)
9266 as_warn (_("base register should not be in register list "
9267 "when written back"));
9268 }
9269 else
9270 {
9271 if (mask & (1 << 15))
9272 inst.error = _("PC not allowed in register list");
9273
9274 if (mask & (1 << base))
9275 as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
9276 }
9277
9278 if ((mask & (mask - 1)) == 0)
9279 {
9280 /* Single register transfers implemented as str/ldr. */
9281 if (writeback)
9282 {
9283 if (inst.instruction & (1 << 23))
9284 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
9285 else
9286 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
9287 }
9288 else
9289 {
9290 if (inst.instruction & (1 << 23))
9291 inst.instruction = 0x00800000; /* ia -> [base] */
9292 else
9293 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
9294 }
9295
9296 inst.instruction |= 0xf8400000;
9297 if (load)
9298 inst.instruction |= 0x00100000;
9299
9300 mask = ffs (mask) - 1;
9301 mask <<= 12;
9302 }
9303 else if (writeback)
9304 inst.instruction |= WRITE_BACK;
9305
9306 inst.instruction |= mask;
9307 inst.instruction |= base << 16;
9308 }
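
/* Example of the single-register rewrite above: "ldmia.w r0!, {r3}" has
   a one-bit mask, so it is turned into a post-indexed load -- the opcode
   becomes 0xf8500b04, r3 goes into bits 15:12 and r0 into bits 19:16,
   giving 0xf8503b04, i.e. "ldr.w r3, [r0], #4".  */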
9309
9310 static void
9311 do_t_ldmstm (void)
9312 {
9313 /* This really doesn't seem worth it. */
9314 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9315 _("expression too complex"));
9316 constraint (inst.operands[1].writeback,
9317 _("Thumb load/store multiple does not support {reglist}^"));
9318
9319 if (unified_syntax)
9320 {
9321 bfd_boolean narrow;
9322 unsigned mask;
9323
9324 narrow = FALSE;
9325 /* See if we can use a 16-bit instruction. */
9326 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
9327 && inst.size_req != 4
9328 && !(inst.operands[1].imm & ~0xff))
9329 {
9330 mask = 1 << inst.operands[0].reg;
9331
9332 if (inst.operands[0].reg <= 7
9333 && (inst.instruction == T_MNEM_stmia
9334 ? inst.operands[0].writeback
9335 : (inst.operands[0].writeback
9336 == !(inst.operands[1].imm & mask))))
9337 {
9338 if (inst.instruction == T_MNEM_stmia
9339 && (inst.operands[1].imm & mask)
9340 && (inst.operands[1].imm & (mask - 1)))
9341 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9342 inst.operands[0].reg);
9343
9344 inst.instruction = THUMB_OP16 (inst.instruction);
9345 inst.instruction |= inst.operands[0].reg << 8;
9346 inst.instruction |= inst.operands[1].imm;
9347 narrow = TRUE;
9348 }
9349 else if (inst.operands[0].reg == REG_SP
9350 && inst.operands[0].writeback)
9351 {
9352 inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
9353 ? T_MNEM_push : T_MNEM_pop);
9354 inst.instruction |= inst.operands[1].imm;
9355 narrow = TRUE;
9356 }
9357 }
9358
9359 if (!narrow)
9360 {
9361 if (inst.instruction < 0xffff)
9362 inst.instruction = THUMB_OP32 (inst.instruction);
9363
9364 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
9365 inst.operands[0].writeback);
9366 }
9367 }
9368 else
9369 {
9370 constraint (inst.operands[0].reg > 7
9371 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
9372 constraint (inst.instruction != T_MNEM_ldmia
9373 && inst.instruction != T_MNEM_stmia,
9374 _("Thumb-2 instruction only valid in unified syntax"));
9375 if (inst.instruction == T_MNEM_stmia)
9376 {
9377 if (!inst.operands[0].writeback)
9378 as_warn (_("this instruction will write back the base register"));
9379 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9380 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9381 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9382 inst.operands[0].reg);
9383 }
9384 else
9385 {
9386 if (!inst.operands[0].writeback
9387 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
9388 as_warn (_("this instruction will write back the base register"));
9389 else if (inst.operands[0].writeback
9390 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
9391 as_warn (_("this instruction will not write back the base register"));
9392 }
9393
9394 inst.instruction = THUMB_OP16 (inst.instruction);
9395 inst.instruction |= inst.operands[0].reg << 8;
9396 inst.instruction |= inst.operands[1].imm;
9397 }
9398 }
9399
9400 static void
9401 do_t_ldrex (void)
9402 {
9403 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9404 || inst.operands[1].postind || inst.operands[1].writeback
9405 || inst.operands[1].immisreg || inst.operands[1].shifted
9406 || inst.operands[1].negative,
9407 BAD_ADDR_MODE);
9408
9409 inst.instruction |= inst.operands[0].reg << 12;
9410 inst.instruction |= inst.operands[1].reg << 16;
9411 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9412 }
9413
9414 static void
9415 do_t_ldrexd (void)
9416 {
9417 if (!inst.operands[1].present)
9418 {
9419 constraint (inst.operands[0].reg == REG_LR,
9420 _("r14 not allowed as first register "
9421 "when second register is omitted"));
9422 inst.operands[1].reg = inst.operands[0].reg + 1;
9423 }
9424 constraint (inst.operands[0].reg == inst.operands[1].reg,
9425 BAD_OVERLAP);
9426
9427 inst.instruction |= inst.operands[0].reg << 12;
9428 inst.instruction |= inst.operands[1].reg << 8;
9429 inst.instruction |= inst.operands[2].reg << 16;
9430 }
9431
9432 static void
9433 do_t_ldst (void)
9434 {
9435 unsigned long opcode;
9436 int Rn;
9437
9438 opcode = inst.instruction;
9439 if (unified_syntax)
9440 {
9441 if (!inst.operands[1].isreg)
9442 {
9443 if (opcode <= 0xffff)
9444 inst.instruction = THUMB_OP32 (opcode);
9445 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9446 return;
9447 }
9448 if (inst.operands[1].isreg
9449 && !inst.operands[1].writeback
9450 && !inst.operands[1].shifted && !inst.operands[1].postind
9451 && !inst.operands[1].negative && inst.operands[0].reg <= 7
9452 && opcode <= 0xffff
9453 && inst.size_req != 4)
9454 {
9455 /* Insn may have a 16-bit form. */
9456 Rn = inst.operands[1].reg;
9457 if (inst.operands[1].immisreg)
9458 {
9459 inst.instruction = THUMB_OP16 (opcode);
9460 /* [Rn, Rm] */
9461 if (Rn <= 7 && inst.operands[1].imm <= 7)
9462 goto op16;
9463 }
9464 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
9465 && opcode != T_MNEM_ldrsb)
9466 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
9467 || (Rn == REG_SP && opcode == T_MNEM_str))
9468 {
9469 /* [Rn, #const] */
9470 if (Rn > 7)
9471 {
9472 if (Rn == REG_PC)
9473 {
9474 if (inst.reloc.pc_rel)
9475 opcode = T_MNEM_ldr_pc2;
9476 else
9477 opcode = T_MNEM_ldr_pc;
9478 }
9479 else
9480 {
9481 if (opcode == T_MNEM_ldr)
9482 opcode = T_MNEM_ldr_sp;
9483 else
9484 opcode = T_MNEM_str_sp;
9485 }
9486 inst.instruction = inst.operands[0].reg << 8;
9487 }
9488 else
9489 {
9490 inst.instruction = inst.operands[0].reg;
9491 inst.instruction |= inst.operands[1].reg << 3;
9492 }
9493 inst.instruction |= THUMB_OP16 (opcode);
9494 if (inst.size_req == 2)
9495 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9496 else
9497 inst.relax = opcode;
9498 return;
9499 }
9500 }
9501 /* Definitely a 32-bit variant. */
9502 inst.instruction = THUMB_OP32 (opcode);
9503 inst.instruction |= inst.operands[0].reg << 12;
9504 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
9505 return;
9506 }
9507
9508 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9509
9510 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
9511 {
9512 /* Only [Rn,Rm] is acceptable. */
9513 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
9514 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
9515 || inst.operands[1].postind || inst.operands[1].shifted
9516 || inst.operands[1].negative,
9517 _("Thumb does not support this addressing mode"));
9518 inst.instruction = THUMB_OP16 (inst.instruction);
9519 goto op16;
9520 }
9521
9522 inst.instruction = THUMB_OP16 (inst.instruction);
9523 if (!inst.operands[1].isreg)
9524 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9525 return;
9526
9527 constraint (!inst.operands[1].preind
9528 || inst.operands[1].shifted
9529 || inst.operands[1].writeback,
9530 _("Thumb does not support this addressing mode"));
9531 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
9532 {
9533 constraint (inst.instruction & 0x0600,
9534 _("byte or halfword not valid for base register"));
9535 constraint (inst.operands[1].reg == REG_PC
9536 && !(inst.instruction & THUMB_LOAD_BIT),
9537 _("r15 based store not allowed"));
9538 constraint (inst.operands[1].immisreg,
9539 _("invalid base register for register offset"));
9540
9541 if (inst.operands[1].reg == REG_PC)
9542 inst.instruction = T_OPCODE_LDR_PC;
9543 else if (inst.instruction & THUMB_LOAD_BIT)
9544 inst.instruction = T_OPCODE_LDR_SP;
9545 else
9546 inst.instruction = T_OPCODE_STR_SP;
9547
9548 inst.instruction |= inst.operands[0].reg << 8;
9549 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9550 return;
9551 }
9552
9553 constraint (inst.operands[1].reg > 7, BAD_HIREG);
9554 if (!inst.operands[1].immisreg)
9555 {
9556 /* Immediate offset. */
9557 inst.instruction |= inst.operands[0].reg;
9558 inst.instruction |= inst.operands[1].reg << 3;
9559 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9560 return;
9561 }
9562
9563 /* Register offset. */
9564 constraint (inst.operands[1].imm > 7, BAD_HIREG);
9565 constraint (inst.operands[1].negative,
9566 _("Thumb does not support this addressing mode"));
9567
9568 op16:
9569 switch (inst.instruction)
9570 {
9571 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
9572 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
9573 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
9574 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
9575 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
9576 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
9577 case 0x5600 /* ldrsb */:
9578 case 0x5e00 /* ldrsh */: break;
9579 default: abort ();
9580 }
9581
9582 inst.instruction |= inst.operands[0].reg;
9583 inst.instruction |= inst.operands[1].reg << 3;
9584 inst.instruction |= inst.operands[1].imm << 6;
9585 }
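
/* One example of the narrow selection above, assuming unified syntax:
   "ldr r0, [sp, #8]" hits the Rn == REG_SP && opcode == T_MNEM_ldr case,
   so the opcode is switched to T_MNEM_ldr_sp and the 16-bit 0x9800-based
   encoding is used; without a width suffix inst.relax is set so the
   instruction can still be widened, while an explicit ".n" pins it to
   16 bits and leaves the offset to BFD_RELOC_ARM_THUMB_OFFSET.  */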
9586
9587 static void
9588 do_t_ldstd (void)
9589 {
9590 if (!inst.operands[1].present)
9591 {
9592 inst.operands[1].reg = inst.operands[0].reg + 1;
9593 constraint (inst.operands[0].reg == REG_LR,
9594 _("r14 not allowed here"));
9595 }
9596 inst.instruction |= inst.operands[0].reg << 12;
9597 inst.instruction |= inst.operands[1].reg << 8;
9598 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9599 }
9600
9601 static void
9602 do_t_ldstt (void)
9603 {
9604 inst.instruction |= inst.operands[0].reg << 12;
9605 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
9606 }
9607
9608 static void
9609 do_t_mla (void)
9610 {
9611 unsigned Rd, Rn, Rm, Ra;
9612
9613 Rd = inst.operands[0].reg;
9614 Rn = inst.operands[1].reg;
9615 Rm = inst.operands[2].reg;
9616 Ra = inst.operands[3].reg;
9617
9618 reject_bad_reg (Rd);
9619 reject_bad_reg (Rn);
9620 reject_bad_reg (Rm);
9621 reject_bad_reg (Ra);
9622
9623 inst.instruction |= Rd << 8;
9624 inst.instruction |= Rn << 16;
9625 inst.instruction |= Rm;
9626 inst.instruction |= Ra << 12;
9627 }
9628
9629 static void
9630 do_t_mlal (void)
9631 {
9632 unsigned RdLo, RdHi, Rn, Rm;
9633
9634 RdLo = inst.operands[0].reg;
9635 RdHi = inst.operands[1].reg;
9636 Rn = inst.operands[2].reg;
9637 Rm = inst.operands[3].reg;
9638
9639 reject_bad_reg (RdLo);
9640 reject_bad_reg (RdHi);
9641 reject_bad_reg (Rn);
9642 reject_bad_reg (Rm);
9643
9644 inst.instruction |= RdLo << 12;
9645 inst.instruction |= RdHi << 8;
9646 inst.instruction |= Rn << 16;
9647 inst.instruction |= Rm;
9648 }
9649
9650 static void
9651 do_t_mov_cmp (void)
9652 {
9653 unsigned Rn, Rm;
9654
9655 Rn = inst.operands[0].reg;
9656 Rm = inst.operands[1].reg;
9657
9658 if (unified_syntax)
9659 {
9660 int r0off = (inst.instruction == T_MNEM_mov
9661 || inst.instruction == T_MNEM_movs) ? 8 : 16;
9662 unsigned long opcode;
9663 bfd_boolean narrow;
9664 bfd_boolean low_regs;
9665
9666 low_regs = (Rn <= 7 && Rm <= 7);
9667 opcode = inst.instruction;
9668 if (current_it_mask)
9669 narrow = opcode != T_MNEM_movs;
9670 else
9671 narrow = opcode != T_MNEM_movs || low_regs;
9672 if (inst.size_req == 4
9673 || inst.operands[1].shifted)
9674 narrow = FALSE;
9675
9676 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
9677 if (opcode == T_MNEM_movs && inst.operands[1].isreg
9678 && !inst.operands[1].shifted
9679 && Rn == REG_PC
9680 && Rm == REG_LR)
9681 {
9682 inst.instruction = T2_SUBS_PC_LR;
9683 return;
9684 }
9685
9686 if (opcode == T_MNEM_cmp)
9687 {
9688 constraint (Rn == REG_PC, BAD_PC);
9689 reject_bad_reg (Rm);
9690 }
9691 else if (opcode == T_MNEM_mov
9692 || opcode == T_MNEM_movs)
9693 {
9694 if (inst.operands[1].isreg)
9695 {
9696 if (opcode == T_MNEM_movs)
9697 {
9698 reject_bad_reg (Rn);
9699 reject_bad_reg (Rm);
9700 }
9701 else if ((Rn == REG_SP || Rn == REG_PC)
9702 && (Rm == REG_SP || Rm == REG_PC))
9703 reject_bad_reg (Rm);
9704 }
9705 else
9706 reject_bad_reg (Rn);
9707 }
9708
9709 if (!inst.operands[1].isreg)
9710 {
9711 /* Immediate operand. */
9712 if (current_it_mask == 0 && opcode == T_MNEM_mov)
9713 narrow = 0;
9714 if (low_regs && narrow)
9715 {
9716 inst.instruction = THUMB_OP16 (opcode);
9717 inst.instruction |= Rn << 8;
9718 if (inst.size_req == 2)
9719 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9720 else
9721 inst.relax = opcode;
9722 }
9723 else
9724 {
9725 inst.instruction = THUMB_OP32 (inst.instruction);
9726 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9727 inst.instruction |= Rn << r0off;
9728 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9729 }
9730 }
9731 else if (inst.operands[1].shifted && inst.operands[1].immisreg
9732 && (inst.instruction == T_MNEM_mov
9733 || inst.instruction == T_MNEM_movs))
9734 {
9735 /* Register shifts are encoded as separate shift instructions. */
9736 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
9737
9738 if (current_it_mask)
9739 narrow = !flags;
9740 else
9741 narrow = flags;
9742
9743 if (inst.size_req == 4)
9744 narrow = FALSE;
9745
9746 if (!low_regs || inst.operands[1].imm > 7)
9747 narrow = FALSE;
9748
9749 if (Rn != Rm)
9750 narrow = FALSE;
9751
9752 switch (inst.operands[1].shift_kind)
9753 {
9754 case SHIFT_LSL:
9755 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
9756 break;
9757 case SHIFT_ASR:
9758 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
9759 break;
9760 case SHIFT_LSR:
9761 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
9762 break;
9763 case SHIFT_ROR:
9764 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
9765 break;
9766 default:
9767 abort ();
9768 }
9769
9770 inst.instruction = opcode;
9771 if (narrow)
9772 {
9773 inst.instruction |= Rn;
9774 inst.instruction |= inst.operands[1].imm << 3;
9775 }
9776 else
9777 {
9778 if (flags)
9779 inst.instruction |= CONDS_BIT;
9780
9781 inst.instruction |= Rn << 8;
9782 inst.instruction |= Rm << 16;
9783 inst.instruction |= inst.operands[1].imm;
9784 }
9785 }
9786 else if (!narrow)
9787 {
9788 /* Some MOV instructions with an immediate shift have narrow variants.
9789 Register shifts are handled above. */
9790 if (low_regs && inst.operands[1].shifted
9791 && (inst.instruction == T_MNEM_mov
9792 || inst.instruction == T_MNEM_movs))
9793 {
9794 if (current_it_mask)
9795 narrow = (inst.instruction == T_MNEM_mov);
9796 else
9797 narrow = (inst.instruction == T_MNEM_movs);
9798 }
9799
9800 if (narrow)
9801 {
9802 switch (inst.operands[1].shift_kind)
9803 {
9804 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9805 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9806 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9807 default: narrow = FALSE; break;
9808 }
9809 }
9810
9811 if (narrow)
9812 {
9813 inst.instruction |= Rn;
9814 inst.instruction |= Rm << 3;
9815 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9816 }
9817 else
9818 {
9819 inst.instruction = THUMB_OP32 (inst.instruction);
9820 inst.instruction |= Rn << r0off;
9821 encode_thumb32_shifted_operand (1);
9822 }
9823 }
9824 else
9825 switch (inst.instruction)
9826 {
9827 case T_MNEM_mov:
9828 inst.instruction = T_OPCODE_MOV_HR;
9829 inst.instruction |= (Rn & 0x8) << 4;
9830 inst.instruction |= (Rn & 0x7);
9831 inst.instruction |= Rm << 3;
9832 break;
9833
9834 case T_MNEM_movs:
9835 /* We know we have low registers at this point.
9836 Generate ADD Rd, Rs, #0. */
9837 inst.instruction = T_OPCODE_ADD_I3;
9838 inst.instruction |= Rn;
9839 inst.instruction |= Rm << 3;
9840 break;
9841
9842 case T_MNEM_cmp:
9843 if (low_regs)
9844 {
9845 inst.instruction = T_OPCODE_CMP_LR;
9846 inst.instruction |= Rn;
9847 inst.instruction |= Rm << 3;
9848 }
9849 else
9850 {
9851 inst.instruction = T_OPCODE_CMP_HR;
9852 inst.instruction |= (Rn & 0x8) << 4;
9853 inst.instruction |= (Rn & 0x7);
9854 inst.instruction |= Rm << 3;
9855 }
9856 break;
9857 }
9858 return;
9859 }
9860
9861 inst.instruction = THUMB_OP16 (inst.instruction);
9862 if (inst.operands[1].isreg)
9863 {
9864 if (Rn < 8 && Rm < 8)
9865 {
9866 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
9867 since a MOV instruction produces unpredictable results. */
9868 if (inst.instruction == T_OPCODE_MOV_I8)
9869 inst.instruction = T_OPCODE_ADD_I3;
9870 else
9871 inst.instruction = T_OPCODE_CMP_LR;
9872
9873 inst.instruction |= Rn;
9874 inst.instruction |= Rm << 3;
9875 }
9876 else
9877 {
9878 if (inst.instruction == T_OPCODE_MOV_I8)
9879 inst.instruction = T_OPCODE_MOV_HR;
9880 else
9881 inst.instruction = T_OPCODE_CMP_HR;
9882 do_t_cpy ();
9883 }
9884 }
9885 else
9886 {
9887 constraint (Rn > 7,
9888 _("only lo regs allowed with immediate"));
9889 inst.instruction |= Rn << 8;
9890 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9891 }
9892 }
9893
9894 static void
9895 do_t_mov16 (void)
9896 {
9897 unsigned Rd;
9898 bfd_vma imm;
9899 bfd_boolean top;
9900
9901 top = (inst.instruction & 0x00800000) != 0;
9902 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
9903 {
9904 constraint (top, _(":lower16: not allowed in this instruction"));
9905 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
9906 }
9907 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
9908 {
9909 constraint (!top, _(":upper16: not allowed in this instruction"));
9910 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
9911 }
9912
9913 Rd = inst.operands[0].reg;
9914 reject_bad_reg (Rd);
9915
9916 inst.instruction |= Rd << 8;
9917 if (inst.reloc.type == BFD_RELOC_UNUSED)
9918 {
9919 imm = inst.reloc.exp.X_add_number;
9920 inst.instruction |= (imm & 0xf000) << 4;
9921 inst.instruction |= (imm & 0x0800) << 15;
9922 inst.instruction |= (imm & 0x0700) << 4;
9923 inst.instruction |= (imm & 0x00ff);
9924 }
9925 }
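
/* The shifts above scatter a 16-bit value into the T32 imm4:i:imm3:imm8
   fields.  For example, #0x1234 becomes imm4 = 0x1 (bits 19:16), i = 0
   (bit 26), imm3 = 0x2 (bits 14:12) and imm8 = 0x34 (bits 7:0), with Rd
   in bits 11:8; when a :lower16:/:upper16: operator was used, the value
   is instead left to the BFD_RELOC_ARM_THUMB_MOVW/MOVT fixup.  */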
9926
9927 static void
9928 do_t_mvn_tst (void)
9929 {
9930 unsigned Rn, Rm;
9931
9932 Rn = inst.operands[0].reg;
9933 Rm = inst.operands[1].reg;
9934
9935 if (inst.instruction == T_MNEM_cmp
9936 || inst.instruction == T_MNEM_cmn)
9937 constraint (Rn == REG_PC, BAD_PC);
9938 else
9939 reject_bad_reg (Rn);
9940 reject_bad_reg (Rm);
9941
9942 if (unified_syntax)
9943 {
9944 int r0off = (inst.instruction == T_MNEM_mvn
9945 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
9946 bfd_boolean narrow;
9947
9948 if (inst.size_req == 4
9949 || inst.instruction > 0xffff
9950 || inst.operands[1].shifted
9951 || Rn > 7 || Rm > 7)
9952 narrow = FALSE;
9953 else if (inst.instruction == T_MNEM_cmn)
9954 narrow = TRUE;
9955 else if (THUMB_SETS_FLAGS (inst.instruction))
9956 narrow = (current_it_mask == 0);
9957 else
9958 narrow = (current_it_mask != 0);
9959
9960 if (!inst.operands[1].isreg)
9961 {
9962 /* For an immediate, we always generate a 32-bit opcode;
9963 section relaxation will shrink it later if possible. */
9964 if (inst.instruction < 0xffff)
9965 inst.instruction = THUMB_OP32 (inst.instruction);
9966 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9967 inst.instruction |= Rn << r0off;
9968 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9969 }
9970 else
9971 {
9972 /* See if we can do this with a 16-bit instruction. */
9973 if (narrow)
9974 {
9975 inst.instruction = THUMB_OP16 (inst.instruction);
9976 inst.instruction |= Rn;
9977 inst.instruction |= Rm << 3;
9978 }
9979 else
9980 {
9981 constraint (inst.operands[1].shifted
9982 && inst.operands[1].immisreg,
9983 _("shift must be constant"));
9984 if (inst.instruction < 0xffff)
9985 inst.instruction = THUMB_OP32 (inst.instruction);
9986 inst.instruction |= Rn << r0off;
9987 encode_thumb32_shifted_operand (1);
9988 }
9989 }
9990 }
9991 else
9992 {
9993 constraint (inst.instruction > 0xffff
9994 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
9995 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
9996 _("unshifted register required"));
9997 constraint (Rn > 7 || Rm > 7,
9998 BAD_HIREG);
9999
10000 inst.instruction = THUMB_OP16 (inst.instruction);
10001 inst.instruction |= Rn;
10002 inst.instruction |= Rm << 3;
10003 }
10004 }
10005
10006 static void
10007 do_t_mrs (void)
10008 {
10009 unsigned Rd;
10010 int flags;
10011
10012 if (do_vfp_nsyn_mrs () == SUCCESS)
10013 return;
10014
10015 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
10016 if (flags == 0)
10017 {
10018 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
10019 _("selected processor does not support "
10020 "requested special purpose register"));
10021 }
10022 else
10023 {
10024 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
10025 _("selected processor does not support "
10026 "requested special purpose register"));
10027 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
10028 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
10029 _("'CPSR' or 'SPSR' expected"));
10030 }
10031
10032 Rd = inst.operands[0].reg;
10033 reject_bad_reg (Rd);
10034
10035 inst.instruction |= Rd << 8;
10036 inst.instruction |= (flags & SPSR_BIT) >> 2;
10037 inst.instruction |= inst.operands[1].imm & 0xff;
10038 }
10039
10040 static void
10041 do_t_msr (void)
10042 {
10043 int flags;
10044 unsigned Rn;
10045
10046 if (do_vfp_nsyn_msr () == SUCCESS)
10047 return;
10048
10049 constraint (!inst.operands[1].isreg,
10050 _("Thumb encoding does not support an immediate here"));
10051 flags = inst.operands[0].imm;
10052 if (flags & ~0xff)
10053 {
10054 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
10055 _("selected processor does not support "
10056 "requested special purpose register"));
10057 }
10058 else
10059 {
10060 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
10061 _("selected processor does not support "
10062 "requested special purpose register"));
10063 flags |= PSR_f;
10064 }
10065
10066 Rn = inst.operands[1].reg;
10067 reject_bad_reg (Rn);
10068
10069 inst.instruction |= (flags & SPSR_BIT) >> 2;
10070 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
10071 inst.instruction |= (flags & 0xff);
10072 inst.instruction |= Rn << 16;
10073 }
10074
10075 static void
10076 do_t_mul (void)
10077 {
10078 bfd_boolean narrow;
10079 unsigned Rd, Rn, Rm;
10080
10081 if (!inst.operands[2].present)
10082 inst.operands[2].reg = inst.operands[0].reg;
10083
10084 Rd = inst.operands[0].reg;
10085 Rn = inst.operands[1].reg;
10086 Rm = inst.operands[2].reg;
10087
10088 if (unified_syntax)
10089 {
10090 if (inst.size_req == 4
10091 || (Rd != Rn
10092 && Rd != Rm)
10093 || Rn > 7
10094 || Rm > 7)
10095 narrow = FALSE;
10096 else if (inst.instruction == T_MNEM_muls)
10097 narrow = (current_it_mask == 0);
10098 else
10099 narrow = (current_it_mask != 0);
10100 }
10101 else
10102 {
10103 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
10104 constraint (Rn > 7 || Rm > 7,
10105 BAD_HIREG);
10106 narrow = TRUE;
10107 }
10108
10109 if (narrow)
10110 {
10111 /* 16-bit MULS/Conditional MUL. */
10112 inst.instruction = THUMB_OP16 (inst.instruction);
10113 inst.instruction |= Rd;
10114
10115 if (Rd == Rn)
10116 inst.instruction |= Rm << 3;
10117 else if (Rd == Rm)
10118 inst.instruction |= Rn << 3;
10119 else
10120 constraint (1, _("dest must overlap one source register"));
10121 }
10122 else
10123 {
10124 constraint (inst.instruction != T_MNEM_mul,
10125 _("Thumb-2 MUL must not set flags"));
10126 /* 32-bit MUL. */
10127 inst.instruction = THUMB_OP32 (inst.instruction);
10128 inst.instruction |= Rd << 8;
10129 inst.instruction |= Rn << 16;
10130 inst.instruction |= Rm << 0;
10131
10132 reject_bad_reg (Rd);
10133 reject_bad_reg (Rn);
10134 reject_bad_reg (Rm);
10135 }
10136 }
10137
10138 static void
10139 do_t_mull (void)
10140 {
10141 unsigned RdLo, RdHi, Rn, Rm;
10142
10143 RdLo = inst.operands[0].reg;
10144 RdHi = inst.operands[1].reg;
10145 Rn = inst.operands[2].reg;
10146 Rm = inst.operands[3].reg;
10147
10148 reject_bad_reg (RdLo);
10149 reject_bad_reg (RdHi);
10150 reject_bad_reg (Rn);
10151 reject_bad_reg (Rm);
10152
10153 inst.instruction |= RdLo << 12;
10154 inst.instruction |= RdHi << 8;
10155 inst.instruction |= Rn << 16;
10156 inst.instruction |= Rm;
10157
10158 if (RdLo == RdHi)
10159 as_tsktsk (_("rdhi and rdlo must be different"));
10160 }
10161
10162 static void
10163 do_t_nop (void)
10164 {
10165 if (unified_syntax)
10166 {
10167 if (inst.size_req == 4 || inst.operands[0].imm > 15)
10168 {
10169 inst.instruction = THUMB_OP32 (inst.instruction);
10170 inst.instruction |= inst.operands[0].imm;
10171 }
10172 else
10173 {
10174 /* PR9722: Check for Thumb-2 availability before
10175 generating a Thumb-2 NOP instruction. */
10176 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
10177 {
10178 inst.instruction = THUMB_OP16 (inst.instruction);
10179 inst.instruction |= inst.operands[0].imm << 4;
10180 }
10181 else
10182 inst.instruction = 0x46c0;
10183 }
10184 }
10185 else
10186 {
10187 constraint (inst.operands[0].present,
10188 _("Thumb does not support NOP with hints"));
10189 inst.instruction = 0x46c0;
10190 }
10191 }
10192
10193 static void
10194 do_t_neg (void)
10195 {
10196 if (unified_syntax)
10197 {
10198 bfd_boolean narrow;
10199
10200 if (THUMB_SETS_FLAGS (inst.instruction))
10201 narrow = (current_it_mask == 0);
10202 else
10203 narrow = (current_it_mask != 0);
10204 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10205 narrow = FALSE;
10206 if (inst.size_req == 4)
10207 narrow = FALSE;
10208
10209 if (!narrow)
10210 {
10211 inst.instruction = THUMB_OP32 (inst.instruction);
10212 inst.instruction |= inst.operands[0].reg << 8;
10213 inst.instruction |= inst.operands[1].reg << 16;
10214 }
10215 else
10216 {
10217 inst.instruction = THUMB_OP16 (inst.instruction);
10218 inst.instruction |= inst.operands[0].reg;
10219 inst.instruction |= inst.operands[1].reg << 3;
10220 }
10221 }
10222 else
10223 {
10224 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
10225 BAD_HIREG);
10226 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10227
10228 inst.instruction = THUMB_OP16 (inst.instruction);
10229 inst.instruction |= inst.operands[0].reg;
10230 inst.instruction |= inst.operands[1].reg << 3;
10231 }
10232 }
10233
10234 static void
10235 do_t_orn (void)
10236 {
10237 unsigned Rd, Rn;
10238
10239 Rd = inst.operands[0].reg;
10240 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
10241
10242 reject_bad_reg (Rd);
10243 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
10244 reject_bad_reg (Rn);
10245
10246 inst.instruction |= Rd << 8;
10247 inst.instruction |= Rn << 16;
10248
10249 if (!inst.operands[2].isreg)
10250 {
10251 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10252 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10253 }
10254 else
10255 {
10256 unsigned Rm;
10257
10258 Rm = inst.operands[2].reg;
10259 reject_bad_reg (Rm);
10260
10261 constraint (inst.operands[2].shifted
10262 && inst.operands[2].immisreg,
10263 _("shift must be constant"));
10264 encode_thumb32_shifted_operand (2);
10265 }
10266 }
10267
10268 static void
10269 do_t_pkhbt (void)
10270 {
10271 unsigned Rd, Rn, Rm;
10272
10273 Rd = inst.operands[0].reg;
10274 Rn = inst.operands[1].reg;
10275 Rm = inst.operands[2].reg;
10276
10277 reject_bad_reg (Rd);
10278 reject_bad_reg (Rn);
10279 reject_bad_reg (Rm);
10280
10281 inst.instruction |= Rd << 8;
10282 inst.instruction |= Rn << 16;
10283 inst.instruction |= Rm;
10284 if (inst.operands[3].present)
10285 {
10286 unsigned int val = inst.reloc.exp.X_add_number;
10287 constraint (inst.reloc.exp.X_op != O_constant,
10288 _("expression too complex"));
10289 inst.instruction |= (val & 0x1c) << 10;
10290 inst.instruction |= (val & 0x03) << 6;
10291 }
10292 }
10293
10294 static void
10295 do_t_pkhtb (void)
10296 {
10297 if (!inst.operands[3].present)
10298 inst.instruction &= ~0x00000020;
10299 do_t_pkhbt ();
10300 }
10301
10302 static void
10303 do_t_pld (void)
10304 {
10305 if (inst.operands[0].immisreg)
10306 reject_bad_reg (inst.operands[0].imm);
10307
10308 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
10309 }
10310
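/* PUSH/POP. Low-register lists (optionally plus LR for push or PC for pop)
 use the 16-bit encodings; anything else becomes a 32-bit LDM/STM on SP in
 unified syntax, or is rejected otherwise. */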
10311 static void
10312 do_t_push_pop (void)
10313 {
10314 unsigned mask;
10315
10316 constraint (inst.operands[0].writeback,
10317 _("push/pop do not support {reglist}^"));
10318 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
10319 _("expression too complex"));
10320
10321 mask = inst.operands[0].imm;
10322 if ((mask & ~0xff) == 0)
10323 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
10324 else if ((inst.instruction == T_MNEM_push
10325 && (mask & ~0xff) == 1 << REG_LR)
10326 || (inst.instruction == T_MNEM_pop
10327 && (mask & ~0xff) == 1 << REG_PC))
10328 {
10329 inst.instruction = THUMB_OP16 (inst.instruction);
10330 inst.instruction |= THUMB_PP_PC_LR;
10331 inst.instruction |= mask & 0xff;
10332 }
10333 else if (unified_syntax)
10334 {
10335 inst.instruction = THUMB_OP32 (inst.instruction);
10336 encode_thumb2_ldmstm (13, mask, TRUE);
10337 }
10338 else
10339 {
10340 inst.error = _("invalid register list to push/pop instruction");
10341 return;
10342 }
10343 }
10344
10345 static void
10346 do_t_rbit (void)
10347 {
10348 unsigned Rd, Rm;
10349
10350 Rd = inst.operands[0].reg;
10351 Rm = inst.operands[1].reg;
10352
10353 reject_bad_reg (Rd);
10354 reject_bad_reg (Rm);
10355
10356 inst.instruction |= Rd << 8;
10357 inst.instruction |= Rm << 16;
10358 inst.instruction |= Rm;
10359 }
10360
10361 static void
10362 do_t_rev (void)
10363 {
10364 unsigned Rd, Rm;
10365
10366 Rd = inst.operands[0].reg;
10367 Rm = inst.operands[1].reg;
10368
10369 reject_bad_reg (Rd);
10370 reject_bad_reg (Rm);
10371
10372 if (Rd <= 7 && Rm <= 7
10373 && inst.size_req != 4)
10374 {
10375 inst.instruction = THUMB_OP16 (inst.instruction);
10376 inst.instruction |= Rd;
10377 inst.instruction |= Rm << 3;
10378 }
10379 else if (unified_syntax)
10380 {
10381 inst.instruction = THUMB_OP32 (inst.instruction);
10382 inst.instruction |= Rd << 8;
10383 inst.instruction |= Rm << 16;
10384 inst.instruction |= Rm;
10385 }
10386 else
10387 inst.error = BAD_HIREG;
10388 }
10389
10390 static void
10391 do_t_rrx (void)
10392 {
10393 unsigned Rd, Rm;
10394
10395 Rd = inst.operands[0].reg;
10396 Rm = inst.operands[1].reg;
10397
10398 reject_bad_reg (Rd);
10399 reject_bad_reg (Rm);
10400
10401 inst.instruction |= Rd << 8;
10402 inst.instruction |= Rm;
10403 }
10404
10405 static void
10406 do_t_rsb (void)
10407 {
10408 unsigned Rd, Rs;
10409
10410 Rd = inst.operands[0].reg;
10411 Rs = (inst.operands[1].present
10412 ? inst.operands[1].reg /* Rd, Rs, foo */
10413 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10414
10415 reject_bad_reg (Rd);
10416 reject_bad_reg (Rs);
10417 if (inst.operands[2].isreg)
10418 reject_bad_reg (inst.operands[2].reg);
10419
10420 inst.instruction |= Rd << 8;
10421 inst.instruction |= Rs << 16;
10422 if (!inst.operands[2].isreg)
10423 {
10424 bfd_boolean narrow;
10425
10426 if ((inst.instruction & 0x00100000) != 0)
10427 narrow = (current_it_mask == 0);
10428 else
10429 narrow = (current_it_mask != 0);
10430
10431 if (Rd > 7 || Rs > 7)
10432 narrow = FALSE;
10433
10434 if (inst.size_req == 4 || !unified_syntax)
10435 narrow = FALSE;
10436
10437 if (inst.reloc.exp.X_op != O_constant
10438 || inst.reloc.exp.X_add_number != 0)
10439 narrow = FALSE;
10440
10441 /* Turn rsb #0 into 16-bit neg. We should probably do this via
10442 relaxation, but it doesn't seem worth the hassle. */
10443 if (narrow)
10444 {
10445 inst.reloc.type = BFD_RELOC_UNUSED;
10446 inst.instruction = THUMB_OP16 (T_MNEM_negs);
10447 inst.instruction |= Rs << 3;
10448 inst.instruction |= Rd;
10449 }
10450 else
10451 {
10452 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10453 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10454 }
10455 }
10456 else
10457 encode_thumb32_shifted_operand (2);
10458 }
10459
10460 static void
10461 do_t_setend (void)
10462 {
10463 constraint (current_it_mask, BAD_NOT_IT);
10464 if (inst.operands[0].imm)
10465 inst.instruction |= 0x8;
10466 }
10467
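/* Shift instructions (ASR, LSL, LSR, ROR), by immediate or by register,
 choosing between the 16-bit encodings and the 32-bit forms (which use a
 shifted MOV/MOVS for the immediate case). */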
10468 static void
10469 do_t_shift (void)
10470 {
10471 if (!inst.operands[1].present)
10472 inst.operands[1].reg = inst.operands[0].reg;
10473
10474 if (unified_syntax)
10475 {
10476 bfd_boolean narrow;
10477 int shift_kind;
10478
10479 switch (inst.instruction)
10480 {
10481 case T_MNEM_asr:
10482 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
10483 case T_MNEM_lsl:
10484 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
10485 case T_MNEM_lsr:
10486 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
10487 case T_MNEM_ror:
10488 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
10489 default: abort ();
10490 }
10491
10492 if (THUMB_SETS_FLAGS (inst.instruction))
10493 narrow = (current_it_mask == 0);
10494 else
10495 narrow = (current_it_mask != 0);
10496 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10497 narrow = FALSE;
10498 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
10499 narrow = FALSE;
10500 if (inst.operands[2].isreg
10501 && (inst.operands[1].reg != inst.operands[0].reg
10502 || inst.operands[2].reg > 7))
10503 narrow = FALSE;
10504 if (inst.size_req == 4)
10505 narrow = FALSE;
10506
10507 reject_bad_reg (inst.operands[0].reg);
10508 reject_bad_reg (inst.operands[1].reg);
10509
10510 if (!narrow)
10511 {
10512 if (inst.operands[2].isreg)
10513 {
10514 reject_bad_reg (inst.operands[2].reg);
10515 inst.instruction = THUMB_OP32 (inst.instruction);
10516 inst.instruction |= inst.operands[0].reg << 8;
10517 inst.instruction |= inst.operands[1].reg << 16;
10518 inst.instruction |= inst.operands[2].reg;
10519 }
10520 else
10521 {
10522 inst.operands[1].shifted = 1;
10523 inst.operands[1].shift_kind = shift_kind;
10524 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
10525 ? T_MNEM_movs : T_MNEM_mov);
10526 inst.instruction |= inst.operands[0].reg << 8;
10527 encode_thumb32_shifted_operand (1);
10528 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
10529 inst.reloc.type = BFD_RELOC_UNUSED;
10530 }
10531 }
10532 else
10533 {
10534 if (inst.operands[2].isreg)
10535 {
10536 switch (shift_kind)
10537 {
10538 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
10539 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
10540 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
10541 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
10542 default: abort ();
10543 }
10544
10545 inst.instruction |= inst.operands[0].reg;
10546 inst.instruction |= inst.operands[2].reg << 3;
10547 }
10548 else
10549 {
10550 switch (shift_kind)
10551 {
10552 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10553 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10554 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10555 default: abort ();
10556 }
10557 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10558 inst.instruction |= inst.operands[0].reg;
10559 inst.instruction |= inst.operands[1].reg << 3;
10560 }
10561 }
10562 }
10563 else
10564 {
10565 constraint (inst.operands[0].reg > 7
10566 || inst.operands[1].reg > 7, BAD_HIREG);
10567 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10568
10569 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
10570 {
10571 constraint (inst.operands[2].reg > 7, BAD_HIREG);
10572 constraint (inst.operands[0].reg != inst.operands[1].reg,
10573 _("source1 and dest must be same register"));
10574
10575 switch (inst.instruction)
10576 {
10577 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
10578 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
10579 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
10580 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
10581 default: abort ();
10582 }
10583
10584 inst.instruction |= inst.operands[0].reg;
10585 inst.instruction |= inst.operands[2].reg << 3;
10586 }
10587 else
10588 {
10589 switch (inst.instruction)
10590 {
10591 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
10592 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
10593 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
10594 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
10595 default: abort ();
10596 }
10597 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10598 inst.instruction |= inst.operands[0].reg;
10599 inst.instruction |= inst.operands[1].reg << 3;
10600 }
10601 }
10602 }
10603
10604 static void
10605 do_t_simd (void)
10606 {
10607 unsigned Rd, Rn, Rm;
10608
10609 Rd = inst.operands[0].reg;
10610 Rn = inst.operands[1].reg;
10611 Rm = inst.operands[2].reg;
10612
10613 reject_bad_reg (Rd);
10614 reject_bad_reg (Rn);
10615 reject_bad_reg (Rm);
10616
10617 inst.instruction |= Rd << 8;
10618 inst.instruction |= Rn << 16;
10619 inst.instruction |= Rm;
10620 }
10621
10622 static void
10623 do_t_smc (void)
10624 {
10625 unsigned int value = inst.reloc.exp.X_add_number;
10626 constraint (inst.reloc.exp.X_op != O_constant,
10627 _("expression too complex"));
10628 inst.reloc.type = BFD_RELOC_UNUSED;
10629 inst.instruction |= (value & 0xf000) >> 12;
10630 inst.instruction |= (value & 0x0ff0);
10631 inst.instruction |= (value & 0x000f) << 16;
10632 }
10633
10634 static void
10635 do_t_ssat (void)
10636 {
10637 unsigned Rd, Rn;
10638
10639 Rd = inst.operands[0].reg;
10640 Rn = inst.operands[2].reg;
10641
10642 reject_bad_reg (Rd);
10643 reject_bad_reg (Rn);
10644
10645 inst.instruction |= Rd << 8;
10646 inst.instruction |= inst.operands[1].imm - 1;
10647 inst.instruction |= Rn << 16;
10648
10649 if (inst.operands[3].present)
10650 {
10651 constraint (inst.reloc.exp.X_op != O_constant,
10652 _("expression too complex"));
10653
10654 if (inst.reloc.exp.X_add_number != 0)
10655 {
10656 if (inst.operands[3].shift_kind == SHIFT_ASR)
10657 inst.instruction |= 0x00200000; /* sh bit */
10658 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10659 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10660 }
10661 inst.reloc.type = BFD_RELOC_UNUSED;
10662 }
10663 }
10664
10665 static void
10666 do_t_ssat16 (void)
10667 {
10668 unsigned Rd, Rn;
10669
10670 Rd = inst.operands[0].reg;
10671 Rn = inst.operands[2].reg;
10672
10673 reject_bad_reg (Rd);
10674 reject_bad_reg (Rn);
10675
10676 inst.instruction |= Rd << 8;
10677 inst.instruction |= inst.operands[1].imm - 1;
10678 inst.instruction |= Rn << 16;
10679 }
10680
10681 static void
10682 do_t_strex (void)
10683 {
10684 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10685 || inst.operands[2].postind || inst.operands[2].writeback
10686 || inst.operands[2].immisreg || inst.operands[2].shifted
10687 || inst.operands[2].negative,
10688 BAD_ADDR_MODE);
10689
10690 inst.instruction |= inst.operands[0].reg << 8;
10691 inst.instruction |= inst.operands[1].reg << 12;
10692 inst.instruction |= inst.operands[2].reg << 16;
10693 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10694 }
10695
10696 static void
10697 do_t_strexd (void)
10698 {
10699 if (!inst.operands[2].present)
10700 inst.operands[2].reg = inst.operands[1].reg + 1;
10701
10702 constraint (inst.operands[0].reg == inst.operands[1].reg
10703 || inst.operands[0].reg == inst.operands[2].reg
10704 || inst.operands[0].reg == inst.operands[3].reg
10705 || inst.operands[1].reg == inst.operands[2].reg,
10706 BAD_OVERLAP);
10707
10708 inst.instruction |= inst.operands[0].reg;
10709 inst.instruction |= inst.operands[1].reg << 12;
10710 inst.instruction |= inst.operands[2].reg << 8;
10711 inst.instruction |= inst.operands[3].reg << 16;
10712 }
10713
10714 static void
10715 do_t_sxtah (void)
10716 {
10717 unsigned Rd, Rn, Rm;
10718
10719 Rd = inst.operands[0].reg;
10720 Rn = inst.operands[1].reg;
10721 Rm = inst.operands[2].reg;
10722
10723 reject_bad_reg (Rd);
10724 reject_bad_reg (Rn);
10725 reject_bad_reg (Rm);
10726
10727 inst.instruction |= Rd << 8;
10728 inst.instruction |= Rn << 16;
10729 inst.instruction |= Rm;
10730 inst.instruction |= inst.operands[3].imm << 4;
10731 }
10732
10733 static void
10734 do_t_sxth (void)
10735 {
10736 unsigned Rd, Rm;
10737
10738 Rd = inst.operands[0].reg;
10739 Rm = inst.operands[1].reg;
10740
10741 reject_bad_reg (Rd);
10742 reject_bad_reg (Rm);
10743
10744 if (inst.instruction <= 0xffff && inst.size_req != 4
10745 && Rd <= 7 && Rm <= 7
10746 && (!inst.operands[2].present || inst.operands[2].imm == 0))
10747 {
10748 inst.instruction = THUMB_OP16 (inst.instruction);
10749 inst.instruction |= Rd;
10750 inst.instruction |= Rm << 3;
10751 }
10752 else if (unified_syntax)
10753 {
10754 if (inst.instruction <= 0xffff)
10755 inst.instruction = THUMB_OP32 (inst.instruction);
10756 inst.instruction |= Rd << 8;
10757 inst.instruction |= Rm;
10758 inst.instruction |= inst.operands[2].imm << 4;
10759 }
10760 else
10761 {
10762 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
10763 _("Thumb encoding does not support rotation"));
10764 constraint (1, BAD_HIREG);
10765 }
10766 }
10767
10768 static void
10769 do_t_swi (void)
10770 {
10771 inst.reloc.type = BFD_RELOC_ARM_SWI;
10772 }
10773
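/* TBB/TBH. Must be outside an IT block or be its last instruction; requires
 a register index, SP is not allowed as the base, and only the halfword form
 accepts a shifted index. */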
10774 static void
10775 do_t_tb (void)
10776 {
10777 unsigned Rn, Rm;
10778 int half;
10779
10780 half = (inst.instruction & 0x10) != 0;
10781 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
10782 constraint (inst.operands[0].immisreg,
10783 _("instruction requires register index"));
10784
10785 Rn = inst.operands[0].reg;
10786 Rm = inst.operands[0].imm;
10787
10788 constraint (Rn == REG_SP, BAD_SP);
10789 reject_bad_reg (Rm);
10790
10791 constraint (!half && inst.operands[0].shifted,
10792 _("instruction does not allow shifted index"));
10793 inst.instruction |= (Rn << 16) | Rm;
10794 }
10795
10796 static void
10797 do_t_usat (void)
10798 {
10799 unsigned Rd, Rn;
10800
10801 Rd = inst.operands[0].reg;
10802 Rn = inst.operands[2].reg;
10803
10804 reject_bad_reg (Rd);
10805 reject_bad_reg (Rn);
10806
10807 inst.instruction |= Rd << 8;
10808 inst.instruction |= inst.operands[1].imm;
10809 inst.instruction |= Rn << 16;
10810
10811 if (inst.operands[3].present)
10812 {
10813 constraint (inst.reloc.exp.X_op != O_constant,
10814 _("expression too complex"));
10815 if (inst.reloc.exp.X_add_number != 0)
10816 {
10817 if (inst.operands[3].shift_kind == SHIFT_ASR)
10818 inst.instruction |= 0x00200000; /* sh bit */
10819
10820 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10821 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10822 }
10823 inst.reloc.type = BFD_RELOC_UNUSED;
10824 }
10825 }
10826
10827 static void
10828 do_t_usat16 (void)
10829 {
10830 unsigned Rd, Rn;
10831
10832 Rd = inst.operands[0].reg;
10833 Rn = inst.operands[2].reg;
10834
10835 reject_bad_reg (Rd);
10836 reject_bad_reg (Rn);
10837
10838 inst.instruction |= Rd << 8;
10839 inst.instruction |= inst.operands[1].imm;
10840 inst.instruction |= Rn << 16;
10841 }
10842
10843 /* Neon instruction encoder helpers. */
10844
10845 /* Encodings for the different types for various Neon opcodes. */
10846
10847 /* An "invalid" code for the following tables. */
10848 #define N_INV -1u
10849
10850 struct neon_tab_entry
10851 {
10852 unsigned integer;
10853 unsigned float_or_poly;
10854 unsigned scalar_or_imm;
10855 };
10856
10857 /* Map overloaded Neon opcodes to their respective encodings. */
10858 #define NEON_ENC_TAB \
10859 X(vabd, 0x0000700, 0x1200d00, N_INV), \
10860 X(vmax, 0x0000600, 0x0000f00, N_INV), \
10861 X(vmin, 0x0000610, 0x0200f00, N_INV), \
10862 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
10863 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
10864 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
10865 X(vadd, 0x0000800, 0x0000d00, N_INV), \
10866 X(vsub, 0x1000800, 0x0200d00, N_INV), \
10867 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
10868 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
10869 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
10870 /* Register variants of the following two instructions are encoded as
10871 vcge / vcgt with the operands reversed. */ \
10872 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
10873 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
10874 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
10875 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
10876 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
10877 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
10878 X(vmlal, 0x0800800, N_INV, 0x0800240), \
10879 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
10880 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
10881 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
10882 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
10883 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
10884 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
10885 X(vshl, 0x0000400, N_INV, 0x0800510), \
10886 X(vqshl, 0x0000410, N_INV, 0x0800710), \
10887 X(vand, 0x0000110, N_INV, 0x0800030), \
10888 X(vbic, 0x0100110, N_INV, 0x0800030), \
10889 X(veor, 0x1000110, N_INV, N_INV), \
10890 X(vorn, 0x0300110, N_INV, 0x0800010), \
10891 X(vorr, 0x0200110, N_INV, 0x0800010), \
10892 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
10893 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
10894 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
10895 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
10896 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
10897 X(vst1, 0x0000000, 0x0800000, N_INV), \
10898 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
10899 X(vst2, 0x0000100, 0x0800100, N_INV), \
10900 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
10901 X(vst3, 0x0000200, 0x0800200, N_INV), \
10902 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
10903 X(vst4, 0x0000300, 0x0800300, N_INV), \
10904 X(vmovn, 0x1b20200, N_INV, N_INV), \
10905 X(vtrn, 0x1b20080, N_INV, N_INV), \
10906 X(vqmovn, 0x1b20200, N_INV, N_INV), \
10907 X(vqmovun, 0x1b20240, N_INV, N_INV), \
10908 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
10909 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
10910 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
10911 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
10912 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
10913 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
10914 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
10915
10916 enum neon_opc
10917 {
10918 #define X(OPC,I,F,S) N_MNEM_##OPC
10919 NEON_ENC_TAB
10920 #undef X
10921 };
10922
10923 static const struct neon_tab_entry neon_enc_tab[] =
10924 {
10925 #define X(OPC,I,F,S) { (I), (F), (S) }
10926 NEON_ENC_TAB
10927 #undef X
10928 };
10929
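/* The NEON_ENC_* macros below select one of the three encodings recorded for
 an overloaded mnemonic; (X) & 0x0fffffff recovers the N_MNEM_* index, and
 the SINGLE/DOUBLE variants additionally preserve the top nibble of X. */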
10930 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10931 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10932 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10933 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10934 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10935 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10936 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10937 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10938 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10939 #define NEON_ENC_SINGLE(X) \
10940 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
10941 #define NEON_ENC_DOUBLE(X) \
10942 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
10943
10944 /* Define shapes for instruction operands. The following mnemonic characters
10945 are used in this table:
10946
10947 F - VFP S<n> register
10948 D - Neon D<n> register
10949 Q - Neon Q<n> register
10950 I - Immediate
10951 S - Scalar
10952 R - ARM register
10953 L - D<n> register list
10954
10955 This table is used to generate various data:
10956 - enumerations of the form NS_DDR to be used as arguments to
10957 neon_select_shape.
10958 - a table classifying shapes into single, double, quad, mixed.
10959 - a table used to drive neon_select_shape. */
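/* For example, the entry X(3, (D, D, D), DOUBLE) expands to the enumerator
 NS_DDD, is classified as SC_DOUBLE, and contributes { 3, { SE_D, SE_D,
 SE_D } } to neon_shape_tab. */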
10960
10961 #define NEON_SHAPE_DEF \
10962 X(3, (D, D, D), DOUBLE), \
10963 X(3, (Q, Q, Q), QUAD), \
10964 X(3, (D, D, I), DOUBLE), \
10965 X(3, (Q, Q, I), QUAD), \
10966 X(3, (D, D, S), DOUBLE), \
10967 X(3, (Q, Q, S), QUAD), \
10968 X(2, (D, D), DOUBLE), \
10969 X(2, (Q, Q), QUAD), \
10970 X(2, (D, S), DOUBLE), \
10971 X(2, (Q, S), QUAD), \
10972 X(2, (D, R), DOUBLE), \
10973 X(2, (Q, R), QUAD), \
10974 X(2, (D, I), DOUBLE), \
10975 X(2, (Q, I), QUAD), \
10976 X(3, (D, L, D), DOUBLE), \
10977 X(2, (D, Q), MIXED), \
10978 X(2, (Q, D), MIXED), \
10979 X(3, (D, Q, I), MIXED), \
10980 X(3, (Q, D, I), MIXED), \
10981 X(3, (Q, D, D), MIXED), \
10982 X(3, (D, Q, Q), MIXED), \
10983 X(3, (Q, Q, D), MIXED), \
10984 X(3, (Q, D, S), MIXED), \
10985 X(3, (D, Q, S), MIXED), \
10986 X(4, (D, D, D, I), DOUBLE), \
10987 X(4, (Q, Q, Q, I), QUAD), \
10988 X(2, (F, F), SINGLE), \
10989 X(3, (F, F, F), SINGLE), \
10990 X(2, (F, I), SINGLE), \
10991 X(2, (F, D), MIXED), \
10992 X(2, (D, F), MIXED), \
10993 X(3, (F, F, I), MIXED), \
10994 X(4, (R, R, F, F), SINGLE), \
10995 X(4, (F, F, R, R), SINGLE), \
10996 X(3, (D, R, R), DOUBLE), \
10997 X(3, (R, R, D), DOUBLE), \
10998 X(2, (S, R), SINGLE), \
10999 X(2, (R, S), SINGLE), \
11000 X(2, (F, R), SINGLE), \
11001 X(2, (R, F), SINGLE)
11002
11003 #define S2(A,B) NS_##A##B
11004 #define S3(A,B,C) NS_##A##B##C
11005 #define S4(A,B,C,D) NS_##A##B##C##D
11006
11007 #define X(N, L, C) S##N L
11008
11009 enum neon_shape
11010 {
11011 NEON_SHAPE_DEF,
11012 NS_NULL
11013 };
11014
11015 #undef X
11016 #undef S2
11017 #undef S3
11018 #undef S4
11019
11020 enum neon_shape_class
11021 {
11022 SC_SINGLE,
11023 SC_DOUBLE,
11024 SC_QUAD,
11025 SC_MIXED
11026 };
11027
11028 #define X(N, L, C) SC_##C
11029
11030 static enum neon_shape_class neon_shape_class[] =
11031 {
11032 NEON_SHAPE_DEF
11033 };
11034
11035 #undef X
11036
11037 enum neon_shape_el
11038 {
11039 SE_F,
11040 SE_D,
11041 SE_Q,
11042 SE_I,
11043 SE_S,
11044 SE_R,
11045 SE_L
11046 };
11047
11048 /* Register widths of above. */
11049 static unsigned neon_shape_el_size[] =
11050 {
11051 32,
11052 64,
11053 128,
11054 0,
11055 32,
11056 32,
11057 0
11058 };
11059
11060 struct neon_shape_info
11061 {
11062 unsigned els;
11063 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
11064 };
11065
11066 #define S2(A,B) { SE_##A, SE_##B }
11067 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
11068 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
11069
11070 #define X(N, L, C) { N, S##N L }
11071
11072 static struct neon_shape_info neon_shape_tab[] =
11073 {
11074 NEON_SHAPE_DEF
11075 };
11076
11077 #undef X
11078 #undef S2
11079 #undef S3
11080 #undef S4
11081
11082 /* Bit masks used in type checking given instructions.
11083 'N_EQK' means the type must be the same as (or based on in some way) the key
11084 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
11085 set, various other bits can be set as well in order to modify the meaning of
11086 the type constraint. */
11087
11088 enum neon_type_mask
11089 {
11090 N_S8 = 0x0000001,
11091 N_S16 = 0x0000002,
11092 N_S32 = 0x0000004,
11093 N_S64 = 0x0000008,
11094 N_U8 = 0x0000010,
11095 N_U16 = 0x0000020,
11096 N_U32 = 0x0000040,
11097 N_U64 = 0x0000080,
11098 N_I8 = 0x0000100,
11099 N_I16 = 0x0000200,
11100 N_I32 = 0x0000400,
11101 N_I64 = 0x0000800,
11102 N_8 = 0x0001000,
11103 N_16 = 0x0002000,
11104 N_32 = 0x0004000,
11105 N_64 = 0x0008000,
11106 N_P8 = 0x0010000,
11107 N_P16 = 0x0020000,
11108 N_F16 = 0x0040000,
11109 N_F32 = 0x0080000,
11110 N_F64 = 0x0100000,
11111 N_KEY = 0x1000000, /* key element (main type specifier). */
11112 N_EQK = 0x2000000, /* given operand has the same type & size as the key. */
11113 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
11114 N_DBL = 0x0000001, /* if N_EQK, this operand is twice the size. */
11115 N_HLF = 0x0000002, /* if N_EQK, this operand is half the size. */
11116 N_SGN = 0x0000004, /* if N_EQK, this operand is forced to be signed. */
11117 N_UNS = 0x0000008, /* if N_EQK, this operand is forced to be unsigned. */
11118 N_INT = 0x0000010, /* if N_EQK, this operand is forced to be integer. */
11119 N_FLT = 0x0000020, /* if N_EQK, this operand is forced to be float. */
11120 N_SIZ = 0x0000040, /* if N_EQK, this operand is forced to be size-only. */
11121 N_UTYP = 0,
11122 N_MAX_NONSPECIAL = N_F64
11123 };
11124
11125 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
11126
11127 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
11128 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
11129 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
11130 #define N_SUF_32 (N_SU_32 | N_F32)
11131 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
11132 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
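
/* For example, N_EQK | N_DBL describes an operand with the key's base type
 but twice its element size (a widening operand); N_EQK | N_HLF gives the
 half-size (narrowing) counterpart. */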
11133
11134 /* Pass this as the first type argument to neon_check_type to ignore types
11135 altogether. */
11136 #define N_IGNORE_TYPE (N_KEY | N_EQK)
11137
11138 /* Select a "shape" for the current instruction (describing register types or
11139 sizes) from a list of alternatives. Return NS_NULL if the current instruction
11140 doesn't fit. For non-polymorphic shapes, checking is usually done as a
11141 function of operand parsing, so this function doesn't need to be called.
11142 Shapes should be listed in order of decreasing length. */
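/* For example, neon_select_shape (NS_DDD, NS_QQQ, NS_NULL) yields NS_DDD for
 a doubleword three-register instruction and NS_QQQ for the quadword form,
 or NS_NULL (with an error reported) when neither matches. */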
11143
11144 static enum neon_shape
11145 neon_select_shape (enum neon_shape shape, ...)
11146 {
11147 va_list ap;
11148 enum neon_shape first_shape = shape;
11149
11150 /* Fix missing optional operands. FIXME: we don't know at this point how
11151 many arguments we should have, so this makes the assumption that we have
11152 > 1. This is true of all current Neon opcodes, I think, but may not be
11153 true in the future. */
11154 if (!inst.operands[1].present)
11155 inst.operands[1] = inst.operands[0];
11156
11157 va_start (ap, shape);
11158
11159 for (; shape != NS_NULL; shape = va_arg (ap, int))
11160 {
11161 unsigned j;
11162 int matches = 1;
11163
11164 for (j = 0; j < neon_shape_tab[shape].els; j++)
11165 {
11166 if (!inst.operands[j].present)
11167 {
11168 matches = 0;
11169 break;
11170 }
11171
11172 switch (neon_shape_tab[shape].el[j])
11173 {
11174 case SE_F:
11175 if (!(inst.operands[j].isreg
11176 && inst.operands[j].isvec
11177 && inst.operands[j].issingle
11178 && !inst.operands[j].isquad))
11179 matches = 0;
11180 break;
11181
11182 case SE_D:
11183 if (!(inst.operands[j].isreg
11184 && inst.operands[j].isvec
11185 && !inst.operands[j].isquad
11186 && !inst.operands[j].issingle))
11187 matches = 0;
11188 break;
11189
11190 case SE_R:
11191 if (!(inst.operands[j].isreg
11192 && !inst.operands[j].isvec))
11193 matches = 0;
11194 break;
11195
11196 case SE_Q:
11197 if (!(inst.operands[j].isreg
11198 && inst.operands[j].isvec
11199 && inst.operands[j].isquad
11200 && !inst.operands[j].issingle))
11201 matches = 0;
11202 break;
11203
11204 case SE_I:
11205 if (!(!inst.operands[j].isreg
11206 && !inst.operands[j].isscalar))
11207 matches = 0;
11208 break;
11209
11210 case SE_S:
11211 if (!(!inst.operands[j].isreg
11212 && inst.operands[j].isscalar))
11213 matches = 0;
11214 break;
11215
11216 case SE_L:
11217 break;
11218 }
11219 }
11220 if (matches)
11221 break;
11222 }
11223
11224 va_end (ap);
11225
11226 if (shape == NS_NULL && first_shape != NS_NULL)
11227 first_error (_("invalid instruction shape"));
11228
11229 return shape;
11230 }
11231
11232 /* True if SHAPE is predominantly a quadword operation (most of the time, this
11233 means the Q bit should be set). */
11234
11235 static int
11236 neon_quad (enum neon_shape shape)
11237 {
11238 return neon_shape_class[shape] == SC_QUAD;
11239 }
11240
11241 static void
11242 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
11243 unsigned *g_size)
11244 {
11245 /* Allow modification to be made to types which are constrained to be
11246 based on the key element, based on bits set alongside N_EQK. */
11247 if ((typebits & N_EQK) != 0)
11248 {
11249 if ((typebits & N_HLF) != 0)
11250 *g_size /= 2;
11251 else if ((typebits & N_DBL) != 0)
11252 *g_size *= 2;
11253 if ((typebits & N_SGN) != 0)
11254 *g_type = NT_signed;
11255 else if ((typebits & N_UNS) != 0)
11256 *g_type = NT_unsigned;
11257 else if ((typebits & N_INT) != 0)
11258 *g_type = NT_integer;
11259 else if ((typebits & N_FLT) != 0)
11260 *g_type = NT_float;
11261 else if ((typebits & N_SIZ) != 0)
11262 *g_type = NT_untyped;
11263 }
11264 }
11265
11266 /* Return a copy of KEY promoted by the bits set in THISARG. KEY should be the
11267 "key" operand type, i.e. the single type specified in a Neon instruction when it
11268 is the only one given. */
11269
11270 static struct neon_type_el
11271 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
11272 {
11273 struct neon_type_el dest = *key;
11274
11275 assert ((thisarg & N_EQK) != 0);
11276
11277 neon_modify_type_size (thisarg, &dest.type, &dest.size);
11278
11279 return dest;
11280 }
11281
11282 /* Convert Neon type and size into compact bitmask representation. */
11283
11284 static enum neon_type_mask
11285 type_chk_of_el_type (enum neon_el_type type, unsigned size)
11286 {
11287 switch (type)
11288 {
11289 case NT_untyped:
11290 switch (size)
11291 {
11292 case 8: return N_8;
11293 case 16: return N_16;
11294 case 32: return N_32;
11295 case 64: return N_64;
11296 default: ;
11297 }
11298 break;
11299
11300 case NT_integer:
11301 switch (size)
11302 {
11303 case 8: return N_I8;
11304 case 16: return N_I16;
11305 case 32: return N_I32;
11306 case 64: return N_I64;
11307 default: ;
11308 }
11309 break;
11310
11311 case NT_float:
11312 switch (size)
11313 {
11314 case 16: return N_F16;
11315 case 32: return N_F32;
11316 case 64: return N_F64;
11317 default: ;
11318 }
11319 break;
11320
11321 case NT_poly:
11322 switch (size)
11323 {
11324 case 8: return N_P8;
11325 case 16: return N_P16;
11326 default: ;
11327 }
11328 break;
11329
11330 case NT_signed:
11331 switch (size)
11332 {
11333 case 8: return N_S8;
11334 case 16: return N_S16;
11335 case 32: return N_S32;
11336 case 64: return N_S64;
11337 default: ;
11338 }
11339 break;
11340
11341 case NT_unsigned:
11342 switch (size)
11343 {
11344 case 8: return N_U8;
11345 case 16: return N_U16;
11346 case 32: return N_U32;
11347 case 64: return N_U64;
11348 default: ;
11349 }
11350 break;
11351
11352 default: ;
11353 }
11354
11355 return N_UTYP;
11356 }
11357
11358 /* Convert compact Neon bitmask type representation to a type and size. Only
11359 handles the case where a single bit is set in the mask. */
11360
11361 static int
11362 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
11363 enum neon_type_mask mask)
11364 {
11365 if ((mask & N_EQK) != 0)
11366 return FAIL;
11367
11368 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
11369 *size = 8;
11370 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
11371 *size = 16;
11372 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
11373 *size = 32;
11374 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
11375 *size = 64;
11376 else
11377 return FAIL;
11378
11379 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
11380 *type = NT_signed;
11381 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
11382 *type = NT_unsigned;
11383 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
11384 *type = NT_integer;
11385 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
11386 *type = NT_untyped;
11387 else if ((mask & (N_P8 | N_P16)) != 0)
11388 *type = NT_poly;
11389 else if ((mask & (N_F32 | N_F64)) != 0)
11390 *type = NT_float;
11391 else
11392 return FAIL;
11393
11394 return SUCCESS;
11395 }
11396
11397 /* Modify a bitmask of allowed types. This is only needed for type
11398 relaxation. */
11399
11400 static unsigned
11401 modify_types_allowed (unsigned allowed, unsigned mods)
11402 {
11403 unsigned size;
11404 enum neon_el_type type;
11405 unsigned destmask;
11406 int i;
11407
11408 destmask = 0;
11409
11410 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
11411 {
11412 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
11413 {
11414 neon_modify_type_size (mods, &type, &size);
11415 destmask |= type_chk_of_el_type (type, size);
11416 }
11417 }
11418
11419 return destmask;
11420 }
11421
11422 /* Check type and return type classification.
11423 The manual states (paraphrase): If one datatype is given, it indicates the
11424 type given in:
11425 - the second operand, if there is one
11426 - the operand, if there is no second operand
11427 - the result, if there are no operands.
11428 This isn't quite good enough though, so we use a concept of a "key" datatype
11429 which is set on a per-instruction basis, which is the one which matters when
11430 only one data type is written.
11431 Note: this function has side-effects (e.g. filling in missing operands). All
11432 Neon instructions should call it before performing bit encoding. */
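/* For example, neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY) takes
 the type from the third (key) operand, allows any signed or unsigned
 8/16/32-bit element type there, and requires the other operands to match. */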
11433
11434 static struct neon_type_el
11435 neon_check_type (unsigned els, enum neon_shape ns, ...)
11436 {
11437 va_list ap;
11438 unsigned i, pass, key_el = 0;
11439 unsigned types[NEON_MAX_TYPE_ELS];
11440 enum neon_el_type k_type = NT_invtype;
11441 unsigned k_size = -1u;
11442 struct neon_type_el badtype = {NT_invtype, -1};
11443 unsigned key_allowed = 0;
11444
11445 /* The optional register operand in a Neon instruction is always operand 1.
11446 Fill it in here, by duplicating operand 0, if it was omitted. */
11447 if (els > 1 && !inst.operands[1].present)
11448 inst.operands[1] = inst.operands[0];
11449
11450 /* Suck up all the varargs. */
11451 va_start (ap, ns);
11452 for (i = 0; i < els; i++)
11453 {
11454 unsigned thisarg = va_arg (ap, unsigned);
11455 if (thisarg == N_IGNORE_TYPE)
11456 {
11457 va_end (ap);
11458 return badtype;
11459 }
11460 types[i] = thisarg;
11461 if ((thisarg & N_KEY) != 0)
11462 key_el = i;
11463 }
11464 va_end (ap);
11465
11466 if (inst.vectype.elems > 0)
11467 for (i = 0; i < els; i++)
11468 if (inst.operands[i].vectype.type != NT_invtype)
11469 {
11470 first_error (_("types specified in both the mnemonic and operands"));
11471 return badtype;
11472 }
11473
11474 /* Duplicate inst.vectype elements here as necessary.
11475 FIXME: No idea if this is exactly the same as the ARM assembler,
11476 particularly when an insn takes one register and one non-register
11477 operand. */
11478 if (inst.vectype.elems == 1 && els > 1)
11479 {
11480 unsigned j;
11481 inst.vectype.elems = els;
11482 inst.vectype.el[key_el] = inst.vectype.el[0];
11483 for (j = 0; j < els; j++)
11484 if (j != key_el)
11485 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
11486 types[j]);
11487 }
11488 else if (inst.vectype.elems == 0 && els > 0)
11489 {
11490 unsigned j;
11491 /* No types were given after the mnemonic, so look for types specified
11492 after each operand. We allow some flexibility here; as long as the
11493 "key" operand has a type, we can infer the others. */
11494 for (j = 0; j < els; j++)
11495 if (inst.operands[j].vectype.type != NT_invtype)
11496 inst.vectype.el[j] = inst.operands[j].vectype;
11497
11498 if (inst.operands[key_el].vectype.type != NT_invtype)
11499 {
11500 for (j = 0; j < els; j++)
11501 if (inst.operands[j].vectype.type == NT_invtype)
11502 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
11503 types[j]);
11504 }
11505 else
11506 {
11507 first_error (_("operand types can't be inferred"));
11508 return badtype;
11509 }
11510 }
11511 else if (inst.vectype.elems != els)
11512 {
11513 first_error (_("type specifier has the wrong number of parts"));
11514 return badtype;
11515 }
11516
11517 for (pass = 0; pass < 2; pass++)
11518 {
11519 for (i = 0; i < els; i++)
11520 {
11521 unsigned thisarg = types[i];
11522 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
11523 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
11524 enum neon_el_type g_type = inst.vectype.el[i].type;
11525 unsigned g_size = inst.vectype.el[i].size;
11526
11527 /* Decay more-specific signed & unsigned types to sign-insensitive
11528 integer types if sign-specific variants are unavailable. */
11529 if ((g_type == NT_signed || g_type == NT_unsigned)
11530 && (types_allowed & N_SU_ALL) == 0)
11531 g_type = NT_integer;
11532
11533 /* If only untyped args are allowed, decay any more specific types to
11534 them. Some instructions only care about signs for some element
11535 sizes, so handle that properly. */
11536 if ((g_size == 8 && (types_allowed & N_8) != 0)
11537 || (g_size == 16 && (types_allowed & N_16) != 0)
11538 || (g_size == 32 && (types_allowed & N_32) != 0)
11539 || (g_size == 64 && (types_allowed & N_64) != 0))
11540 g_type = NT_untyped;
11541
11542 if (pass == 0)
11543 {
11544 if ((thisarg & N_KEY) != 0)
11545 {
11546 k_type = g_type;
11547 k_size = g_size;
11548 key_allowed = thisarg & ~N_KEY;
11549 }
11550 }
11551 else
11552 {
11553 if ((thisarg & N_VFP) != 0)
11554 {
11555 enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
11556 unsigned regwidth = neon_shape_el_size[regshape], match;
11557
11558 /* In VFP mode, operands must match register widths. If we
11559 have a key operand, use its width, else use the width of
11560 the current operand. */
11561 if (k_size != -1u)
11562 match = k_size;
11563 else
11564 match = g_size;
11565
11566 if (regwidth != match)
11567 {
11568 first_error (_("operand size must match register width"));
11569 return badtype;
11570 }
11571 }
11572
11573 if ((thisarg & N_EQK) == 0)
11574 {
11575 unsigned given_type = type_chk_of_el_type (g_type, g_size);
11576
11577 if ((given_type & types_allowed) == 0)
11578 {
11579 first_error (_("bad type in Neon instruction"));
11580 return badtype;
11581 }
11582 }
11583 else
11584 {
11585 enum neon_el_type mod_k_type = k_type;
11586 unsigned mod_k_size = k_size;
11587 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
11588 if (g_type != mod_k_type || g_size != mod_k_size)
11589 {
11590 first_error (_("inconsistent types in Neon instruction"));
11591 return badtype;
11592 }
11593 }
11594 }
11595 }
11596 }
11597
11598 return inst.vectype.el[key_el];
11599 }
11600
11601 /* Neon-style VFP instruction forwarding. */
11602
11603 /* Thumb VFP instructions have 0xE in the condition field. */
11604
11605 static void
11606 do_vfp_cond_or_thumb (void)
11607 {
11608 if (thumb_mode)
11609 inst.instruction |= 0xe0000000;
11610 else
11611 inst.instruction |= inst.cond << 28;
11612 }
11613
11614 /* Look up and encode a simple mnemonic, for use as a helper function for the
11615 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
11616 etc. It is assumed that operand parsing has already been done, and that the
11617 operands are in the form expected by the given opcode (this isn't necessarily
11618 the same as the form in which they were parsed, hence some massaging must
11619 take place before this function is called).
11620 Checks current arch version against that in the looked-up opcode. */
11621
11622 static void
11623 do_vfp_nsyn_opcode (const char *opname)
11624 {
11625 const struct asm_opcode *opcode;
11626
11627 opcode = hash_find (arm_ops_hsh, opname);
11628
11629 if (!opcode)
11630 abort ();
11631
11632 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
11633 thumb_mode ? *opcode->tvariant : *opcode->avariant),
11634 _(BAD_FPU));
11635
11636 if (thumb_mode)
11637 {
11638 inst.instruction = opcode->tvalue;
11639 opcode->tencode ();
11640 }
11641 else
11642 {
11643 inst.instruction = (inst.cond << 28) | opcode->avalue;
11644 opcode->aencode ();
11645 }
11646 }
11647
11648 static void
11649 do_vfp_nsyn_add_sub (enum neon_shape rs)
11650 {
11651 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
11652
11653 if (rs == NS_FFF)
11654 {
11655 if (is_add)
11656 do_vfp_nsyn_opcode ("fadds");
11657 else
11658 do_vfp_nsyn_opcode ("fsubs");
11659 }
11660 else
11661 {
11662 if (is_add)
11663 do_vfp_nsyn_opcode ("faddd");
11664 else
11665 do_vfp_nsyn_opcode ("fsubd");
11666 }
11667 }
11668
11669 /* Check operand types to see if this is a VFP instruction, and if so call
11670 PFN (). */
11671
11672 static int
11673 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
11674 {
11675 enum neon_shape rs;
11676 struct neon_type_el et;
11677
11678 switch (args)
11679 {
11680 case 2:
11681 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11682 et = neon_check_type (2, rs,
11683 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11684 break;
11685
11686 case 3:
11687 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11688 et = neon_check_type (3, rs,
11689 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11690 break;
11691
11692 default:
11693 abort ();
11694 }
11695
11696 if (et.type != NT_invtype)
11697 {
11698 pfn (rs);
11699 return SUCCESS;
11700 }
11701 else
11702 inst.error = NULL;
11703
11704 return FAIL;
11705 }
11706
11707 static void
11708 do_vfp_nsyn_mla_mls (enum neon_shape rs)
11709 {
11710 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
11711
11712 if (rs == NS_FFF)
11713 {
11714 if (is_mla)
11715 do_vfp_nsyn_opcode ("fmacs");
11716 else
11717 do_vfp_nsyn_opcode ("fmscs");
11718 }
11719 else
11720 {
11721 if (is_mla)
11722 do_vfp_nsyn_opcode ("fmacd");
11723 else
11724 do_vfp_nsyn_opcode ("fmscd");
11725 }
11726 }
11727
11728 static void
11729 do_vfp_nsyn_mul (enum neon_shape rs)
11730 {
11731 if (rs == NS_FFF)
11732 do_vfp_nsyn_opcode ("fmuls");
11733 else
11734 do_vfp_nsyn_opcode ("fmuld");
11735 }
11736
11737 static void
11738 do_vfp_nsyn_abs_neg (enum neon_shape rs)
11739 {
11740 int is_neg = (inst.instruction & 0x80) != 0;
11741 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
11742
11743 if (rs == NS_FF)
11744 {
11745 if (is_neg)
11746 do_vfp_nsyn_opcode ("fnegs");
11747 else
11748 do_vfp_nsyn_opcode ("fabss");
11749 }
11750 else
11751 {
11752 if (is_neg)
11753 do_vfp_nsyn_opcode ("fnegd");
11754 else
11755 do_vfp_nsyn_opcode ("fabsd");
11756 }
11757 }
11758
11759 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11760 insns belong to Neon, and are handled elsewhere. */
11761
11762 static void
11763 do_vfp_nsyn_ldm_stm (int is_dbmode)
11764 {
11765 int is_ldm = (inst.instruction & (1 << 20)) != 0;
11766 if (is_ldm)
11767 {
11768 if (is_dbmode)
11769 do_vfp_nsyn_opcode ("fldmdbs");
11770 else
11771 do_vfp_nsyn_opcode ("fldmias");
11772 }
11773 else
11774 {
11775 if (is_dbmode)
11776 do_vfp_nsyn_opcode ("fstmdbs");
11777 else
11778 do_vfp_nsyn_opcode ("fstmias");
11779 }
11780 }
11781
11782 static void
11783 do_vfp_nsyn_sqrt (void)
11784 {
11785 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11786 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11787
11788 if (rs == NS_FF)
11789 do_vfp_nsyn_opcode ("fsqrts");
11790 else
11791 do_vfp_nsyn_opcode ("fsqrtd");
11792 }
11793
11794 static void
11795 do_vfp_nsyn_div (void)
11796 {
11797 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11798 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11799 N_F32 | N_F64 | N_KEY | N_VFP);
11800
11801 if (rs == NS_FFF)
11802 do_vfp_nsyn_opcode ("fdivs");
11803 else
11804 do_vfp_nsyn_opcode ("fdivd");
11805 }
11806
11807 static void
11808 do_vfp_nsyn_nmul (void)
11809 {
11810 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11811 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11812 N_F32 | N_F64 | N_KEY | N_VFP);
11813
11814 if (rs == NS_FFF)
11815 {
11816 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11817 do_vfp_sp_dyadic ();
11818 }
11819 else
11820 {
11821 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11822 do_vfp_dp_rd_rn_rm ();
11823 }
11824 do_vfp_cond_or_thumb ();
11825 }
11826
11827 static void
11828 do_vfp_nsyn_cmp (void)
11829 {
11830 if (inst.operands[1].isreg)
11831 {
11832 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11833 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11834
11835 if (rs == NS_FF)
11836 {
11837 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11838 do_vfp_sp_monadic ();
11839 }
11840 else
11841 {
11842 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11843 do_vfp_dp_rd_rm ();
11844 }
11845 }
11846 else
11847 {
11848 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
11849 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
11850
11851 switch (inst.instruction & 0x0fffffff)
11852 {
11853 case N_MNEM_vcmp:
11854 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
11855 break;
11856 case N_MNEM_vcmpe:
11857 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
11858 break;
11859 default:
11860 abort ();
11861 }
11862
11863 if (rs == NS_FI)
11864 {
11865 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11866 do_vfp_sp_compare_z ();
11867 }
11868 else
11869 {
11870 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11871 do_vfp_dp_rd ();
11872 }
11873 }
11874 do_vfp_cond_or_thumb ();
11875 }
11876
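/* Shift the register-list operand up and synthesize "sp!" as operand 0, so
 that the VFP push/pop pseudo-ops below can be encoded via the fstmdb/fldmia
 opcodes. */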
11877 static void
11878 nsyn_insert_sp (void)
11879 {
11880 inst.operands[1] = inst.operands[0];
11881 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
11882 inst.operands[0].reg = REG_SP;
11883 inst.operands[0].isreg = 1;
11884 inst.operands[0].writeback = 1;
11885 inst.operands[0].present = 1;
11886 }
11887
11888 static void
11889 do_vfp_nsyn_push (void)
11890 {
11891 nsyn_insert_sp ();
11892 if (inst.operands[1].issingle)
11893 do_vfp_nsyn_opcode ("fstmdbs");
11894 else
11895 do_vfp_nsyn_opcode ("fstmdbd");
11896 }
11897
11898 static void
11899 do_vfp_nsyn_pop (void)
11900 {
11901 nsyn_insert_sp ();
11902 if (inst.operands[1].issingle)
11903 do_vfp_nsyn_opcode ("fldmias");
11904 else
11905 do_vfp_nsyn_opcode ("fldmiad");
11906 }
11907
11908 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11909 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11910
11911 static unsigned
11912 neon_dp_fixup (unsigned i)
11913 {
11914 if (thumb_mode)
11915 {
11916 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
11917 if (i & (1 << 24))
11918 i |= 1 << 28;
11919
11920 i &= ~(1 << 24);
11921
11922 i |= 0xef000000;
11923 }
11924 else
11925 i |= 0xf2000000;
11926
11927 return i;
11928 }
11929
11930 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
11931 (0, 1, 2, 3). */
11932
11933 static unsigned
11934 neon_logbits (unsigned x)
11935 {
11936 return ffs (x) - 4;
11937 }
11938
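/* Split a D/Q register number into the four low bits and the single high bit
 that Neon encodings store separately. */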
11939 #define LOW4(R) ((R) & 0xf)
11940 #define HI1(R) (((R) >> 4) & 1)
11941
11942 /* Encode insns with bit pattern:
11943
11944 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11945 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11946
11947 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11948 different meaning for some instruction. */
11949
11950 static void
11951 neon_three_same (int isquad, int ubit, int size)
11952 {
11953 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11954 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11955 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11956 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11957 inst.instruction |= LOW4 (inst.operands[2].reg);
11958 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11959 inst.instruction |= (isquad != 0) << 6;
11960 inst.instruction |= (ubit != 0) << 24;
11961 if (size != -1)
11962 inst.instruction |= neon_logbits (size) << 20;
11963
11964 inst.instruction = neon_dp_fixup (inst.instruction);
11965 }
11966
11967 /* Encode instructions of the form:
11968
11969 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11970 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11971
11972 Don't write size if SIZE == -1. */
11973
11974 static void
11975 neon_two_same (int qbit, int ubit, int size)
11976 {
11977 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11978 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11979 inst.instruction |= LOW4 (inst.operands[1].reg);
11980 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11981 inst.instruction |= (qbit != 0) << 6;
11982 inst.instruction |= (ubit != 0) << 24;
11983
11984 if (size != -1)
11985 inst.instruction |= neon_logbits (size) << 18;
11986
11987 inst.instruction = neon_dp_fixup (inst.instruction);
11988 }
11989
11990 /* Neon instruction encoders, in approximate order of appearance. */
11991
11992 static void
11993 do_neon_dyadic_i_su (void)
11994 {
11995 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11996 struct neon_type_el et = neon_check_type (3, rs,
11997 N_EQK, N_EQK, N_SU_32 | N_KEY);
11998 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11999 }
12000
12001 static void
12002 do_neon_dyadic_i64_su (void)
12003 {
12004 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12005 struct neon_type_el et = neon_check_type (3, rs,
12006 N_EQK, N_EQK, N_SU_ALL | N_KEY);
12007 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12008 }
12009
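/* Write the common fields of a Neon shift-by-immediate instruction: the
 registers, the Q bit, the shift immediate (from bit 16 upwards) combined
 with the element-size marker (one of bits 19-21, or bit 7 for 64-bit
 elements), and optionally the U bit. */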
12010 static void
12011 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
12012 unsigned immbits)
12013 {
12014 unsigned size = et.size >> 3;
12015 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12016 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12017 inst.instruction |= LOW4 (inst.operands[1].reg);
12018 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12019 inst.instruction |= (isquad != 0) << 6;
12020 inst.instruction |= immbits << 16;
12021 inst.instruction |= (size >> 3) << 7;
12022 inst.instruction |= (size & 0x7) << 19;
12023 if (write_ubit)
12024 inst.instruction |= (uval != 0) << 24;
12025
12026 inst.instruction = neon_dp_fixup (inst.instruction);
12027 }
12028
12029 static void
12030 do_neon_shl_imm (void)
12031 {
12032 if (!inst.operands[2].isreg)
12033 {
12034 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12035 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
12036 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12037 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
12038 }
12039 else
12040 {
12041 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12042 struct neon_type_el et = neon_check_type (3, rs,
12043 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
12044 unsigned int tmp;
12045
12046 /* VSHL/VQSHL 3-register variants have syntax such as:
12047 vshl.xx Dd, Dm, Dn
12048 whereas other 3-register operations encoded by neon_three_same have
12049 syntax like:
12050 vadd.xx Dd, Dn, Dm
12051 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
12052 here. */
12053 tmp = inst.operands[2].reg;
12054 inst.operands[2].reg = inst.operands[1].reg;
12055 inst.operands[1].reg = tmp;
12056 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12057 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12058 }
12059 }
12060
12061 static void
12062 do_neon_qshl_imm (void)
12063 {
12064 if (!inst.operands[2].isreg)
12065 {
12066 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12067 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
12068
12069 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12070 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
12071 inst.operands[2].imm);
12072 }
12073 else
12074 {
12075 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12076 struct neon_type_el et = neon_check_type (3, rs,
12077 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
12078 unsigned int tmp;
12079
12080 /* See note in do_neon_shl_imm. */
12081 tmp = inst.operands[2].reg;
12082 inst.operands[2].reg = inst.operands[1].reg;
12083 inst.operands[1].reg = tmp;
12084 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12085 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12086 }
12087 }
12088
12089 static void
12090 do_neon_rshl (void)
12091 {
12092 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12093 struct neon_type_el et = neon_check_type (3, rs,
12094 N_EQK, N_EQK, N_SU_ALL | N_KEY);
12095 unsigned int tmp;
12096
12097 tmp = inst.operands[2].reg;
12098 inst.operands[2].reg = inst.operands[1].reg;
12099 inst.operands[1].reg = tmp;
12100 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12101 }
12102
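/* Choose the cmode encoding for a Neon logic-immediate operand, setting
 *IMMBITS to the 8-bit payload; the cmode value selects which byte of the
 16- or 32-bit element the payload occupies. Returns FAIL if the immediate
 cannot be represented. */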
12103 static int
12104 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
12105 {
12106 /* Handle .I8 pseudo-instructions. */
12107 if (size == 8)
12108 {
12109 /* Unfortunately, this will make everything apart from zero out-of-range.
12110 FIXME: is this the intended semantics? There doesn't seem much point in
12111 accepting .I8 if so. */
12112 immediate |= immediate << 8;
12113 size = 16;
12114 }
12115
12116 if (size >= 32)
12117 {
12118 if (immediate == (immediate & 0x000000ff))
12119 {
12120 *immbits = immediate;
12121 return 0x1;
12122 }
12123 else if (immediate == (immediate & 0x0000ff00))
12124 {
12125 *immbits = immediate >> 8;
12126 return 0x3;
12127 }
12128 else if (immediate == (immediate & 0x00ff0000))
12129 {
12130 *immbits = immediate >> 16;
12131 return 0x5;
12132 }
12133 else if (immediate == (immediate & 0xff000000))
12134 {
12135 *immbits = immediate >> 24;
12136 return 0x7;
12137 }
12138 if ((immediate & 0xffff) != (immediate >> 16))
12139 goto bad_immediate;
12140 immediate &= 0xffff;
12141 }
12142
12143 if (immediate == (immediate & 0x000000ff))
12144 {
12145 *immbits = immediate;
12146 return 0x9;
12147 }
12148 else if (immediate == (immediate & 0x0000ff00))
12149 {
12150 *immbits = immediate >> 8;
12151 return 0xb;
12152 }
12153
12154 bad_immediate:
12155 first_error (_("immediate value out of range"));
12156 return FAIL;
12157 }
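
/* Worked example (illustrative, values chosen arbitrarily):

     neon_cmode_for_logic_imm (0x00ab0000, &bits, 32)

   matches the 0x00ff0000 case above, so it sets the immediate bits to 0xab
   and returns cmode 0x5; a 16-bit operand of 0x1200 instead gives 0x12 and
   cmode 0xb.  */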
12158
12159 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
12160 A, B, C, D. */
12161
12162 static int
12163 neon_bits_same_in_bytes (unsigned imm)
12164 {
12165 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
12166 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
12167 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
12168 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
12169 }
12170
12171 /* For immediate of above form, return 0bABCD. */
12172
12173 static unsigned
12174 neon_squash_bits (unsigned imm)
12175 {
12176 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
12177 | ((imm & 0x01000000) >> 21);
12178 }
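
/* Worked example (illustrative): 0x00ff00ff satisfies
   neon_bits_same_in_bytes, and

     neon_squash_bits (0x00ff00ff) == 0x5

   i.e. 0bABCD with A = 0, B = 1, C = 0, D = 1.  */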
12179
12180 /* Compress quarter-float representation to 0b...000 abcdefgh. */
12181
12182 static unsigned
12183 neon_qfloat_bits (unsigned imm)
12184 {
12185 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
12186 }
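
/* Worked example (illustrative): 1.0f has the single-precision bit pattern
   0x3f800000, and

     neon_qfloat_bits (0x3f800000) == 0x70

   i.e. the 8-bit quarter-float encoding abcdefgh = 0b01110000.  */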
12187
12188 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
12189 the instruction. *OP is passed as the initial value of the op field, and
12190 may be set to a different value depending on the constant (i.e.
12191 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
12192 MVN). If the immediate looks like a repeated pattern then also
12193 try smaller element sizes. */
12194
12195 static int
12196 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
12197 unsigned *immbits, int *op, int size,
12198 enum neon_el_type type)
12199 {
12200 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
12201 float. */
12202 if (type == NT_float && !float_p)
12203 return FAIL;
12204
12205 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
12206 {
12207 if (size != 32 || *op == 1)
12208 return FAIL;
12209 *immbits = neon_qfloat_bits (immlo);
12210 return 0xf;
12211 }
12212
12213 if (size == 64)
12214 {
12215 if (neon_bits_same_in_bytes (immhi)
12216 && neon_bits_same_in_bytes (immlo))
12217 {
12218 if (*op == 1)
12219 return FAIL;
12220 *immbits = (neon_squash_bits (immhi) << 4)
12221 | neon_squash_bits (immlo);
12222 *op = 1;
12223 return 0xe;
12224 }
12225
12226 if (immhi != immlo)
12227 return FAIL;
12228 }
12229
12230 if (size >= 32)
12231 {
12232 if (immlo == (immlo & 0x000000ff))
12233 {
12234 *immbits = immlo;
12235 return 0x0;
12236 }
12237 else if (immlo == (immlo & 0x0000ff00))
12238 {
12239 *immbits = immlo >> 8;
12240 return 0x2;
12241 }
12242 else if (immlo == (immlo & 0x00ff0000))
12243 {
12244 *immbits = immlo >> 16;
12245 return 0x4;
12246 }
12247 else if (immlo == (immlo & 0xff000000))
12248 {
12249 *immbits = immlo >> 24;
12250 return 0x6;
12251 }
12252 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
12253 {
12254 *immbits = (immlo >> 8) & 0xff;
12255 return 0xc;
12256 }
12257 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
12258 {
12259 *immbits = (immlo >> 16) & 0xff;
12260 return 0xd;
12261 }
12262
12263 if ((immlo & 0xffff) != (immlo >> 16))
12264 return FAIL;
12265 immlo &= 0xffff;
12266 }
12267
12268 if (size >= 16)
12269 {
12270 if (immlo == (immlo & 0x000000ff))
12271 {
12272 *immbits = immlo;
12273 return 0x8;
12274 }
12275 else if (immlo == (immlo & 0x0000ff00))
12276 {
12277 *immbits = immlo >> 8;
12278 return 0xa;
12279 }
12280
12281 if ((immlo & 0xff) != (immlo >> 8))
12282 return FAIL;
12283 immlo &= 0xff;
12284 }
12285
12286 if (immlo == (immlo & 0x000000ff))
12287 {
12288 /* Don't allow MVN with 8-bit immediate. */
12289 if (*op == 1)
12290 return FAIL;
12291 *immbits = immlo;
12292 return 0xe;
12293 }
12294
12295 return FAIL;
12296 }
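
/* Worked examples (illustrative): with *OP == 0 (MOV) and SIZE == 32, an
   integer immediate of 0x00ab0000 matches the third 32-bit case above, so
   *IMMBITS becomes 0xab and the cmode returned is 0x4; an immediate of
   0x0000abff matches the (immlo & 0x0000ff00) | 0x000000ff case, giving
   *IMMBITS 0xab and cmode 0xc.  *OP is left unchanged in both cases.  */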
12297
12298 /* Write immediate bits [7:0] to the following locations:
12299
12300 |28/24|23 19|18 16|15 4|3 0|
12301 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
12302
12303 This function is used by VMOV/VMVN/VORR/VBIC. */
12304
12305 static void
12306 neon_write_immbits (unsigned immbits)
12307 {
12308 inst.instruction |= immbits & 0xf;
12309 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
12310 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
12311 }
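
/* Worked example (illustrative): for IMMBITS == 0xab (0b10101011), the
   function above places e f g h = 0b1011 in bits [3:0], b c d = 0b010 in
   bits [18:16], and a = 1 in the bit written at position 24 (the 28/24
   column of the layout above).  */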
12312
12313 /* Invert low-order SIZE bits of XHI:XLO. */
12314
12315 static void
12316 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
12317 {
12318 unsigned immlo = xlo ? *xlo : 0;
12319 unsigned immhi = xhi ? *xhi : 0;
12320
12321 switch (size)
12322 {
12323 case 8:
12324 immlo = (~immlo) & 0xff;
12325 break;
12326
12327 case 16:
12328 immlo = (~immlo) & 0xffff;
12329 break;
12330
12331 case 64:
12332 immhi = (~immhi) & 0xffffffff;
12333 /* fall through. */
12334
12335 case 32:
12336 immlo = (~immlo) & 0xffffffff;
12337 break;
12338
12339 default:
12340 abort ();
12341 }
12342
12343 if (xlo)
12344 *xlo = immlo;
12345
12346 if (xhi)
12347 *xhi = immhi;
12348 }
12349
12350 static void
12351 do_neon_logic (void)
12352 {
12353 if (inst.operands[2].present && inst.operands[2].isreg)
12354 {
12355 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12356 neon_check_type (3, rs, N_IGNORE_TYPE);
12357 /* U bit and size field were set as part of the bitmask. */
12358 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12359 neon_three_same (neon_quad (rs), 0, -1);
12360 }
12361 else
12362 {
12363 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
12364 struct neon_type_el et = neon_check_type (2, rs,
12365 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12366 enum neon_opc opcode = inst.instruction & 0x0fffffff;
12367 unsigned immbits;
12368 int cmode;
12369
12370 if (et.type == NT_invtype)
12371 return;
12372
12373 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12374
12375 immbits = inst.operands[1].imm;
12376 if (et.size == 64)
12377 {
12378 /* .i64 is a pseudo-op, so the immediate must be a repeating
12379 pattern. */
12380 if (immbits != (inst.operands[1].regisimm ?
12381 inst.operands[1].reg : 0))
12382 {
12383 /* Set immbits to an invalid constant. */
12384 immbits = 0xdeadbeef;
12385 }
12386 }
12387
12388 switch (opcode)
12389 {
12390 case N_MNEM_vbic:
12391 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12392 break;
12393
12394 case N_MNEM_vorr:
12395 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12396 break;
12397
12398 case N_MNEM_vand:
12399 /* Pseudo-instruction for VBIC. */
12400 neon_invert_size (&immbits, 0, et.size);
12401 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12402 break;
12403
12404 case N_MNEM_vorn:
12405 /* Pseudo-instruction for VORR. */
12406 neon_invert_size (&immbits, 0, et.size);
12407 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
12408 break;
12409
12410 default:
12411 abort ();
12412 }
12413
12414 if (cmode == FAIL)
12415 return;
12416
12417 inst.instruction |= neon_quad (rs) << 6;
12418 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12419 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12420 inst.instruction |= cmode << 8;
12421 neon_write_immbits (immbits);
12422
12423 inst.instruction = neon_dp_fixup (inst.instruction);
12424 }
12425 }
12426
12427 static void
12428 do_neon_bitfield (void)
12429 {
12430 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12431 neon_check_type (3, rs, N_IGNORE_TYPE);
12432 neon_three_same (neon_quad (rs), 0, -1);
12433 }
12434
12435 static void
12436 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
12437 unsigned destbits)
12438 {
12439 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12440 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
12441 types | N_KEY);
12442 if (et.type == NT_float)
12443 {
12444 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
12445 neon_three_same (neon_quad (rs), 0, -1);
12446 }
12447 else
12448 {
12449 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12450 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
12451 }
12452 }
12453
12454 static void
12455 do_neon_dyadic_if_su (void)
12456 {
12457 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
12458 }
12459
12460 static void
12461 do_neon_dyadic_if_su_d (void)
12462 {
12463 /* This version only allows D registers, but that constraint is enforced during
12464 operand parsing so we don't need to do anything extra here. */
12465 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
12466 }
12467
12468 static void
12469 do_neon_dyadic_if_i_d (void)
12470 {
12471 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12472 affected if we specify unsigned args. */
12473 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12474 }
12475
12476 enum vfp_or_neon_is_neon_bits
12477 {
12478 NEON_CHECK_CC = 1,
12479 NEON_CHECK_ARCH = 2
12480 };
12481
12482 /* Call this function when an instruction which may have belonged to either the
12483 VFP or Neon instruction sets turns out to be a Neon instruction (due to the
12484 operand types involved, etc.). We have to check and/or fix up a couple of
12485 things:
12486
12487 - Make sure the user hasn't attempted to make a Neon instruction
12488 conditional.
12489 - Alter the value in the condition code field if necessary.
12490 - Make sure that the arch supports Neon instructions.
12491
12492 Which of these operations take place depends on bits from enum
12493 vfp_or_neon_is_neon_bits.
12494
12495 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
12496 current instruction's condition is COND_ALWAYS, the condition field is
12497 changed to inst.uncond_value. This is necessary because instructions shared
12498 between VFP and Neon may be conditional for the VFP variants only, and the
12499 unconditional Neon version must have, e.g., 0xF in the condition field. */
12500
12501 static int
12502 vfp_or_neon_is_neon (unsigned check)
12503 {
12504 /* Conditions are always legal in Thumb mode (IT blocks). */
12505 if (!thumb_mode && (check & NEON_CHECK_CC))
12506 {
12507 if (inst.cond != COND_ALWAYS)
12508 {
12509 first_error (_(BAD_COND));
12510 return FAIL;
12511 }
12512 if (inst.uncond_value != -1)
12513 inst.instruction |= inst.uncond_value << 28;
12514 }
12515
12516 if ((check & NEON_CHECK_ARCH)
12517 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
12518 {
12519 first_error (_(BAD_FPU));
12520 return FAIL;
12521 }
12522
12523 return SUCCESS;
12524 }
12525
12526 static void
12527 do_neon_addsub_if_i (void)
12528 {
12529 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
12530 return;
12531
12532 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12533 return;
12534
12535 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12536 affected if we specify unsigned args. */
12537 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
12538 }
12539
12540 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
12541 result to be:
12542 V<op> A,B (A is operand 0, B is operand 2)
12543 to mean:
12544 V<op> A,B,A
12545 not:
12546 V<op> A,B,B
12547 so handle that case specially. */
12548
12549 static void
12550 neon_exchange_operands (void)
12551 {
12552 void *scratch = alloca (sizeof (inst.operands[0]));
12553 if (inst.operands[1].present)
12554 {
12555 /* Swap operands[1] and operands[2]. */
12556 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
12557 inst.operands[1] = inst.operands[2];
12558 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
12559 }
12560 else
12561 {
12562 inst.operands[1] = inst.operands[2];
12563 inst.operands[2] = inst.operands[0];
12564 }
12565 }
12566
12567 static void
12568 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
12569 {
12570 if (inst.operands[2].isreg)
12571 {
12572 if (invert)
12573 neon_exchange_operands ();
12574 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
12575 }
12576 else
12577 {
12578 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12579 struct neon_type_el et = neon_check_type (2, rs,
12580 N_EQK | N_SIZ, immtypes | N_KEY);
12581
12582 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12583 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12584 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12585 inst.instruction |= LOW4 (inst.operands[1].reg);
12586 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12587 inst.instruction |= neon_quad (rs) << 6;
12588 inst.instruction |= (et.type == NT_float) << 10;
12589 inst.instruction |= neon_logbits (et.size) << 18;
12590
12591 inst.instruction = neon_dp_fixup (inst.instruction);
12592 }
12593 }
12594
12595 static void
12596 do_neon_cmp (void)
12597 {
12598 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
12599 }
12600
12601 static void
12602 do_neon_cmp_inv (void)
12603 {
12604 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
12605 }
12606
12607 static void
12608 do_neon_ceq (void)
12609 {
12610 neon_compare (N_IF_32, N_IF_32, FALSE);
12611 }
12612
12613 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
12614 scalars, which are encoded in 5 bits, M : Rm.
12615 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
12616 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
12617 index in M. */
12618
12619 static unsigned
12620 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
12621 {
12622 unsigned regno = NEON_SCALAR_REG (scalar);
12623 unsigned elno = NEON_SCALAR_INDEX (scalar);
12624
12625 switch (elsize)
12626 {
12627 case 16:
12628 if (regno > 7 || elno > 3)
12629 goto bad_scalar;
12630 return regno | (elno << 3);
12631
12632 case 32:
12633 if (regno > 15 || elno > 1)
12634 goto bad_scalar;
12635 return regno | (elno << 4);
12636
12637 default:
12638 bad_scalar:
12639 first_error (_("scalar out of range for multiply instruction"));
12640 }
12641
12642 return 0;
12643 }
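
/* Worked example (illustrative): the scalar d5[2] with 16-bit elements
   encodes as 5 | (2 << 3) == 0b10101, i.e. M:Rm == 1:0101 -- register 5 in
   Rm[2:0], index 2 in M:Rm[3].  With 32-bit elements, d9[1] encodes as
   9 | (1 << 4) == 0b11001 -- register 9 in Rm[3:0], index 1 in M.  */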
12644
12645 /* Encode multiply / multiply-accumulate scalar instructions. */
12646
12647 static void
12648 neon_mul_mac (struct neon_type_el et, int ubit)
12649 {
12650 unsigned scalar;
12651
12652 /* Give a more helpful error message if we have an invalid type. */
12653 if (et.type == NT_invtype)
12654 return;
12655
12656 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
12657 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12658 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12659 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12660 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12661 inst.instruction |= LOW4 (scalar);
12662 inst.instruction |= HI1 (scalar) << 5;
12663 inst.instruction |= (et.type == NT_float) << 8;
12664 inst.instruction |= neon_logbits (et.size) << 20;
12665 inst.instruction |= (ubit != 0) << 24;
12666
12667 inst.instruction = neon_dp_fixup (inst.instruction);
12668 }
12669
12670 static void
12671 do_neon_mac_maybe_scalar (void)
12672 {
12673 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
12674 return;
12675
12676 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12677 return;
12678
12679 if (inst.operands[2].isscalar)
12680 {
12681 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12682 struct neon_type_el et = neon_check_type (3, rs,
12683 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
12684 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12685 neon_mul_mac (et, neon_quad (rs));
12686 }
12687 else
12688 {
12689 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12690 affected if we specify unsigned args. */
12691 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12692 }
12693 }
12694
12695 static void
12696 do_neon_tst (void)
12697 {
12698 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12699 struct neon_type_el et = neon_check_type (3, rs,
12700 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
12701 neon_three_same (neon_quad (rs), 0, et.size);
12702 }
12703
12704 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12705 same types as the MAC equivalents. The polynomial type for this instruction
12706 is encoded the same as the integer type. */
12707
12708 static void
12709 do_neon_mul (void)
12710 {
12711 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
12712 return;
12713
12714 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12715 return;
12716
12717 if (inst.operands[2].isscalar)
12718 do_neon_mac_maybe_scalar ();
12719 else
12720 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
12721 }
12722
12723 static void
12724 do_neon_qdmulh (void)
12725 {
12726 if (inst.operands[2].isscalar)
12727 {
12728 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12729 struct neon_type_el et = neon_check_type (3, rs,
12730 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12731 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12732 neon_mul_mac (et, neon_quad (rs));
12733 }
12734 else
12735 {
12736 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12737 struct neon_type_el et = neon_check_type (3, rs,
12738 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12739 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12740 /* The U bit (rounding) comes from bit mask. */
12741 neon_three_same (neon_quad (rs), 0, et.size);
12742 }
12743 }
12744
12745 static void
12746 do_neon_fcmp_absolute (void)
12747 {
12748 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12749 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12750 /* Size field comes from bit mask. */
12751 neon_three_same (neon_quad (rs), 1, -1);
12752 }
12753
12754 static void
12755 do_neon_fcmp_absolute_inv (void)
12756 {
12757 neon_exchange_operands ();
12758 do_neon_fcmp_absolute ();
12759 }
12760
12761 static void
12762 do_neon_step (void)
12763 {
12764 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12765 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12766 neon_three_same (neon_quad (rs), 0, -1);
12767 }
12768
12769 static void
12770 do_neon_abs_neg (void)
12771 {
12772 enum neon_shape rs;
12773 struct neon_type_el et;
12774
12775 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
12776 return;
12777
12778 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12779 return;
12780
12781 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12782 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
12783
12784 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12785 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12786 inst.instruction |= LOW4 (inst.operands[1].reg);
12787 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12788 inst.instruction |= neon_quad (rs) << 6;
12789 inst.instruction |= (et.type == NT_float) << 10;
12790 inst.instruction |= neon_logbits (et.size) << 18;
12791
12792 inst.instruction = neon_dp_fixup (inst.instruction);
12793 }
12794
12795 static void
12796 do_neon_sli (void)
12797 {
12798 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12799 struct neon_type_el et = neon_check_type (2, rs,
12800 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12801 int imm = inst.operands[2].imm;
12802 constraint (imm < 0 || (unsigned)imm >= et.size,
12803 _("immediate out of range for insert"));
12804 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12805 }
12806
12807 static void
12808 do_neon_sri (void)
12809 {
12810 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12811 struct neon_type_el et = neon_check_type (2, rs,
12812 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12813 int imm = inst.operands[2].imm;
12814 constraint (imm < 1 || (unsigned)imm > et.size,
12815 _("immediate out of range for insert"));
12816 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
12817 }
12818
12819 static void
12820 do_neon_qshlu_imm (void)
12821 {
12822 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12823 struct neon_type_el et = neon_check_type (2, rs,
12824 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
12825 int imm = inst.operands[2].imm;
12826 constraint (imm < 0 || (unsigned)imm >= et.size,
12827 _("immediate out of range for shift"));
12828 /* Only encodes the 'U present' variant of the instruction.
12829 In this case, signed types have OP (bit 8) set to 0.
12830 Unsigned types have OP set to 1. */
12831 inst.instruction |= (et.type == NT_unsigned) << 8;
12832 /* The rest of the bits are the same as other immediate shifts. */
12833 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12834 }
12835
12836 static void
12837 do_neon_qmovn (void)
12838 {
12839 struct neon_type_el et = neon_check_type (2, NS_DQ,
12840 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12841 /* Saturating move where operands can be signed or unsigned, and the
12842 destination has the same signedness. */
12843 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12844 if (et.type == NT_unsigned)
12845 inst.instruction |= 0xc0;
12846 else
12847 inst.instruction |= 0x80;
12848 neon_two_same (0, 1, et.size / 2);
12849 }
12850
12851 static void
12852 do_neon_qmovun (void)
12853 {
12854 struct neon_type_el et = neon_check_type (2, NS_DQ,
12855 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12856 /* Saturating move with unsigned results. Operands must be signed. */
12857 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12858 neon_two_same (0, 1, et.size / 2);
12859 }
12860
12861 static void
12862 do_neon_rshift_sat_narrow (void)
12863 {
12864 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12865 or unsigned. If operands are unsigned, results must also be unsigned. */
12866 struct neon_type_el et = neon_check_type (2, NS_DQI,
12867 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12868 int imm = inst.operands[2].imm;
12869 /* This gets the bounds check, size encoding and immediate bits calculation
12870 right. */
12871 et.size /= 2;
12872
12873 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12874 VQMOVN.I<size> <Dd>, <Qm>. */
12875 if (imm == 0)
12876 {
12877 inst.operands[2].present = 0;
12878 inst.instruction = N_MNEM_vqmovn;
12879 do_neon_qmovn ();
12880 return;
12881 }
12882
12883 constraint (imm < 1 || (unsigned)imm > et.size,
12884 _("immediate out of range"));
12885 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
12886 }
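
/* For instance (illustrative), "vqshrn.s32 d0, q1, #0" is accepted here and
   assembled as "vqmovn.s32 d0, q1".  */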
12887
12888 static void
12889 do_neon_rshift_sat_narrow_u (void)
12890 {
12891 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12892 or unsigned. If operands are unsigned, results must also be unsigned. */
12893 struct neon_type_el et = neon_check_type (2, NS_DQI,
12894 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12895 int imm = inst.operands[2].imm;
12896 /* This gets the bounds check, size encoding and immediate bits calculation
12897 right. */
12898 et.size /= 2;
12899
12900 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12901 VQMOVUN.I<size> <Dd>, <Qm>. */
12902 if (imm == 0)
12903 {
12904 inst.operands[2].present = 0;
12905 inst.instruction = N_MNEM_vqmovun;
12906 do_neon_qmovun ();
12907 return;
12908 }
12909
12910 constraint (imm < 1 || (unsigned)imm > et.size,
12911 _("immediate out of range"));
12912 /* FIXME: The manual is kind of unclear about what value U should have in
12913 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12914 must be 1. */
12915 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
12916 }
12917
12918 static void
12919 do_neon_movn (void)
12920 {
12921 struct neon_type_el et = neon_check_type (2, NS_DQ,
12922 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12923 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12924 neon_two_same (0, 1, et.size / 2);
12925 }
12926
12927 static void
12928 do_neon_rshift_narrow (void)
12929 {
12930 struct neon_type_el et = neon_check_type (2, NS_DQI,
12931 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12932 int imm = inst.operands[2].imm;
12933 /* This gets the bounds check, size encoding and immediate bits calculation
12934 right. */
12935 et.size /= 2;
12936
12937 /* If the immediate is zero then this is a pseudo-instruction for
12938 VMOVN.I<size> <Dd>, <Qm>. */
12939 if (imm == 0)
12940 {
12941 inst.operands[2].present = 0;
12942 inst.instruction = N_MNEM_vmovn;
12943 do_neon_movn ();
12944 return;
12945 }
12946
12947 constraint (imm < 1 || (unsigned)imm > et.size,
12948 _("immediate out of range for narrowing operation"));
12949 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
12950 }
12951
12952 static void
12953 do_neon_shll (void)
12954 {
12955 /* FIXME: Type checking when lengthening. */
12956 struct neon_type_el et = neon_check_type (2, NS_QDI,
12957 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
12958 unsigned imm = inst.operands[2].imm;
12959
12960 if (imm == et.size)
12961 {
12962 /* Maximum shift variant. */
12963 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12964 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12965 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12966 inst.instruction |= LOW4 (inst.operands[1].reg);
12967 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12968 inst.instruction |= neon_logbits (et.size) << 18;
12969
12970 inst.instruction = neon_dp_fixup (inst.instruction);
12971 }
12972 else
12973 {
12974 /* A more-specific type check for non-max versions. */
12975 et = neon_check_type (2, NS_QDI,
12976 N_EQK | N_DBL, N_SU_32 | N_KEY);
12977 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12978 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
12979 }
12980 }
12981
12982 /* Check the various types for the VCVT instruction, and return which version
12983 the current instruction is. */
12984
12985 static int
12986 neon_cvt_flavour (enum neon_shape rs)
12987 {
12988 #define CVT_VAR(C,X,Y) \
12989 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12990 if (et.type != NT_invtype) \
12991 { \
12992 inst.error = NULL; \
12993 return (C); \
12994 }
12995 struct neon_type_el et;
12996 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
12997 || rs == NS_FF) ? N_VFP : 0;
12998 /* The instruction versions which take an immediate take one register
12999 argument, which is extended to the width of the full register. Thus the
13000 "source" and "destination" registers must have the same width. Hack that
13001 here by making the size equal to the key (wider, in this case) operand. */
13002 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
13003
13004 CVT_VAR (0, N_S32, N_F32);
13005 CVT_VAR (1, N_U32, N_F32);
13006 CVT_VAR (2, N_F32, N_S32);
13007 CVT_VAR (3, N_F32, N_U32);
13008 /* Half-precision conversions. */
13009 CVT_VAR (4, N_F32, N_F16);
13010 CVT_VAR (5, N_F16, N_F32);
13011
13012 whole_reg = N_VFP;
13013
13014 /* VFP instructions. */
13015 CVT_VAR (6, N_F32, N_F64);
13016 CVT_VAR (7, N_F64, N_F32);
13017 CVT_VAR (8, N_S32, N_F64 | key);
13018 CVT_VAR (9, N_U32, N_F64 | key);
13019 CVT_VAR (10, N_F64 | key, N_S32);
13020 CVT_VAR (11, N_F64 | key, N_U32);
13021 /* VFP instructions with bitshift. */
13022 CVT_VAR (12, N_F32 | key, N_S16);
13023 CVT_VAR (13, N_F32 | key, N_U16);
13024 CVT_VAR (14, N_F64 | key, N_S16);
13025 CVT_VAR (15, N_F64 | key, N_U16);
13026 CVT_VAR (16, N_S16, N_F32 | key);
13027 CVT_VAR (17, N_U16, N_F32 | key);
13028 CVT_VAR (18, N_S16, N_F64 | key);
13029 CVT_VAR (19, N_U16, N_F64 | key);
13030
13031 return -1;
13032 #undef CVT_VAR
13033 }
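
/* For reference (illustrative), the first CVT_VAR invocation above expands
   roughly to:

     et = neon_check_type (2, rs, whole_reg | (N_S32), whole_reg | (N_F32));
     if (et.type != NT_invtype)
       {
         inst.error = NULL;
         return (0);
       }

   so the first variant whose types check out determines the flavour.  */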
13034
13035 /* Neon-syntax VFP conversions. */
13036
13037 static void
13038 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
13039 {
13040 const char *opname = 0;
13041
13042 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
13043 {
13044 /* Conversions with immediate bitshift. */
13045 const char *enc[] =
13046 {
13047 "ftosls",
13048 "ftouls",
13049 "fsltos",
13050 "fultos",
13051 NULL,
13052 NULL,
13053 NULL,
13054 NULL,
13055 "ftosld",
13056 "ftould",
13057 "fsltod",
13058 "fultod",
13059 "fshtos",
13060 "fuhtos",
13061 "fshtod",
13062 "fuhtod",
13063 "ftoshs",
13064 "ftouhs",
13065 "ftoshd",
13066 "ftouhd"
13067 };
13068
13069 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13070 {
13071 opname = enc[flavour];
13072 constraint (inst.operands[0].reg != inst.operands[1].reg,
13073 _("operands 0 and 1 must be the same register"));
13074 inst.operands[1] = inst.operands[2];
13075 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
13076 }
13077 }
13078 else
13079 {
13080 /* Conversions without bitshift. */
13081 const char *enc[] =
13082 {
13083 "ftosis",
13084 "ftouis",
13085 "fsitos",
13086 "fuitos",
13087 "NULL",
13088 "NULL",
13089 "fcvtsd",
13090 "fcvtds",
13091 "ftosid",
13092 "ftouid",
13093 "fsitod",
13094 "fuitod"
13095 };
13096
13097 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13098 opname = enc[flavour];
13099 }
13100
13101 if (opname)
13102 do_vfp_nsyn_opcode (opname);
13103 }
13104
13105 static void
13106 do_vfp_nsyn_cvtz (void)
13107 {
13108 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
13109 int flavour = neon_cvt_flavour (rs);
13110 const char *enc[] =
13111 {
13112 "ftosizs",
13113 "ftouizs",
13114 NULL,
13115 NULL,
13116 NULL,
13117 NULL,
13118 NULL,
13119 NULL,
13120 "ftosizd",
13121 "ftouizd"
13122 };
13123
13124 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
13125 do_vfp_nsyn_opcode (enc[flavour]);
13126 }
13127
13128 static void
13129 do_neon_cvt (void)
13130 {
13131 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
13132 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
13133 int flavour = neon_cvt_flavour (rs);
13134
13135 /* VFP rather than Neon conversions. */
13136 if (flavour >= 6)
13137 {
13138 do_vfp_nsyn_cvt (rs, flavour);
13139 return;
13140 }
13141
13142 switch (rs)
13143 {
13144 case NS_DDI:
13145 case NS_QQI:
13146 {
13147 unsigned immbits;
13148 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
13149
13150 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13151 return;
13152
13153 /* Fixed-point conversion with #0 immediate is encoded as an
13154 integer conversion. */
13155 if (inst.operands[2].present && inst.operands[2].imm == 0)
13156 goto int_encode;
13157 immbits = 32 - inst.operands[2].imm;
13158 inst.instruction = NEON_ENC_IMMED (inst.instruction);
13159 if (flavour != -1)
13160 inst.instruction |= enctab[flavour];
13161 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13162 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13163 inst.instruction |= LOW4 (inst.operands[1].reg);
13164 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13165 inst.instruction |= neon_quad (rs) << 6;
13166 inst.instruction |= 1 << 21;
13167 inst.instruction |= immbits << 16;
13168
13169 inst.instruction = neon_dp_fixup (inst.instruction);
13170 }
13171 break;
13172
13173 case NS_DD:
13174 case NS_QQ:
13175 int_encode:
13176 {
13177 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
13178
13179 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13180
13181 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13182 return;
13183
13184 if (flavour != -1)
13185 inst.instruction |= enctab[flavour];
13186
13187 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13188 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13189 inst.instruction |= LOW4 (inst.operands[1].reg);
13190 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13191 inst.instruction |= neon_quad (rs) << 6;
13192 inst.instruction |= 2 << 18;
13193
13194 inst.instruction = neon_dp_fixup (inst.instruction);
13195 }
13196 break;
13197
13198 /* Half-precision conversions for Advanced SIMD -- neon. */
13199 case NS_QD:
13200 case NS_DQ:
13201
13202 if ((rs == NS_DQ)
13203 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
13204 {
13205 as_bad (_("operand size must match register width"));
13206 break;
13207 }
13208
13209 if ((rs == NS_QD)
13210 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
13211 {
13212 as_bad (_("operand size must match register width"));
13213 break;
13214 }
13215
13216 if (rs == NS_DQ)
13217 inst.instruction = 0x3b60600;
13218 else
13219 inst.instruction = 0x3b60700;
13220
13221 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13222 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13223 inst.instruction |= LOW4 (inst.operands[1].reg);
13224 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13225 inst.instruction = neon_dp_fixup (inst.instruction);
13226 break;
13227
13228 default:
13229 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
13230 do_vfp_nsyn_cvt (rs, flavour);
13231 }
13232 }
13233
13234 static void
13235 do_neon_cvtb (void)
13236 {
13237 inst.instruction = 0xeb20a40;
13238
13239 /* The sizes are attached to the mnemonic. */
13240 if (inst.vectype.el[0].type != NT_invtype
13241 && inst.vectype.el[0].size == 16)
13242 inst.instruction |= 0x00010000;
13243
13244 /* Programmer's syntax: the sizes are attached to the operands. */
13245 else if (inst.operands[0].vectype.type != NT_invtype
13246 && inst.operands[0].vectype.size == 16)
13247 inst.instruction |= 0x00010000;
13248
13249 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
13250 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
13251 do_vfp_cond_or_thumb ();
13252 }
13253
13254
13255 static void
13256 do_neon_cvtt (void)
13257 {
13258 do_neon_cvtb ();
13259 inst.instruction |= 0x80;
13260 }
13261
13262 static void
13263 neon_move_immediate (void)
13264 {
13265 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
13266 struct neon_type_el et = neon_check_type (2, rs,
13267 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
13268 unsigned immlo, immhi = 0, immbits;
13269 int op, cmode, float_p;
13270
13271 constraint (et.type == NT_invtype,
13272 _("operand size must be specified for immediate VMOV"));
13273
13274 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
13275 op = (inst.instruction & (1 << 5)) != 0;
13276
13277 immlo = inst.operands[1].imm;
13278 if (inst.operands[1].regisimm)
13279 immhi = inst.operands[1].reg;
13280
13281 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
13282 _("immediate has bits set outside the operand size"));
13283
13284 float_p = inst.operands[1].immisfloat;
13285
13286 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
13287 et.size, et.type)) == FAIL)
13288 {
13289 /* Invert relevant bits only. */
13290 neon_invert_size (&immlo, &immhi, et.size);
13291 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
13292 with one or the other; those cases are caught by
13293 neon_cmode_for_move_imm. */
13294 op = !op;
13295 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
13296 &op, et.size, et.type)) == FAIL)
13297 {
13298 first_error (_("immediate out of range"));
13299 return;
13300 }
13301 }
13302
13303 inst.instruction &= ~(1 << 5);
13304 inst.instruction |= op << 5;
13305
13306 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13307 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13308 inst.instruction |= neon_quad (rs) << 6;
13309 inst.instruction |= cmode << 8;
13310
13311 neon_write_immbits (immbits);
13312 }
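
/* Worked example (illustrative): for "vmov.i32 d0, #0xffffff00" no cmode
   matches directly, so the immediate is inverted to 0x000000ff, OP is
   flipped, and the result is encoded as VMVN.I32 with cmode 0x0 and
   immediate bits 0xff.  */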
13313
13314 static void
13315 do_neon_mvn (void)
13316 {
13317 if (inst.operands[1].isreg)
13318 {
13319 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13320
13321 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13322 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13323 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13324 inst.instruction |= LOW4 (inst.operands[1].reg);
13325 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13326 inst.instruction |= neon_quad (rs) << 6;
13327 }
13328 else
13329 {
13330 inst.instruction = NEON_ENC_IMMED (inst.instruction);
13331 neon_move_immediate ();
13332 }
13333
13334 inst.instruction = neon_dp_fixup (inst.instruction);
13335 }
13336
13337 /* Encode instructions of form:
13338
13339 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13340 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
13341
13342 static void
13343 neon_mixed_length (struct neon_type_el et, unsigned size)
13344 {
13345 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13346 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13347 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13348 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13349 inst.instruction |= LOW4 (inst.operands[2].reg);
13350 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13351 inst.instruction |= (et.type == NT_unsigned) << 24;
13352 inst.instruction |= neon_logbits (size) << 20;
13353
13354 inst.instruction = neon_dp_fixup (inst.instruction);
13355 }
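
/* Worked example (illustrative): if operand 0 is d17, LOW4 (17) == 1 goes
   into Rd (bits [15:12]) and HI1 (17) == 1 into D (bit 22) of the layout
   above; the U bit (28/24) is set only for unsigned element types, and
   neon_logbits supplies the two-bit size field.  */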
13356
13357 static void
13358 do_neon_dyadic_long (void)
13359 {
13360 /* FIXME: Type checking for lengthening op. */
13361 struct neon_type_el et = neon_check_type (3, NS_QDD,
13362 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
13363 neon_mixed_length (et, et.size);
13364 }
13365
13366 static void
13367 do_neon_abal (void)
13368 {
13369 struct neon_type_el et = neon_check_type (3, NS_QDD,
13370 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
13371 neon_mixed_length (et, et.size);
13372 }
13373
13374 static void
13375 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
13376 {
13377 if (inst.operands[2].isscalar)
13378 {
13379 struct neon_type_el et = neon_check_type (3, NS_QDS,
13380 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
13381 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
13382 neon_mul_mac (et, et.type == NT_unsigned);
13383 }
13384 else
13385 {
13386 struct neon_type_el et = neon_check_type (3, NS_QDD,
13387 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
13388 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13389 neon_mixed_length (et, et.size);
13390 }
13391 }
13392
13393 static void
13394 do_neon_mac_maybe_scalar_long (void)
13395 {
13396 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
13397 }
13398
13399 static void
13400 do_neon_dyadic_wide (void)
13401 {
13402 struct neon_type_el et = neon_check_type (3, NS_QQD,
13403 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
13404 neon_mixed_length (et, et.size);
13405 }
13406
13407 static void
13408 do_neon_dyadic_narrow (void)
13409 {
13410 struct neon_type_el et = neon_check_type (3, NS_QDD,
13411 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
13412 /* Operand sign is unimportant, and the U bit is part of the opcode,
13413 so force the operand type to integer. */
13414 et.type = NT_integer;
13415 neon_mixed_length (et, et.size / 2);
13416 }
13417
13418 static void
13419 do_neon_mul_sat_scalar_long (void)
13420 {
13421 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
13422 }
13423
13424 static void
13425 do_neon_vmull (void)
13426 {
13427 if (inst.operands[2].isscalar)
13428 do_neon_mac_maybe_scalar_long ();
13429 else
13430 {
13431 struct neon_type_el et = neon_check_type (3, NS_QDD,
13432 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
13433 if (et.type == NT_poly)
13434 inst.instruction = NEON_ENC_POLY (inst.instruction);
13435 else
13436 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13437 /* For polynomial encoding, size field must be 0b00 and the U bit must be
13438 zero. Should be OK as-is. */
13439 neon_mixed_length (et, et.size);
13440 }
13441 }
13442
13443 static void
13444 do_neon_ext (void)
13445 {
13446 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
13447 struct neon_type_el et = neon_check_type (3, rs,
13448 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
13449 unsigned imm = (inst.operands[3].imm * et.size) / 8;
13450
13451 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
13452 _("shift out of range"));
13453 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13454 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13455 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13456 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13457 inst.instruction |= LOW4 (inst.operands[2].reg);
13458 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13459 inst.instruction |= neon_quad (rs) << 6;
13460 inst.instruction |= imm << 8;
13461
13462 inst.instruction = neon_dp_fixup (inst.instruction);
13463 }
13464
13465 static void
13466 do_neon_rev (void)
13467 {
13468 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13469 struct neon_type_el et = neon_check_type (2, rs,
13470 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13471 unsigned op = (inst.instruction >> 7) & 3;
13472 /* N (width of reversed regions) is encoded as part of the bitmask. We
13473 extract it here to check that the elements to be reversed are smaller than
13474 the reversal region; otherwise we'd get a reserved instruction. */
13475 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
13476 assert (elsize != 0);
13477 constraint (et.size >= elsize,
13478 _("elements must be smaller than reversal region"));
13479 neon_two_same (neon_quad (rs), 1, et.size);
13480 }
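
/* For example (illustrative): with a 32-bit reversal region (OP == 1),
   element sizes of 8 or 16 pass the constraint above, while a 32-bit
   element type is rejected since it would select a reserved encoding.  */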
13481
13482 static void
13483 do_neon_dup (void)
13484 {
13485 if (inst.operands[1].isscalar)
13486 {
13487 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
13488 struct neon_type_el et = neon_check_type (2, rs,
13489 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13490 unsigned sizebits = et.size >> 3;
13491 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
13492 int logsize = neon_logbits (et.size);
13493 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
13494
13495 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
13496 return;
13497
13498 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
13499 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13500 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13501 inst.instruction |= LOW4 (dm);
13502 inst.instruction |= HI1 (dm) << 5;
13503 inst.instruction |= neon_quad (rs) << 6;
13504 inst.instruction |= x << 17;
13505 inst.instruction |= sizebits << 16;
13506
13507 inst.instruction = neon_dp_fixup (inst.instruction);
13508 }
13509 else
13510 {
13511 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
13512 struct neon_type_el et = neon_check_type (2, rs,
13513 N_8 | N_16 | N_32 | N_KEY, N_EQK);
13514 /* Duplicate ARM register to lanes of vector. */
13515 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
13516 switch (et.size)
13517 {
13518 case 8: inst.instruction |= 0x400000; break;
13519 case 16: inst.instruction |= 0x000020; break;
13520 case 32: inst.instruction |= 0x000000; break;
13521 default: break;
13522 }
13523 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13524 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
13525 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
13526 inst.instruction |= neon_quad (rs) << 21;
13527 /* The encoding for this instruction is identical for the ARM and Thumb
13528 variants, except for the condition field. */
13529 do_vfp_cond_or_thumb ();
13530 }
13531 }
13532
13533 /* VMOV has particularly many variations. It can be one of:
13534 0. VMOV<c><q> <Qd>, <Qm>
13535 1. VMOV<c><q> <Dd>, <Dm>
13536 (Register operations, which are VORR with Rm = Rn.)
13537 2. VMOV<c><q>.<dt> <Qd>, #<imm>
13538 3. VMOV<c><q>.<dt> <Dd>, #<imm>
13539 (Immediate loads.)
13540 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
13541 (ARM register to scalar.)
13542 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
13543 (Two ARM registers to vector.)
13544 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
13545 (Scalar to ARM register.)
13546 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
13547 (Vector to two ARM registers.)
13548 8. VMOV.F32 <Sd>, <Sm>
13549 9. VMOV.F64 <Dd>, <Dm>
13550 (VFP register moves.)
13551 10. VMOV.F32 <Sd>, #imm
13552 11. VMOV.F64 <Dd>, #imm
13553 (VFP float immediate load.)
13554 12. VMOV <Rd>, <Sm>
13555 (VFP single to ARM reg.)
13556 13. VMOV <Sd>, <Rm>
13557 (ARM reg to VFP single.)
13558 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
13559 (Two ARM regs to two VFP singles.)
13560 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
13561 (Two VFP singles to two ARM regs.)
13562
13563 These cases can be disambiguated using neon_select_shape, except cases 1/9
13564 and 3/11 which depend on the operand type too.
13565
13566 All the encoded bits are hardcoded by this function.
13567
13568 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
13569 Cases 5, 7 may be used with VFPv2 and above.
13570
13571 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
13572 can specify a type where it doesn't make sense to, and it is ignored). */
13573
13574 static void
13575 do_neon_mov (void)
13576 {
13577 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
13578 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
13579 NS_NULL);
13580 struct neon_type_el et;
13581 const char *ldconst = 0;
13582
13583 switch (rs)
13584 {
13585 case NS_DD: /* case 1/9. */
13586 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
13587 /* It is not an error here if no type is given. */
13588 inst.error = NULL;
13589 if (et.type == NT_float && et.size == 64)
13590 {
13591 do_vfp_nsyn_opcode ("fcpyd");
13592 break;
13593 }
13594 /* fall through. */
13595
13596 case NS_QQ: /* case 0/1. */
13597 {
13598 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13599 return;
13600 /* The architecture manual I have doesn't explicitly state which
13601 value the U bit should have for register->register moves, but
13602 the equivalent VORR instruction has U = 0, so do that. */
13603 inst.instruction = 0x0200110;
13604 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13605 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13606 inst.instruction |= LOW4 (inst.operands[1].reg);
13607 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13608 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13609 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13610 inst.instruction |= neon_quad (rs) << 6;
13611
13612 inst.instruction = neon_dp_fixup (inst.instruction);
13613 }
13614 break;
13615
13616 case NS_DI: /* case 3/11. */
13617 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
13618 inst.error = NULL;
13619 if (et.type == NT_float && et.size == 64)
13620 {
13621 /* case 11 (fconstd). */
13622 ldconst = "fconstd";
13623 goto encode_fconstd;
13624 }
13625 /* fall through. */
13626
13627 case NS_QI: /* case 2/3. */
13628 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13629 return;
13630 inst.instruction = 0x0800010;
13631 neon_move_immediate ();
13632 inst.instruction = neon_dp_fixup (inst.instruction);
13633 break;
13634
13635 case NS_SR: /* case 4. */
13636 {
13637 unsigned bcdebits = 0;
13638 struct neon_type_el et = neon_check_type (2, NS_NULL,
13639 N_8 | N_16 | N_32 | N_KEY, N_EQK);
13640 int logsize = neon_logbits (et.size);
13641 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
13642 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
13643
13644 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13645 _(BAD_FPU));
13646 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13647 && et.size != 32, _(BAD_FPU));
13648 constraint (et.type == NT_invtype, _("bad type for scalar"));
13649 constraint (x >= 64 / et.size, _("scalar index out of range"));
13650
13651 switch (et.size)
13652 {
13653 case 8: bcdebits = 0x8; break;
13654 case 16: bcdebits = 0x1; break;
13655 case 32: bcdebits = 0x0; break;
13656 default: ;
13657 }
13658
13659 bcdebits |= x << logsize;
13660
13661 inst.instruction = 0xe000b10;
13662 do_vfp_cond_or_thumb ();
13663 inst.instruction |= LOW4 (dn) << 16;
13664 inst.instruction |= HI1 (dn) << 7;
13665 inst.instruction |= inst.operands[1].reg << 12;
13666 inst.instruction |= (bcdebits & 3) << 5;
13667 inst.instruction |= (bcdebits >> 2) << 21;
13668 }
13669 break;
13670
13671 case NS_DRR: /* case 5 (fmdrr). */
13672 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13673 _(BAD_FPU));
13674
13675 inst.instruction = 0xc400b10;
13676 do_vfp_cond_or_thumb ();
13677 inst.instruction |= LOW4 (inst.operands[0].reg);
13678 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
13679 inst.instruction |= inst.operands[1].reg << 12;
13680 inst.instruction |= inst.operands[2].reg << 16;
13681 break;
13682
13683 case NS_RS: /* case 6. */
13684 {
13685 struct neon_type_el et = neon_check_type (2, NS_NULL,
13686 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
13687 unsigned logsize = neon_logbits (et.size);
13688 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
13689 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
13690 unsigned abcdebits = 0;
13691
13692 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13693 _(BAD_FPU));
13694 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13695 && et.size != 32, _(BAD_FPU));
13696 constraint (et.type == NT_invtype, _("bad type for scalar"));
13697 constraint (x >= 64 / et.size, _("scalar index out of range"));
13698
13699 switch (et.size)
13700 {
13701 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
13702 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
13703 case 32: abcdebits = 0x00; break;
13704 default: ;
13705 }
13706
13707 abcdebits |= x << logsize;
13708 inst.instruction = 0xe100b10;
13709 do_vfp_cond_or_thumb ();
13710 inst.instruction |= LOW4 (dn) << 16;
13711 inst.instruction |= HI1 (dn) << 7;
13712 inst.instruction |= inst.operands[0].reg << 12;
13713 inst.instruction |= (abcdebits & 3) << 5;
13714 inst.instruction |= (abcdebits >> 2) << 21;
13715 }
13716 break;
13717
13718 case NS_RRD: /* case 7 (fmrrd). */
13719 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13720 _(BAD_FPU));
13721
13722 inst.instruction = 0xc500b10;
13723 do_vfp_cond_or_thumb ();
13724 inst.instruction |= inst.operands[0].reg << 12;
13725 inst.instruction |= inst.operands[1].reg << 16;
13726 inst.instruction |= LOW4 (inst.operands[2].reg);
13727 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13728 break;
13729
13730 case NS_FF: /* case 8 (fcpys). */
13731 do_vfp_nsyn_opcode ("fcpys");
13732 break;
13733
13734 case NS_FI: /* case 10 (fconsts). */
13735 ldconst = "fconsts";
13736 encode_fconstd:
13737 if (is_quarter_float (inst.operands[1].imm))
13738 {
13739 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
13740 do_vfp_nsyn_opcode (ldconst);
13741 }
13742 else
13743 first_error (_("immediate out of range"));
13744 break;
13745
13746 case NS_RF: /* case 12 (fmrs). */
13747 do_vfp_nsyn_opcode ("fmrs");
13748 break;
13749
13750 case NS_FR: /* case 13 (fmsr). */
13751 do_vfp_nsyn_opcode ("fmsr");
13752 break;
13753
13754 /* The encoders for the fmrrs and fmsrr instructions expect three operands
13755 (one of which is a list), but we have parsed four. Do some fiddling to
13756 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
13757 expect. */
13758 case NS_RRFF: /* case 14 (fmrrs). */
13759 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
13760 _("VFP registers must be adjacent"));
13761 inst.operands[2].imm = 2;
13762 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13763 do_vfp_nsyn_opcode ("fmrrs");
13764 break;
13765
13766 case NS_FFRR: /* case 15 (fmsrr). */
13767 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
13768 _("VFP registers must be adjacent"));
13769 inst.operands[1] = inst.operands[2];
13770 inst.operands[2] = inst.operands[3];
13771 inst.operands[0].imm = 2;
13772 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13773 do_vfp_nsyn_opcode ("fmsrr");
13774 break;
13775
13776 default:
13777 abort ();
13778 }
13779 }
13780
13781 static void
13782 do_neon_rshift_round_imm (void)
13783 {
13784 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13785 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13786 int imm = inst.operands[2].imm;
13787
13788 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
13789 if (imm == 0)
13790 {
13791 inst.operands[2].present = 0;
13792 do_neon_mov ();
13793 return;
13794 }
13795
13796 constraint (imm < 1 || (unsigned)imm > et.size,
13797 _("immediate out of range for shift"));
13798 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13799 et.size - imm);
13800 }
13801
13802 static void
13803 do_neon_movl (void)
13804 {
13805 struct neon_type_el et = neon_check_type (2, NS_QD,
13806 N_EQK | N_DBL, N_SU_32 | N_KEY);
13807 unsigned sizebits = et.size >> 3;
13808 inst.instruction |= sizebits << 19;
13809 neon_two_same (0, et.type == NT_unsigned, -1);
13810 }
13811
13812 static void
13813 do_neon_trn (void)
13814 {
13815 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13816 struct neon_type_el et = neon_check_type (2, rs,
13817 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13818 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13819 neon_two_same (neon_quad (rs), 1, et.size);
13820 }
13821
13822 static void
13823 do_neon_zip_uzp (void)
13824 {
13825 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13826 struct neon_type_el et = neon_check_type (2, rs,
13827 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13828 if (rs == NS_DD && et.size == 32)
13829 {
13830 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13831 inst.instruction = N_MNEM_vtrn;
13832 do_neon_trn ();
13833 return;
13834 }
13835 neon_two_same (neon_quad (rs), 1, et.size);
13836 }
13837
13838 static void
13839 do_neon_sat_abs_neg (void)
13840 {
13841 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13842 struct neon_type_el et = neon_check_type (2, rs,
13843 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13844 neon_two_same (neon_quad (rs), 1, et.size);
13845 }
13846
13847 static void
13848 do_neon_pair_long (void)
13849 {
13850 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13851 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
13852 /* Unsigned is encoded in OP field (bit 7) for these instructions. */
13853 inst.instruction |= (et.type == NT_unsigned) << 7;
13854 neon_two_same (neon_quad (rs), 1, et.size);
13855 }
13856
13857 static void
13858 do_neon_recip_est (void)
13859 {
13860 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13861 struct neon_type_el et = neon_check_type (2, rs,
13862 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
13863 inst.instruction |= (et.type == NT_float) << 8;
13864 neon_two_same (neon_quad (rs), 1, et.size);
13865 }
13866
13867 static void
13868 do_neon_cls (void)
13869 {
13870 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13871 struct neon_type_el et = neon_check_type (2, rs,
13872 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13873 neon_two_same (neon_quad (rs), 1, et.size);
13874 }
13875
13876 static void
13877 do_neon_clz (void)
13878 {
13879 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13880 struct neon_type_el et = neon_check_type (2, rs,
13881 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
13882 neon_two_same (neon_quad (rs), 1, et.size);
13883 }
13884
13885 static void
13886 do_neon_cnt (void)
13887 {
13888 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13889 struct neon_type_el et = neon_check_type (2, rs,
13890 N_EQK | N_INT, N_8 | N_KEY);
13891 neon_two_same (neon_quad (rs), 1, et.size);
13892 }
13893
13894 static void
13895 do_neon_swp (void)
13896 {
13897 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13898 neon_two_same (neon_quad (rs), 1, -1);
13899 }
13900
13901 static void
13902 do_neon_tbl_tbx (void)
13903 {
13904 unsigned listlenbits;
13905 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
13906
13907 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
13908 {
13909 first_error (_("bad list length for table lookup"));
13910 return;
13911 }
13912
13913 listlenbits = inst.operands[1].imm - 1;
13914 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13915 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13916 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13917 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13918 inst.instruction |= LOW4 (inst.operands[2].reg);
13919 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13920 inst.instruction |= listlenbits << 8;
13921
13922 inst.instruction = neon_dp_fixup (inst.instruction);
13923 }
13924
13925 static void
13926 do_neon_ldm_stm (void)
13927 {
13928 /* P, U and L bits are part of bitmask. */
13929 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
13930 unsigned offsetbits = inst.operands[1].imm * 2;
13931
13932 if (inst.operands[1].issingle)
13933 {
13934 do_vfp_nsyn_ldm_stm (is_dbmode);
13935 return;
13936 }
13937
13938 constraint (is_dbmode && !inst.operands[0].writeback,
13939 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13940
13941 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
13942 _("register list must contain at least 1 and at most 16 "
13943 "registers"));
13944
13945 inst.instruction |= inst.operands[0].reg << 16;
13946 inst.instruction |= inst.operands[0].writeback << 21;
13947 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13948 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
13949
13950 inst.instruction |= offsetbits;
13951
13952 do_vfp_cond_or_thumb ();
13953 }
13954
13955 static void
13956 do_neon_ldr_str (void)
13957 {
13958 int is_ldr = (inst.instruction & (1 << 20)) != 0;
13959
13960 if (inst.operands[0].issingle)
13961 {
13962 if (is_ldr)
13963 do_vfp_nsyn_opcode ("flds");
13964 else
13965 do_vfp_nsyn_opcode ("fsts");
13966 }
13967 else
13968 {
13969 if (is_ldr)
13970 do_vfp_nsyn_opcode ("fldd");
13971 else
13972 do_vfp_nsyn_opcode ("fstd");
13973 }
13974 }
13975
13976 /* "interleave" version also handles non-interleaving register VLD1/VST1
13977 instructions. */
13978
13979 static void
13980 do_neon_ld_st_interleave (void)
13981 {
13982 struct neon_type_el et = neon_check_type (1, NS_NULL,
13983 N_8 | N_16 | N_32 | N_64);
13984 unsigned alignbits = 0;
13985 unsigned idx;
13986 /* The bits of the index into this table are:
13987 0: register stride of one (0) or two (1)
13988 1,2: register list length, minus one (1, 2, 3, 4).
13989 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13990 We use -1 for invalid entries. */
13991 const int typetable[] =
13992 {
13993 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13994 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13995 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
13996 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
13997 };
13998 int typebits;
13999
14000 if (et.type == NT_invtype)
14001 return;
14002
14003 if (inst.operands[1].immisalign)
14004 switch (inst.operands[1].imm >> 8)
14005 {
14006 case 64: alignbits = 1; break;
14007 case 128:
14008 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
14009 goto bad_alignment;
14010 alignbits = 2;
14011 break;
14012 case 256:
14013 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
14014 goto bad_alignment;
14015 alignbits = 3;
14016 break;
14017 default:
14018 bad_alignment:
14019 first_error (_("bad alignment"));
14020 return;
14021 }
14022
14023 inst.instruction |= alignbits << 4;
14024 inst.instruction |= neon_logbits (et.size) << 6;
14025
14026 /* Bits [4:6] of the immediate in a list specifier encode register stride
14027 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
14028 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
14029 up the right value for "type" in a table based on this value and the given
14030 list style, then stick it back. */
14031 idx = ((inst.operands[0].imm >> 4) & 7)
14032 | (((inst.instruction >> 8) & 3) << 3);
14033
14034 typebits = typetable[idx];
14035
14036 constraint (typebits == -1, _("bad list type for instruction"));
14037
14038 inst.instruction &= ~0xf00;
14039 inst.instruction |= typebits << 8;
14040 }
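
/* Editor's illustration (not used by the assembler): how the index into the
   typetable used in do_neon_ld_st_interleave above is formed.  The real code
   pulls the stride and length bits out of inst.operands[0].imm and the <n>
   bits out of the initial opcode bitmask; the hypothetical helper below takes
   them as plain arguments instead.  */

static int
example_neon_list_type (int stride, int nregs, int n)
{
  /* Same contents and layout as typetable in do_neon_ld_st_interleave.  */
  static const int example_table[] =
  {
    0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
     -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
     -1, -1,  -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3.  */
     -1, -1,  -1, -1,  -1, -1, 0x0, 0x1 /* VLD4 / VST4.  */
  };
  int idx = (stride == 2)        /* Bit 0: register stride minus one.  */
            | ((nregs - 1) << 1) /* Bits 1-2: list length minus one.  */
            | ((n - 1) << 3);    /* Bits 3-4: <n> minus one.  */

  return example_table[idx];
}

/* For "vld2.16 {d0, d2}, [r0]" (two registers, stride 2, n == 2) this gives
   index 11 and type 0x9, which is the value written into bits [11:8] of the
   instruction.  */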
14041
14042 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
14043 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
14044 otherwise. The variable arguments are a list of pairs of legal (size, align)
14045 values, terminated with -1. */
14046
14047 static int
14048 neon_alignment_bit (int size, int align, int *do_align, ...)
14049 {
14050 va_list ap;
14051 int result = FAIL, thissize, thisalign;
14052
14053 if (!inst.operands[1].immisalign)
14054 {
14055 *do_align = 0;
14056 return SUCCESS;
14057 }
14058
14059 va_start (ap, do_align);
14060
14061 do
14062 {
14063 thissize = va_arg (ap, int);
14064 if (thissize == -1)
14065 break;
14066 thisalign = va_arg (ap, int);
14067
14068 if (size == thissize && align == thisalign)
14069 result = SUCCESS;
14070 }
14071 while (result != SUCCESS);
14072
14073 va_end (ap);
14074
14075 if (result == SUCCESS)
14076 *do_align = 1;
14077 else
14078 first_error (_("unsupported alignment for instruction"));
14079
14080 return result;
14081 }
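
/* Editor's illustration (not part of the assembler): a self-contained
   analogue of the variadic (size, align) scan above, showing the stdarg
   idiom of walking value pairs until the -1 sentinel.  Only <stdarg.h>,
   which this file already uses, is needed; the names are invented for the
   example.  */

static int
example_pair_match (int size, int align, ...)
{
  va_list ap;
  int found = 0;
  int thissize, thisalign;

  va_start (ap, align);
  for (;;)
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
        break;
      thisalign = va_arg (ap, int);
      if (size == thissize && align == thisalign)
        {
          found = 1;
          break;
        }
    }
  va_end (ap);

  return found;
}

/* example_pair_match (16, 16, 16, 16, 32, 32, -1) returns 1 (size 16 with
   alignment 16 is in the list), while example_pair_match (8, 16, 16, 16,
   32, 32, -1) returns 0; these are the same pairs that do_neon_ld_st_lane
   below passes for the VLD1/VST1 lane forms.  */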
14082
14083 static void
14084 do_neon_ld_st_lane (void)
14085 {
14086 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
14087 int align_good, do_align = 0;
14088 int logsize = neon_logbits (et.size);
14089 int align = inst.operands[1].imm >> 8;
14090 int n = (inst.instruction >> 8) & 3;
14091 int max_el = 64 / et.size;
14092
14093 if (et.type == NT_invtype)
14094 return;
14095
14096 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
14097 _("bad list length"));
14098 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
14099 _("scalar index out of range"));
14100 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
14101 && et.size == 8,
14102 _("stride of 2 unavailable when element size is 8"));
14103
14104 switch (n)
14105 {
14106 case 0: /* VLD1 / VST1. */
14107 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
14108 32, 32, -1);
14109 if (align_good == FAIL)
14110 return;
14111 if (do_align)
14112 {
14113 unsigned alignbits = 0;
14114 switch (et.size)
14115 {
14116 case 16: alignbits = 0x1; break;
14117 case 32: alignbits = 0x3; break;
14118 default: ;
14119 }
14120 inst.instruction |= alignbits << 4;
14121 }
14122 break;
14123
14124 case 1: /* VLD2 / VST2. */
14125 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
14126 32, 64, -1);
14127 if (align_good == FAIL)
14128 return;
14129 if (do_align)
14130 inst.instruction |= 1 << 4;
14131 break;
14132
14133 case 2: /* VLD3 / VST3. */
14134 constraint (inst.operands[1].immisalign,
14135 _("can't use alignment with this instruction"));
14136 break;
14137
14138 case 3: /* VLD4 / VST4. */
14139 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
14140 16, 64, 32, 64, 32, 128, -1);
14141 if (align_good == FAIL)
14142 return;
14143 if (do_align)
14144 {
14145 unsigned alignbits = 0;
14146 switch (et.size)
14147 {
14148 case 8: alignbits = 0x1; break;
14149 case 16: alignbits = 0x1; break;
14150 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
14151 default: ;
14152 }
14153 inst.instruction |= alignbits << 4;
14154 }
14155 break;
14156
14157 default: ;
14158 }
14159
14160 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
14161 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14162 inst.instruction |= 1 << (4 + logsize);
14163
14164 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
14165 inst.instruction |= logsize << 10;
14166 }
14167
14168 /* Encode single n-element structure to all lanes VLD<n> instructions. */
14169
14170 static void
14171 do_neon_ld_dup (void)
14172 {
14173 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
14174 int align_good, do_align = 0;
14175
14176 if (et.type == NT_invtype)
14177 return;
14178
14179 switch ((inst.instruction >> 8) & 3)
14180 {
14181 case 0: /* VLD1. */
14182 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
14183 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
14184 &do_align, 16, 16, 32, 32, -1);
14185 if (align_good == FAIL)
14186 return;
14187 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
14188 {
14189 case 1: break;
14190 case 2: inst.instruction |= 1 << 5; break;
14191 default: first_error (_("bad list length")); return;
14192 }
14193 inst.instruction |= neon_logbits (et.size) << 6;
14194 break;
14195
14196 case 1: /* VLD2. */
14197 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
14198 &do_align, 8, 16, 16, 32, 32, 64, -1);
14199 if (align_good == FAIL)
14200 return;
14201 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
14202 _("bad list length"));
14203 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14204 inst.instruction |= 1 << 5;
14205 inst.instruction |= neon_logbits (et.size) << 6;
14206 break;
14207
14208 case 2: /* VLD3. */
14209 constraint (inst.operands[1].immisalign,
14210 _("can't use alignment with this instruction"));
14211 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
14212 _("bad list length"));
14213 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14214 inst.instruction |= 1 << 5;
14215 inst.instruction |= neon_logbits (et.size) << 6;
14216 break;
14217
14218 case 3: /* VLD4. */
14219 {
14220 int align = inst.operands[1].imm >> 8;
14221 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
14222 16, 64, 32, 64, 32, 128, -1);
14223 if (align_good == FAIL)
14224 return;
14225 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
14226 _("bad list length"));
14227 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
14228 inst.instruction |= 1 << 5;
14229 if (et.size == 32 && align == 128)
14230 inst.instruction |= 0x3 << 6;
14231 else
14232 inst.instruction |= neon_logbits (et.size) << 6;
14233 }
14234 break;
14235
14236 default: ;
14237 }
14238
14239 inst.instruction |= do_align << 4;
14240 }
14241
14242 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
14243 apart from bits [11:4]). */
14244
14245 static void
14246 do_neon_ldx_stx (void)
14247 {
14248 switch (NEON_LANE (inst.operands[0].imm))
14249 {
14250 case NEON_INTERLEAVE_LANES:
14251 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
14252 do_neon_ld_st_interleave ();
14253 break;
14254
14255 case NEON_ALL_LANES:
14256 inst.instruction = NEON_ENC_DUP (inst.instruction);
14257 do_neon_ld_dup ();
14258 break;
14259
14260 default:
14261 inst.instruction = NEON_ENC_LANE (inst.instruction);
14262 do_neon_ld_st_lane ();
14263 }
14264
14265 /* L bit comes from bit mask. */
14266 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14267 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14268 inst.instruction |= inst.operands[1].reg << 16;
14269
14270 if (inst.operands[1].postind)
14271 {
14272 int postreg = inst.operands[1].imm & 0xf;
14273 constraint (!inst.operands[1].immisreg,
14274 _("post-index must be a register"));
14275 constraint (postreg == 0xd || postreg == 0xf,
14276 _("bad register for post-index"));
14277 inst.instruction |= postreg;
14278 }
14279 else if (inst.operands[1].writeback)
14280 {
14281 inst.instruction |= 0xd;
14282 }
14283 else
14284 inst.instruction |= 0xf;
14285
14286 if (thumb_mode)
14287 inst.instruction |= 0xf9000000;
14288 else
14289 inst.instruction |= 0xf4000000;
14290 }
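
/* Editor's note (illustrative only): the Rm field chosen at the end of
   do_neon_ldx_stx above follows the ARM ARM convention for VLDn/VSTn
   addressing: 0xf means no writeback ("[Rn]"), 0xd means writeback
   ("[Rn]!"), and any other value is a post-index register ("[Rn], Rm").
   The enum and helper below are invented just to restate that mapping.  */

enum example_neon_addr
{
  EXAMPLE_NEON_ADDR_PLAIN,      /* [Rn]      */
  EXAMPLE_NEON_ADDR_WRITEBACK,  /* [Rn]!     */
  EXAMPLE_NEON_ADDR_POSTINDEX   /* [Rn], Rm  */
};

static unsigned
example_neon_rm_field (enum example_neon_addr form, unsigned postreg)
{
  switch (form)
    {
    case EXAMPLE_NEON_ADDR_POSTINDEX:
      return postreg & 0xf;     /* Rm itself; sp (0xd) and pc (0xf) are rejected.  */
    case EXAMPLE_NEON_ADDR_WRITEBACK:
      return 0xd;
    default:
      return 0xf;
    }
}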
14291 \f
14292 /* Overall per-instruction processing. */
14293
14294 /* We need to be able to fix up arbitrary expressions in some statements.
14295 This is so that we can handle symbols that are an arbitrary distance from
14296 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
14297 which returns part of an address in a form which will be valid for
14298 a data instruction. We do this by pushing the expression into a symbol
14299 in the expr_section, and creating a fix for that. */
14300
14301 static void
14302 fix_new_arm (fragS * frag,
14303 int where,
14304 short int size,
14305 expressionS * exp,
14306 int pc_rel,
14307 int reloc)
14308 {
14309 fixS * new_fix;
14310
14311 switch (exp->X_op)
14312 {
14313 case O_constant:
14314 case O_symbol:
14315 case O_add:
14316 case O_subtract:
14317 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
14318 break;
14319
14320 default:
14321 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
14322 pc_rel, reloc);
14323 break;
14324 }
14325
14326 /* Mark whether the fix is to a THUMB instruction, or an ARM
14327 instruction. */
14328 new_fix->tc_fix_data = thumb_mode;
14329 }
14330
14331 /* Create a frag for an instruction requiring relaxation. */
14332 static void
14333 output_relax_insn (void)
14334 {
14335 char * to;
14336 symbolS *sym;
14337 int offset;
14338
14339 /* The size of the instruction is unknown, so tie the debug info to the
14340 start of the instruction. */
14341 dwarf2_emit_insn (0);
14342
14343 switch (inst.reloc.exp.X_op)
14344 {
14345 case O_symbol:
14346 sym = inst.reloc.exp.X_add_symbol;
14347 offset = inst.reloc.exp.X_add_number;
14348 break;
14349 case O_constant:
14350 sym = NULL;
14351 offset = inst.reloc.exp.X_add_number;
14352 break;
14353 default:
14354 sym = make_expr_symbol (&inst.reloc.exp);
14355 offset = 0;
14356 break;
14357 }
14358 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
14359 inst.relax, sym, offset, NULL/*offset, opcode*/);
14360 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
14361 }
14362
14363 /* Write a 32-bit thumb instruction to buf. */
14364 static void
14365 put_thumb32_insn (char * buf, unsigned long insn)
14366 {
14367 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
14368 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
14369 }
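
/* Editor's sketch (not part of the assembler): a 32-bit Thumb instruction is
   emitted as two consecutive 16-bit units, most significant halfword first,
   each halfword in the target's byte order.  put_thumb32_insn above delegates
   the byte order to md_number_to_chars; the stand-alone version below assumes
   a little-endian target to make the layout concrete.  */

static void
example_put_thumb32_le (unsigned char *buf, unsigned long insn)
{
  /* High halfword first...  */
  buf[0] = (insn >> 16) & 0xff;
  buf[1] = (insn >> 24) & 0xff;
  /* ...then the low halfword.  */
  buf[2] = insn & 0xff;
  buf[3] = (insn >> 8) & 0xff;
}

/* For insn == 0xf000f800 this produces the bytes 00 f0 00 f8, i.e. the
   halfwords 0xf000 and 0xf800 in that order.  */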
14370
14371 static void
14372 output_inst (const char * str)
14373 {
14374 char * to = NULL;
14375
14376 if (inst.error)
14377 {
14378 as_bad ("%s -- `%s'", inst.error, str);
14379 return;
14380 }
14381 if (inst.relax)
14382 {
14383 output_relax_insn ();
14384 return;
14385 }
14386 if (inst.size == 0)
14387 return;
14388
14389 to = frag_more (inst.size);
14390
14391 if (thumb_mode && (inst.size > THUMB_SIZE))
14392 {
14393 assert (inst.size == (2 * THUMB_SIZE));
14394 put_thumb32_insn (to, inst.instruction);
14395 }
14396 else if (inst.size > INSN_SIZE)
14397 {
14398 assert (inst.size == (2 * INSN_SIZE));
14399 md_number_to_chars (to, inst.instruction, INSN_SIZE);
14400 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
14401 }
14402 else
14403 md_number_to_chars (to, inst.instruction, inst.size);
14404
14405 if (inst.reloc.type != BFD_RELOC_UNUSED)
14406 fix_new_arm (frag_now, to - frag_now->fr_literal,
14407 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
14408 inst.reloc.type);
14409
14410 dwarf2_emit_insn (inst.size);
14411 }
14412
14413 /* Tag values used in struct asm_opcode's tag field. */
14414 enum opcode_tag
14415 {
14416 OT_unconditional, /* Instruction cannot be conditionalized.
14417 The ARM condition field is still 0xE. */
14418 OT_unconditionalF, /* Instruction cannot be conditionalized
14419 and carries 0xF in its ARM condition field. */
14420 OT_csuffix, /* Instruction takes a conditional suffix. */
14421 OT_csuffixF, /* Some forms of the instruction take a conditional
14422 suffix, others place 0xF where the condition field
14423 would be. */
14424 OT_cinfix3, /* Instruction takes a conditional infix,
14425 beginning at character index 3. (In
14426 unified mode, it becomes a suffix.) */
14427 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
14428 tsts, cmps, cmns, and teqs. */
14429 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
14430 character index 3, even in unified mode. Used for
14431 legacy instructions where suffix and infix forms
14432 may be ambiguous. */
14433 OT_csuf_or_in3, /* Instruction takes either a conditional
14434 suffix or an infix at character index 3. */
14435 OT_odd_infix_unc, /* This is the unconditional variant of an
14436 instruction that takes a conditional infix
14437 at an unusual position. In unified mode,
14438 this variant will accept a suffix. */
14439 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
14440 are the conditional variants of instructions that
14441 take conditional infixes in unusual positions.
14442 The infix appears at character index
14443 (tag - OT_odd_infix_0). These are not accepted
14444 in unified mode. */
14445 };
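
/* Editor's illustration (not referenced by the assembler): a few concrete
   spellings and the tag carried by the opcode table entry that the lookup
   routine below ends up matching for them.  The struct and array are
   invented purely to make the tags above concrete.  */

static const struct
{
  const char *spelling; /* What the user writes.  */
  int matched_tag;      /* Tag of the matched opcode table entry.  */
} example_opcode_tags[] =
{
  { "addeq",  OT_csuffix },            /* Matches "add"; "eq" is a suffix.  */
  { "andeqs", OT_cinfix3 },            /* Divided syntax: matches "ands" with
                                          "eq" infixed after character 3.  */
  { "cmpeqs", OT_cinfix3_deprecated }, /* Matches "cmps"; infixed, and the s
                                          spelling is deprecated.  */
  { "ldeqsh", OT_odd_infix_0 + 2 },    /* Matches the literal "ldeqsh" entry;
                                          the condition sits at index 2.  */
};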
14446
14447 /* Subroutine of md_assemble, responsible for looking up the primary
14448 opcode from the mnemonic the user wrote. STR points to the
14449 beginning of the mnemonic.
14450
14451 This is not simply a hash table lookup, because of conditional
14452 variants. Most instructions have conditional variants, which are
14453 expressed with a _conditional affix_ to the mnemonic. If we were
14454 to encode each conditional variant as a literal string in the opcode
14455 table, it would have approximately 20,000 entries.
14456
14457 Most mnemonics take this affix as a suffix, and in unified syntax,
14458 'most' is upgraded to 'all'. However, in the divided syntax, some
14459 instructions take the affix as an infix, notably the s-variants of
14460 the arithmetic instructions. Of those instructions, all but six
14461 have the infix appear after the third character of the mnemonic.
14462
14463 Accordingly, the algorithm for looking up primary opcodes given
14464 an identifier is:
14465
14466 1. Look up the identifier in the opcode table.
14467 If we find a match, go to step U.
14468
14469 2. Look up the last two characters of the identifier in the
14470 conditions table. If we find a match, look up the first N-2
14471 characters of the identifier in the opcode table. If we
14472 find a match, go to step CE.
14473
14474 3. Look up the fourth and fifth characters of the identifier in
14475 the conditions table. If we find a match, extract those
14476 characters from the identifier, and look up the remaining
14477 characters in the opcode table. If we find a match, go
14478 to step CM.
14479
14480 4. Fail.
14481
14482 U. Examine the tag field of the opcode structure, in case this is
14483 one of the six instructions with its conditional infix in an
14484 unusual place. If it is, the tag tells us where to find the
14485 infix; look it up in the conditions table and set inst.cond
14486 accordingly. Otherwise, this is an unconditional instruction.
14487 Again set inst.cond accordingly. Return the opcode structure.
14488
14489 CE. Examine the tag field to make sure this is an instruction that
14490 should receive a conditional suffix. If it is not, fail.
14491 Otherwise, set inst.cond from the suffix we already looked up,
14492 and return the opcode structure.
14493
14494 CM. Examine the tag field to make sure this is an instruction that
14495 should receive a conditional infix after the third character.
14496 If it is not, fail. Otherwise, undo the edits to the current
14497 line of input and proceed as for case CE. */
14498
14499 static const struct asm_opcode *
14500 opcode_lookup (char **str)
14501 {
14502 char *end, *base;
14503 char *affix;
14504 const struct asm_opcode *opcode;
14505 const struct asm_cond *cond;
14506 char save[2];
14507 bfd_boolean neon_supported;
14508
14509 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);
14510
14511 /* Scan up to the end of the mnemonic, which must end in white space,
14512 '.' (in unified mode, or for Neon instructions), or end of string. */
14513 for (base = end = *str; *end != '\0'; end++)
14514 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
14515 break;
14516
14517 if (end == base)
14518 return 0;
14519
14520 /* Handle a possible width suffix and/or Neon type suffix. */
14521 if (end[0] == '.')
14522 {
14523 int offset = 2;
14524
14525 /* The .w and .n suffixes are only valid if the unified syntax is in
14526 use. */
14527 if (unified_syntax && end[1] == 'w')
14528 inst.size_req = 4;
14529 else if (unified_syntax && end[1] == 'n')
14530 inst.size_req = 2;
14531 else
14532 offset = 0;
14533
14534 inst.vectype.elems = 0;
14535
14536 *str = end + offset;
14537
14538 if (end[offset] == '.')
14539 {
14540 /* See if we have a Neon type suffix (possible in either unified or
14541 non-unified ARM syntax mode). */
14542 if (parse_neon_type (&inst.vectype, str) == FAIL)
14543 return 0;
14544 }
14545 else if (end[offset] != '\0' && end[offset] != ' ')
14546 return 0;
14547 }
14548 else
14549 *str = end;
14550
14551 /* Look for unaffixed or special-case affixed mnemonic. */
14552 opcode = hash_find_n (arm_ops_hsh, base, end - base);
14553 if (opcode)
14554 {
14555 /* step U */
14556 if (opcode->tag < OT_odd_infix_0)
14557 {
14558 inst.cond = COND_ALWAYS;
14559 return opcode;
14560 }
14561
14562 if (warn_on_deprecated && unified_syntax)
14563 as_warn (_("conditional infixes are deprecated in unified syntax"));
14564 affix = base + (opcode->tag - OT_odd_infix_0);
14565 cond = hash_find_n (arm_cond_hsh, affix, 2);
14566 assert (cond);
14567
14568 inst.cond = cond->value;
14569 return opcode;
14570 }
14571
14572 /* Cannot have a conditional suffix on a mnemonic of fewer than three
14573 characters (one base character plus the two-character condition). */
14574 if (end - base < 3)
14575 return 0;
14576
14577 /* Look for suffixed mnemonic. */
14578 affix = end - 2;
14579 cond = hash_find_n (arm_cond_hsh, affix, 2);
14580 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
14581 if (opcode && cond)
14582 {
14583 /* step CE */
14584 switch (opcode->tag)
14585 {
14586 case OT_cinfix3_legacy:
14587 /* Ignore conditional suffixes matched on infix only mnemonics. */
14588 break;
14589
14590 case OT_cinfix3:
14591 case OT_cinfix3_deprecated:
14592 case OT_odd_infix_unc:
14593 if (!unified_syntax)
14594 return 0;
14595 /* else fall through */
14596
14597 case OT_csuffix:
14598 case OT_csuffixF:
14599 case OT_csuf_or_in3:
14600 inst.cond = cond->value;
14601 return opcode;
14602
14603 case OT_unconditional:
14604 case OT_unconditionalF:
14605 if (thumb_mode)
14606 {
14607 inst.cond = cond->value;
14608 }
14609 else
14610 {
14611 /* delayed diagnostic */
14612 inst.error = BAD_COND;
14613 inst.cond = COND_ALWAYS;
14614 }
14615 return opcode;
14616
14617 default:
14618 return 0;
14619 }
14620 }
14621
14622 /* Cannot have a usual-position infix on a mnemonic of less than
14623 six characters (five would be a suffix). */
14624 if (end - base < 6)
14625 return 0;
14626
14627 /* Look for infixed mnemonic in the usual position. */
14628 affix = base + 3;
14629 cond = hash_find_n (arm_cond_hsh, affix, 2);
14630 if (!cond)
14631 return 0;
14632
14633 memcpy (save, affix, 2);
14634 memmove (affix, affix + 2, (end - affix) - 2);
14635 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
14636 memmove (affix + 2, affix, (end - affix) - 2);
14637 memcpy (affix, save, 2);
14638
14639 if (opcode
14640 && (opcode->tag == OT_cinfix3
14641 || opcode->tag == OT_cinfix3_deprecated
14642 || opcode->tag == OT_csuf_or_in3
14643 || opcode->tag == OT_cinfix3_legacy))
14644 {
14645 /* step CM */
14646 if (warn_on_deprecated && unified_syntax
14647 && (opcode->tag == OT_cinfix3
14648 || opcode->tag == OT_cinfix3_deprecated))
14649 as_warn (_("conditional infixes are deprecated in unified syntax"));
14650
14651 inst.cond = cond->value;
14652 return opcode;
14653 }
14654
14655 return 0;
14656 }
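
/* Editor's sketch (not used by the assembler): the string surgery that
   opcode_lookup performs for the usual-position infix case (step CM,
   described before opcode_lookup above), shown on an ordinary
   NUL-terminated string.  The real code operates on the raw input line,
   checks both halves against the condition and opcode hash tables, and then
   restores the line with the reverse memmove/memcpy; the invented helper
   below shows only the extraction step, using the same string functions
   already used above.  */

static void
example_extract_infix (char *mnem, char cond_out[2])
{
  size_t len = strlen (mnem);

  /* Save the two condition characters at index 3 ("andeqs" -> "eq").  */
  memcpy (cond_out, mnem + 3, 2);
  /* Close the gap, keeping the trailing NUL: "andeqs" becomes "ands".  */
  memmove (mnem + 3, mnem + 5, len - 5 + 1);
}

/* With char buf[] = "andeqs", example_extract_infix (buf, cond) leaves buf
   holding "ands" and cond holding the characters 'e' and 'q'.  */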
14657
14658 void
14659 md_assemble (char *str)
14660 {
14661 char *p = str;
14662 const struct asm_opcode * opcode;
14663
14664 /* Align the previous label if needed. */
14665 if (last_label_seen != NULL)
14666 {
14667 symbol_set_frag (last_label_seen, frag_now);
14668 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
14669 S_SET_SEGMENT (last_label_seen, now_seg);
14670 }
14671
14672 memset (&inst, '\0', sizeof (inst));
14673 inst.reloc.type = BFD_RELOC_UNUSED;
14674
14675 opcode = opcode_lookup (&p);
14676 if (!opcode)
14677 {
14678 /* It wasn't an instruction, but it might be a register alias of
14679 the form alias .req reg, or a Neon .dn/.qn directive. */
14680 if (!create_register_alias (str, p)
14681 && !create_neon_reg_alias (str, p))
14682 as_bad (_("bad instruction `%s'"), str);
14683
14684 return;
14685 }
14686
14687 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
14688 as_warn (_("s suffix on comparison instruction is deprecated"));
14689
14690 /* The value which unconditional instructions should have in place of the
14691 condition field. */
14692 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
14693
14694 if (thumb_mode)
14695 {
14696 arm_feature_set variant;
14697
14698 variant = cpu_variant;
14699 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
14700 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
14701 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
14702 /* Check that this instruction is supported for this CPU. */
14703 if (!opcode->tvariant
14704 || (thumb_mode == 1
14705 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
14706 {
14707 as_bad (_("selected processor does not support `%s'"), str);
14708 return;
14709 }
14710 if (inst.cond != COND_ALWAYS && !unified_syntax
14711 && opcode->tencode != do_t_branch)
14712 {
14713 as_bad (_("Thumb does not support conditional execution"));
14714 return;
14715 }
14716
14717 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2) && !inst.size_req)
14718 {
14719 /* Implicitly require narrow instructions on Thumb-1. This avoids
14720 relaxation accidentally introducing Thumb-2 instructions. */
14721 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
14722 && !ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr))
14723 inst.size_req = 2;
14724 }
14725
14726 /* Check conditional suffixes. */
14727 if (current_it_mask)
14728 {
14729 int cond;
14730 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
14731 current_it_mask <<= 1;
14732 current_it_mask &= 0x1f;
14733 /* The BKPT instruction is unconditional even in an IT block. */
14734 if (!inst.error
14735 && cond != inst.cond && opcode->tencode != do_t_bkpt)
14736 {
14737 as_bad (_("incorrect condition in IT block"));
14738 return;
14739 }
14740 }
14741 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
14742 {
14743 as_bad (_("thumb conditional instruction not in IT block"));
14744 return;
14745 }
14746
14747 mapping_state (MAP_THUMB);
14748 inst.instruction = opcode->tvalue;
14749
14750 if (!parse_operands (p, opcode->operands))
14751 opcode->tencode ();
14752
14753 /* Clear current_it_mask at the end of an IT block. */
14754 if (current_it_mask == 0x10)
14755 current_it_mask = 0;
14756
14757 if (!(inst.error || inst.relax))
14758 {
14759 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
14760 inst.size = (inst.instruction > 0xffff ? 4 : 2);
14761 if (inst.size_req && inst.size_req != inst.size)
14762 {
14763 as_bad (_("cannot honor width suffix -- `%s'"), str);
14764 return;
14765 }
14766 }
14767
14768 /* Something has gone badly wrong if we try to relax a fixed size
14769 instruction. */
14770 assert (inst.size_req == 0 || !inst.relax);
14771
14772 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14773 *opcode->tvariant);
14774 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
14775 set those bits when Thumb-2 32-bit instructions are seen, i.e.
14776 anything other than bl/blx and v6-M instructions.
14777 This is overly pessimistic for relaxable instructions. */
14778 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
14779 || inst.relax)
14780 && !ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr))
14781 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14782 arm_ext_v6t2);
14783 }
14784 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
14785 {
14786 bfd_boolean is_bx;
14787
14788 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
14789 is_bx = (opcode->aencode == do_bx);
14790
14791 /* Check that this instruction is supported for this CPU. */
14792 if (!(is_bx && fix_v4bx)
14793 && !(opcode->avariant &&
14794 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
14795 {
14796 as_bad (_("selected processor does not support `%s'"), str);
14797 return;
14798 }
14799 if (inst.size_req)
14800 {
14801 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
14802 return;
14803 }
14804
14805 mapping_state (MAP_ARM);
14806 inst.instruction = opcode->avalue;
14807 if (opcode->tag == OT_unconditionalF)
14808 inst.instruction |= 0xF << 28;
14809 else
14810 inst.instruction |= inst.cond << 28;
14811 inst.size = INSN_SIZE;
14812 if (!parse_operands (p, opcode->operands))
14813 opcode->aencode ();
14814 /* Arm mode bx is marked as both v4T and v5 because it's still required
14815 on a hypothetical non-thumb v5 core. */
14816 if (is_bx)
14817 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
14818 else
14819 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
14820 *opcode->avariant);
14821 }
14822 else
14823 {
14824 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14825 "-- `%s'"), str);
14826 return;
14827 }
14828 output_inst (str);
14829 }
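
/* Editor's sketch (not part of the assembler): how the IT-block bookkeeping
   in md_assemble above walks current_it_mask.  It assumes the IT handler
   seeds the mask with the 4-bit then/else pattern of the canonical
   (cond<0> == 1) encoding plus a marker bit at 0x10, and that flipping bit 0
   of a condition code gives its inverse; the helper and its names are
   invented for the example.  */

static void
example_it_block_walk (unsigned base_cc, unsigned it_mask,
                       unsigned slot_cond[4], int *nslots)
{
  int i = 0;

  while (it_mask != 0x10 && i < 4)
    {
      /* Same expression as in md_assemble: bit 4 selects "then" (the base
         condition) or "else" (its inverse) for this slot.  */
      slot_cond[i++] = base_cc ^ ((it_mask >> 4) & 1) ^ 1;
      it_mask = (it_mask << 1) & 0x1f;
    }

  *nslots = i;
}

/* For "ite eq" the seeded mask is 0x14 and base_cc is 0x0 (EQ), so the walk
   yields EQ for the first slot and NE (0x1) for the second, then stops once
   only the marker bit (0x10) remains, which is the point at which
   md_assemble clears current_it_mask.  */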
14830
14831 /* Various frobbings of labels and their addresses. */
14832
14833 void
14834 arm_start_line_hook (void)
14835 {
14836 last_label_seen = NULL;
14837 }
14838
14839 void
14840 arm_frob_label (symbolS * sym)
14841 {
14842 last_label_seen = sym;
14843
14844 ARM_SET_THUMB (sym, thumb_mode);
14845
14846 #if defined OBJ_COFF || defined OBJ_ELF
14847 ARM_SET_INTERWORK (sym, support_interwork);
14848 #endif
14849
14850 /* Note - do not allow local symbols (.Lxxx) to be labelled
14851 as Thumb functions. This is because these labels, whilst
14852 they exist inside Thumb code, are not the entry points for
14853 possible ARM->Thumb calls. Also, these labels can be used
14854 as part of a computed goto or switch statement, e.g. gcc
14855 can generate code that looks like this:
14856
14857 ldr r2, [pc, .Laaa]
14858 lsl r3, r3, #2
14859 ldr r2, [r3, r2]
14860 mov pc, r2
14861
14862 .Lbbb: .word .Lxxx
14863 .Lccc: .word .Lyyy
14864 ..etc...
14865 .Laaa: .word .Lbbb
14866
14867 The first instruction loads the address of the jump table.
14868 The second instruction converts a table index into a byte offset.
14869 The third instruction gets the jump address out of the table.
14870 The fourth instruction performs the jump.
14871
14872 If the address stored at .Laaa is that of a symbol which has the
14873 Thumb_Func bit set, then the linker will arrange for this address
14874 to have the bottom bit set, which in turn would mean that the
14875 address computation performed by the third instruction would end
14876 up with the bottom bit set. Since the ARM is capable of unaligned
14877 word loads, the instruction would then load the incorrect address
14878 out of the jump table, and chaos would ensue. */
14879 if (label_is_thumb_function_name
14880 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
14881 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
14882 {
14883 /* When the address of a Thumb function is taken the bottom
14884 bit of that address should be set. This will allow
14885 interworking between Arm and Thumb functions to work
14886 correctly. */
14887
14888 THUMB_SET_FUNC (sym, 1);
14889
14890 label_is_thumb_function_name = FALSE;
14891 }
14892
14893 dwarf2_emit_label (sym);
14894 }
14895
14896 int
14897 arm_data_in_code (void)
14898 {
14899 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
14900 {
14901 *input_line_pointer = '/';
14902 input_line_pointer += 5;
14903 *input_line_pointer = 0;
14904 return 1;
14905 }
14906
14907 return 0;
14908 }
14909
14910 char *
14911 arm_canonicalize_symbol_name (char * name)
14912 {
14913 int len;
14914
14915 if (thumb_mode && (len = strlen (name)) > 5
14916 && streq (name + len - 5, "/data"))
14917 *(name + len - 5) = 0;
14918
14919 return name;
14920 }
14921 \f
14922 /* Table of all register names defined by default. The user can
14923 define additional names with .req. Note that all register names
14924 should appear in both upper and lowercase variants. Some registers
14925 also have mixed-case names. */
14926
14927 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
14928 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
14929 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
14930 #define REGSET(p,t) \
14931 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
14932 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
14933 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
14934 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
14935 #define REGSETH(p,t) \
14936 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
14937 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
14938 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
14939 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
14940 #define REGSET2(p,t) \
14941 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
14942 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
14943 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
14944 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
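
/* Editor's illustration (separate from the table below): REGDEF, REGNUM and
   friends rely on preprocessor stringizing (#) and token pasting (##).  The
   self-contained miniature below shows the same pattern with an invented
   struct; in the real table, REGNUM(r, 0, RN) goes through REGDEF(r0, 0, RN)
   and becomes { "r0", 0, REG_TYPE_RN, TRUE, 0 }, and REGSET(r, RN) produces
   sixteen such entries, r0 through r15.  */

struct example_reg { const char *name; int number; };
#define EXAMPLE_REGDEF(s, n) { #s, n }
#define EXAMPLE_REGNUM(p, n) EXAMPLE_REGDEF (p##n, n)

static const struct example_reg example_regs[] =
{
  EXAMPLE_REGNUM (r, 0),  /* { "r0", 0 }  */
  EXAMPLE_REGNUM (r, 1)   /* { "r1", 1 }  */
};

#undef EXAMPLE_REGDEF
#undef EXAMPLE_REGNUM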
14945
14946 static const struct reg_entry reg_names[] =
14947 {
14948 /* ARM integer registers. */
14949 REGSET(r, RN), REGSET(R, RN),
14950
14951 /* ATPCS synonyms. */
14952 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
14953 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
14954 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
14955
14956 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
14957 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
14958 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
14959
14960 /* Well-known aliases. */
14961 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
14962 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
14963
14964 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
14965 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
14966
14967 /* Coprocessor numbers. */
14968 REGSET(p, CP), REGSET(P, CP),
14969
14970 /* Coprocessor register numbers. The "cr" variants are for backward
14971 compatibility. */
14972 REGSET(c, CN), REGSET(C, CN),
14973 REGSET(cr, CN), REGSET(CR, CN),
14974
14975 /* FPA registers. */
14976 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
14977 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
14978
14979 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
14980 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
14981
14982 /* VFP SP registers. */
14983 REGSET(s,VFS), REGSET(S,VFS),
14984 REGSETH(s,VFS), REGSETH(S,VFS),
14985
14986 /* VFP DP Registers. */
14987 REGSET(d,VFD), REGSET(D,VFD),
14988 /* Extra Neon DP registers. */
14989 REGSETH(d,VFD), REGSETH(D,VFD),
14990
14991 /* Neon QP registers. */
14992 REGSET2(q,NQ), REGSET2(Q,NQ),
14993
14994 /* VFP control registers. */
14995 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
14996 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
14997 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
14998 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
14999 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
15000 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
15001
15002 /* Maverick DSP coprocessor registers. */
15003 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
15004 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
15005
15006 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
15007 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
15008 REGDEF(dspsc,0,DSPSC),
15009
15010 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
15011 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
15012 REGDEF(DSPSC,0,DSPSC),
15013
15014 /* iWMMXt data registers - p0, c0-15. */
15015 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
15016
15017 /* iWMMXt control registers - p1, c0-3. */
15018 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
15019 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
15020 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
15021 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
15022
15023 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
15024 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
15025 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
15026 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
15027 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
15028
15029 /* XScale accumulator registers. */
15030 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
15031 };
15032 #undef REGDEF
15033 #undef REGNUM
15034 #undef REGSET
15035
15036 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
15037 within psr_required_here. */
15038 static const struct asm_psr psrs[] =
15039 {
15040 /* Backward compatibility notation. Note that "all" is no longer
15041 truly all possible PSR bits. */
15042 {"all", PSR_c | PSR_f},
15043 {"flg", PSR_f},
15044 {"ctl", PSR_c},
15045
15046 /* Individual flags. */
15047 {"f", PSR_f},
15048 {"c", PSR_c},
15049 {"x", PSR_x},
15050 {"s", PSR_s},
15051 /* Combinations of flags. */
15052 {"fs", PSR_f | PSR_s},
15053 {"fx", PSR_f | PSR_x},
15054 {"fc", PSR_f | PSR_c},
15055 {"sf", PSR_s | PSR_f},
15056 {"sx", PSR_s | PSR_x},
15057 {"sc", PSR_s | PSR_c},
15058 {"xf", PSR_x | PSR_f},
15059 {"xs", PSR_x | PSR_s},
15060 {"xc", PSR_x | PSR_c},
15061 {"cf", PSR_c | PSR_f},
15062 {"cs", PSR_c | PSR_s},
15063 {"cx", PSR_c | PSR_x},
15064 {"fsx", PSR_f | PSR_s | PSR_x},
15065 {"fsc", PSR_f | PSR_s | PSR_c},
15066 {"fxs", PSR_f | PSR_x | PSR_s},
15067 {"fxc", PSR_f | PSR_x | PSR_c},
15068 {"fcs", PSR_f | PSR_c | PSR_s},
15069 {"fcx", PSR_f | PSR_c | PSR_x},
15070 {"sfx", PSR_s | PSR_f | PSR_x},
15071 {"sfc", PSR_s | PSR_f | PSR_c},
15072 {"sxf", PSR_s | PSR_x | PSR_f},
15073 {"sxc", PSR_s | PSR_x | PSR_c},
15074 {"scf", PSR_s | PSR_c | PSR_f},
15075 {"scx", PSR_s | PSR_c | PSR_x},
15076 {"xfs", PSR_x | PSR_f | PSR_s},
15077 {"xfc", PSR_x | PSR_f | PSR_c},
15078 {"xsf", PSR_x | PSR_s | PSR_f},
15079 {"xsc", PSR_x | PSR_s | PSR_c},
15080 {"xcf", PSR_x | PSR_c | PSR_f},
15081 {"xcs", PSR_x | PSR_c | PSR_s},
15082 {"cfs", PSR_c | PSR_f | PSR_s},
15083 {"cfx", PSR_c | PSR_f | PSR_x},
15084 {"csf", PSR_c | PSR_s | PSR_f},
15085 {"csx", PSR_c | PSR_s | PSR_x},
15086 {"cxf", PSR_c | PSR_x | PSR_f},
15087 {"cxs", PSR_c | PSR_x | PSR_s},
15088 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
15089 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
15090 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
15091 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
15092 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
15093 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
15094 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
15095 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
15096 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
15097 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
15098 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
15099 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
15100 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
15101 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
15102 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
15103 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
15104 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
15105 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
15106 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
15107 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
15108 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
15109 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
15110 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
15111 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
15112 };
15113
15114 /* Table of V7M psr names. */
15115 static const struct asm_psr v7m_psrs[] =
15116 {
15117 {"apsr", 0 }, {"APSR", 0 },
15118 {"iapsr", 1 }, {"IAPSR", 1 },
15119 {"eapsr", 2 }, {"EAPSR", 2 },
15120 {"psr", 3 }, {"PSR", 3 },
15121 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
15122 {"ipsr", 5 }, {"IPSR", 5 },
15123 {"epsr", 6 }, {"EPSR", 6 },
15124 {"iepsr", 7 }, {"IEPSR", 7 },
15125 {"msp", 8 }, {"MSP", 8 },
15126 {"psp", 9 }, {"PSP", 9 },
15127 {"primask", 16}, {"PRIMASK", 16},
15128 {"basepri", 17}, {"BASEPRI", 17},
15129 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
15130 {"faultmask", 19}, {"FAULTMASK", 19},
15131 {"control", 20}, {"CONTROL", 20}
15132 };
15133
15134 /* Table of all shift-in-operand names. */
15135 static const struct asm_shift_name shift_names [] =
15136 {
15137 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
15138 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
15139 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
15140 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
15141 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
15142 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
15143 };
15144
15145 /* Table of all explicit relocation names. */
15146 #ifdef OBJ_ELF
15147 static struct reloc_entry reloc_names[] =
15148 {
15149 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
15150 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
15151 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
15152 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
15153 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
15154 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
15155 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
15156 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
15157 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
15158 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
15159 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
15160 };
15161 #endif
15162
15163 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
15164 static const struct asm_cond conds[] =
15165 {
15166 {"eq", 0x0},
15167 {"ne", 0x1},
15168 {"cs", 0x2}, {"hs", 0x2},
15169 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
15170 {"mi", 0x4},
15171 {"pl", 0x5},
15172 {"vs", 0x6},
15173 {"vc", 0x7},
15174 {"hi", 0x8},
15175 {"ls", 0x9},
15176 {"ge", 0xa},
15177 {"lt", 0xb},
15178 {"gt", 0xc},
15179 {"le", 0xd},
15180 {"al", 0xe}
15181 };
15182
15183 static struct asm_barrier_opt barrier_opt_names[] =
15184 {
15185 { "sy", 0xf },
15186 { "un", 0x7 },
15187 { "st", 0xe },
15188 { "unst", 0x6 }
15189 };
15190
15191 /* Table of ARM-format instructions. */
15192
15193 /* Macros for gluing together operand strings. N.B. In all cases
15194 other than OPS0, the trailing OP_stop comes from default
15195 zero-initialization of the unspecified elements of the array. */
15196 #define OPS0() { OP_stop, }
15197 #define OPS1(a) { OP_##a, }
15198 #define OPS2(a,b) { OP_##a,OP_##b, }
15199 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
15200 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
15201 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
15202 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
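
/* Editor's illustration (not used by the table below): what the OPSn macros
   expand to, and where the implicit OP_stop terminator comes from.  In the
   real table, an entry written with "3, (RR, oRR, SH)" is glued into
   OPS3 (RR, oRR, SH), i.e. { OP_RR, OP_oRR, OP_SH, }, and the rest of the
   fixed-size operands array is left zero-initialized.  The miniature below
   uses an invented enum and assumes, as the comment above implies, that
   OP_stop is the zero value of the operand enum.  */

enum example_operand { EXAMPLE_OP_stop = 0, EXAMPLE_OP_RR, EXAMPLE_OP_SH };
#define EXAMPLE_OPS2(a, b) { EXAMPLE_OP_##a, EXAMPLE_OP_##b, }

static const enum example_operand example_operands[4] = EXAMPLE_OPS2 (RR, SH);
/* example_operands holds EXAMPLE_OP_RR, EXAMPLE_OP_SH, then two
   EXAMPLE_OP_stop entries: the unspecified trailing elements default to
   zero.  */

#undef EXAMPLE_OPS2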
15203
15204 /* These macros abstract out the exact format of the mnemonic table and
15205 save some repeated characters. */
15206
15207 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
15208 #define TxCE(mnem, op, top, nops, ops, ae, te) \
15209 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
15210 THUMB_VARIANT, do_##ae, do_##te }
15211
15212 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
15213 a T_MNEM_xyz enumerator. */
15214 #define TCE(mnem, aop, top, nops, ops, ae, te) \
15215 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
15216 #define tCE(mnem, aop, top, nops, ops, ae, te) \
15217 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
15218
15219 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
15220 infix after the third character. */
15221 #define TxC3(mnem, op, top, nops, ops, ae, te) \
15222 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
15223 THUMB_VARIANT, do_##ae, do_##te }
15224 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
15225 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
15226 THUMB_VARIANT, do_##ae, do_##te }
15227 #define TC3(mnem, aop, top, nops, ops, ae, te) \
15228 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
15229 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
15230 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
15231 #define tC3(mnem, aop, top, nops, ops, ae, te) \
15232 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
15233 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
15234 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
15235
15236 /* Mnemonic with a conditional infix in an unusual place. Every conditional
15237 variant has to be spelled out as its own opcode-table entry, one per condition. */
15238 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
15239 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
15240 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
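
/* Editor's note (illustration only): the tag computation in TxCM_ above
   leans on sizeof applied to a string literal, which counts the terminating
   NUL.  For the unconditional variant (empty m2) the stringized condition is
   "" and sizeof ("") == 1, selecting OT_odd_infix_unc; otherwise the tag is
   OT_odd_infix_0 + sizeof (#m1) - 1, i.e. the character index at which the
   condition is infixed ("ld" gives index 2, as in "ldeqsh").  The anonymous
   enum below just spells out the arithmetic.  */

enum
{
  EXAMPLE_SIZEOF_EMPTY   = sizeof (""),       /* 1: just the NUL.  */
  EXAMPLE_SIZEOF_LD      = sizeof ("ld"),     /* 3: 'l', 'd', NUL.  */
  EXAMPLE_LD_INFIX_INDEX = sizeof ("ld") - 1  /* 2: infix position.  */
};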
15241
15242 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
15243 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
15244 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
15245 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
15246 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
15247 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
15248 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
15249 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
15250 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
15251 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
15252 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
15253 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
15254 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
15255 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
15256 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
15257 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
15258 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
15259 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
15260 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
15261 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
15262
15263 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
15264 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
15265 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
15266 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
15267
15268 /* Mnemonic that cannot be conditionalized. The ARM condition-code
15269 field is still 0xE. Many of the Thumb variants can be executed
15270 conditionally, so this is checked separately. */
15271 #define TUE(mnem, op, top, nops, ops, ae, te) \
15272 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
15273 THUMB_VARIANT, do_##ae, do_##te }
15274
15275 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
15276 condition code field. */
15277 #define TUF(mnem, op, top, nops, ops, ae, te) \
15278 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
15279 THUMB_VARIANT, do_##ae, do_##te }
15280
15281 /* ARM-only variants of all the above. */
15282 #define CE(mnem, op, nops, ops, ae) \
15283 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
15284
15285 #define C3(mnem, op, nops, ops, ae) \
15286 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
15287
15288 /* Legacy mnemonics that always have conditional infix after the third
15289 character. */
15290 #define CL(mnem, op, nops, ops, ae) \
15291 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
15292 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
15293
15294 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
15295 #define cCE(mnem, op, nops, ops, ae) \
15296 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
15297
15298 /* Legacy coprocessor instructions where conditional infix and conditional
15299 suffix are ambiguous. For consistency this includes all FPA instructions,
15300 not just the potentially ambiguous ones. */
15301 #define cCL(mnem, op, nops, ops, ae) \
15302 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
15303 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
15304
15305 /* Coprocessor, takes either a suffix or a position-3 infix
15306 (for an FPA corner case). */
15307 #define C3E(mnem, op, nops, ops, ae) \
15308 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
15309 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
15310
15311 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
15312 { #m1 #m2 #m3, OPS##nops ops, \
15313 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
15314 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
15315
15316 #define CM(m1, m2, op, nops, ops, ae) \
15317 xCM_(m1, , m2, op, nops, ops, ae), \
15318 xCM_(m1, eq, m2, op, nops, ops, ae), \
15319 xCM_(m1, ne, m2, op, nops, ops, ae), \
15320 xCM_(m1, cs, m2, op, nops, ops, ae), \
15321 xCM_(m1, hs, m2, op, nops, ops, ae), \
15322 xCM_(m1, cc, m2, op, nops, ops, ae), \
15323 xCM_(m1, ul, m2, op, nops, ops, ae), \
15324 xCM_(m1, lo, m2, op, nops, ops, ae), \
15325 xCM_(m1, mi, m2, op, nops, ops, ae), \
15326 xCM_(m1, pl, m2, op, nops, ops, ae), \
15327 xCM_(m1, vs, m2, op, nops, ops, ae), \
15328 xCM_(m1, vc, m2, op, nops, ops, ae), \
15329 xCM_(m1, hi, m2, op, nops, ops, ae), \
15330 xCM_(m1, ls, m2, op, nops, ops, ae), \
15331 xCM_(m1, ge, m2, op, nops, ops, ae), \
15332 xCM_(m1, lt, m2, op, nops, ops, ae), \
15333 xCM_(m1, gt, m2, op, nops, ops, ae), \
15334 xCM_(m1, le, m2, op, nops, ops, ae), \
15335 xCM_(m1, al, m2, op, nops, ops, ae)
15336
15337 #define UE(mnem, op, nops, ops, ae) \
15338 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
15339
15340 #define UF(mnem, op, nops, ops, ae) \
15341 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
15342
15343 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
15344 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
15345 use the same encoding function for each. */
15346 #define NUF(mnem, op, nops, ops, enc) \
15347 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
15348 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
15349
15350 /* Neon data processing, version which indirects through neon_enc_tab for
15351 the various overloaded versions of opcodes. */
15352 #define nUF(mnem, op, nops, ops, enc) \
15353 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
15354 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
15355
15356 /* Neon insn with conditional suffix for the ARM version, non-overloaded
15357 version. */
15358 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
15359 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
15360 THUMB_VARIANT, do_##enc, do_##enc }
15361
15362 #define NCE(mnem, op, nops, ops, enc) \
15363 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
15364
15365 #define NCEF(mnem, op, nops, ops, enc) \
15366 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
15367
15368 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
15369 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
15370 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
15371 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
15372
15373 #define nCE(mnem, op, nops, ops, enc) \
15374 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
15375
15376 #define nCEF(mnem, op, nops, ops, enc) \
15377 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
15378
15379 #define do_0 0
15380
15381 /* Thumb-only, unconditional. */
15382 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
15383
15384 static const struct asm_opcode insns[] =
15385 {
15386 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
15387 #define THUMB_VARIANT &arm_ext_v4t
15388 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
15389 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
15390 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
15391 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
15392 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
15393 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
15394 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
15395 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
15396 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
15397 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
15398 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
15399 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
15400 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
15401 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
15402 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
15403 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
15404
15405 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
15406 for setting PSR flag bits. They are obsolete in V6 and do not
15407 have Thumb equivalents. */
15408 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
15409 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
15410 CL(tstp, 110f000, 2, (RR, SH), cmp),
15411 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
15412 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
15413 CL(cmpp, 150f000, 2, (RR, SH), cmp),
15414 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
15415 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
15416 CL(cmnp, 170f000, 2, (RR, SH), cmp),
15417
15418 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
15419 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
15420 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
15421 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
15422
15423 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
15424 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
15425 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
15426 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
15427
15428 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15429 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15430 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15431 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15432 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15433 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15434
15435 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
15436 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
15437 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
15438 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
15439
15440 /* Pseudo ops. */
15441 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
15442 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
15443 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
15444
15445 /* Thumb-compatibility pseudo ops. */
15446 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
15447 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
15448 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
15449 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
15450 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
15451 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
15452 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
15453 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
15454 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
15455 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
15456 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
15457 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
15458
15459 /* These may simplify to neg. */
15460 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
15461 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
15462
15463 #undef THUMB_VARIANT
15464 #define THUMB_VARIANT &arm_ext_v6
15465 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
15466
15467 /* V1 instructions with no Thumb analogue prior to V6T2. */
15468 #undef THUMB_VARIANT
15469 #define THUMB_VARIANT &arm_ext_v6t2
15470 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
15471 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
15472 CL(teqp, 130f000, 2, (RR, SH), cmp),
15473
15474 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
15475 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
15476 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
15477 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
15478
15479 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15480 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15481
15482 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15483 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
15484
15485 /* V1 instructions with no Thumb analogue at all. */
15486 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
15487 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
15488
15489 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
15490 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
15491 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
15492 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
15493 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
15494 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
15495 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
15496 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
15497
15498 #undef ARM_VARIANT
15499 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
15500 #undef THUMB_VARIANT
15501 #define THUMB_VARIANT &arm_ext_v4t
15502 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
15503 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
15504
15505 #undef THUMB_VARIANT
15506 #define THUMB_VARIANT &arm_ext_v6t2
15507 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
15508 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
15509
15510 /* Generic coprocessor instructions. */
15511 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
15512 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15513 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15514 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15515 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15516 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15517 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15518
15519 #undef ARM_VARIANT
15520 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
15521 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
15522 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
15523
15524 #undef ARM_VARIANT
15525 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
15526 #undef THUMB_VARIANT
15527 #define THUMB_VARIANT &arm_ext_msr
15528 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
15529 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
15530
15531 #undef ARM_VARIANT
15532 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
15533 #undef THUMB_VARIANT
15534 #define THUMB_VARIANT &arm_ext_v6t2
15535 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15536 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15537 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15538 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15539 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15540 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15541 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
15542 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
15543
15544 #undef ARM_VARIANT
15545 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
15546 #undef THUMB_VARIANT
15547 #define THUMB_VARIANT &arm_ext_v4t
15548 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15549 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15550 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15551 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15552 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15553 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
15554
15555 #undef ARM_VARIANT
15556 #define ARM_VARIANT &arm_ext_v4t_5
15557 /* ARM Architecture 4T. */
15558 /* Note: bx (and blx) are required on V5, even if the processor does
15559 not support Thumb. */
15560 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
15561
15562 #undef ARM_VARIANT
15563 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
15564 #undef THUMB_VARIANT
15565 #define THUMB_VARIANT &arm_ext_v5t
15566 /* Note: blx has 2 variants; the .value coded here is for
15567 BLX(2). Only this variant has conditional execution. */
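 /* For example, "blxne r3" (BLX(2), register operand) may be conditional,
    whereas "blx label" (BLX(1), immediate operand) is always unconditional.  */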
15568 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
15569 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
15570
15571 #undef THUMB_VARIANT
15572 #define THUMB_VARIANT &arm_ext_v6t2
15573 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
15574 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15575 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15576 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15577 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
15578 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
15579 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15580 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
15581
15582 #undef ARM_VARIANT
15583 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
15584 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15585 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15586 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15587 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15588
15589 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15590 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
15591
15592 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15593 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15594 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15595 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
15596
15597 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15598 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15599 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15600 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15601
15602 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15603 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15604
15605 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
15606 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
15607 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
15608 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
15609
15610 #undef ARM_VARIANT
15611 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
15612 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
15613 TC3(ldrd, 00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
15614 TC3(strd, 00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
15615
15616 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15617 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15618
15619 #undef ARM_VARIANT
15620 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
15621 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
15622
15623 #undef ARM_VARIANT
15624 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
15625 #undef THUMB_VARIANT
15626 #define THUMB_VARIANT &arm_ext_v6
15627 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
15628 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
15629 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15630 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15631 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
15632 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15633 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15634 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15635 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15636 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
15637
15638 #undef THUMB_VARIANT
15639 #define THUMB_VARIANT &arm_ext_v6t2
15640 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
15641 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
15642 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15643 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15644
15645 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
15646 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
15647
15648 /* ARM V6 not included in V7M (e.g. integer SIMD). */
15649 #undef THUMB_VARIANT
15650 #define THUMB_VARIANT &arm_ext_v6_notm
15651 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
15652 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
15653 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
15654 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15655 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15656 TCE(qasx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15657 /* Old name for QASX. */
15658 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15659 TCE(qsax, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15660 /* Old name for QSAX. */
15661 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15662 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15663 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15664 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15665 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15666 TCE(sasx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15667 /* Old name for SASX. */
15668 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15669 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15670 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15671 TCE(shasx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15672 /* Old name for SHASX. */
15673 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15674 TCE(shsax, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15675 /* Old name for SHSAX. */
15676 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15677 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15678 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15679 TCE(ssax, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15680 /* Old name for SSAX. */
15681 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15682 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15683 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15684 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15685 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15686 TCE(uasx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15687 /* Old name for UASX. */
15688 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15689 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15690 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15691 TCE(uhasx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15692 /* Old name for UHASX. */
15693 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15694 TCE(uhsax, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15695 /* Old name for UHSAX. */
15696 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15697 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15698 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15699 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15700 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15701 TCE(uqasx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15702 /* Old name for UQASX. */
15703 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15704 TCE(uqsax, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15705 /* Old name for UQSAX. */
15706 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15707 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15708 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15709 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15710 TCE(usax, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15711 /* Old name for USAX. */
15712 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15713 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15714 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15715 UF(rfeib, 9900a00, 1, (RRw), rfe),
15716 UF(rfeda, 8100a00, 1, (RRw), rfe),
15717 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15718 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15719 UF(rfefa, 9900a00, 1, (RRw), rfe),
15720 UF(rfeea, 8100a00, 1, (RRw), rfe),
15721 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15722 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15723 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15724 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15725 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15726 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15727 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15728 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15729 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15730 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15731 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15732 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15733 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15734 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15735 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15736 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15737 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15738 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15739 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15740 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15741 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15742 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15743 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15744 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15745 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15746 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15747 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15748 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15749 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
15750 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
15751 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
15752 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
15753 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
15754 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
15755 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15756 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15757 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
15758
15759 #undef ARM_VARIANT
15760 #define ARM_VARIANT &arm_ext_v6k
15761 #undef THUMB_VARIANT
15762 #define THUMB_VARIANT &arm_ext_v6k
15763 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
15764 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
15765 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
15766 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
15767
15768 #undef THUMB_VARIANT
15769 #define THUMB_VARIANT &arm_ext_v6_notm
15770 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
15771 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
15772
15773 #undef THUMB_VARIANT
15774 #define THUMB_VARIANT &arm_ext_v6t2
15775 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15776 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15777 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15778 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15779 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
15780
15781 #undef ARM_VARIANT
15782 #define ARM_VARIANT &arm_ext_v6z
15783 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
15784
15785 #undef ARM_VARIANT
15786 #define ARM_VARIANT &arm_ext_v6t2
15787 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
15788 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
15789 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15790 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15791
15792 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
15793 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
15794 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
15795 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
15796
15797 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15798 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15799 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15800 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15801
15802 UT(cbnz, b900, 2, (RR, EXP), t_cbz),
15803 UT(cbz, b100, 2, (RR, EXP), t_cbz),
15804 /* ARM does not really have an IT instruction, so always allow it. */
15805 #undef ARM_VARIANT
15806 #define ARM_VARIANT &arm_ext_v1
15807 TUE(it, 0, bf08, 1, (COND), it, t_it),
15808 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
15809 TUE(ite, 0, bf04, 1, (COND), it, t_it),
15810 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
15811 TUE(itet, 0, bf06, 1, (COND), it, t_it),
15812 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
15813 TUE(itee, 0, bf02, 1, (COND), it, t_it),
15814 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
15815 TUE(itett, 0, bf07, 1, (COND), it, t_it),
15816 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
15817 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
15818 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
15819 TUE(itete, 0, bf05, 1, (COND), it, t_it),
15820 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
15821 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
15822 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
15823 TC3(rrx, 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
15824 TC3(rrxs, 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
15825
15826 /* Thumb2 only instructions. */
15827 #undef ARM_VARIANT
15828 #define ARM_VARIANT NULL
15829
15830 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15831 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15832 TCE(orn, 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
15833 TCE(orns, 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
15834 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
15835 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
15836
15837 /* Thumb-2 hardware division instructions (R and M profiles only). */
15838 #undef THUMB_VARIANT
15839 #define THUMB_VARIANT &arm_ext_div
15840 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
15841 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
15842
15843 /* ARM V6M/V7 instructions. */
15844 #undef ARM_VARIANT
15845 #define ARM_VARIANT &arm_ext_barrier
15846 #undef THUMB_VARIANT
15847 #define THUMB_VARIANT &arm_ext_barrier
15848 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
15849 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
15850 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
15851
15852 /* ARM V7 instructions. */
15853 #undef ARM_VARIANT
15854 #define ARM_VARIANT &arm_ext_v7
15855 #undef THUMB_VARIANT
15856 #define THUMB_VARIANT &arm_ext_v7
15857 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
15858 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
15859
15860 #undef ARM_VARIANT
15861 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15862 cCE(wfs, e200110, 1, (RR), rd),
15863 cCE(rfs, e300110, 1, (RR), rd),
15864 cCE(wfc, e400110, 1, (RR), rd),
15865 cCE(rfc, e500110, 1, (RR), rd),
15866
15867 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
15868 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
15869 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
15870 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
15871
15872 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
15873 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
15874 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
15875 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
15876
15877 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
15878 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
15879 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
15880 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
15881 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
15882 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
15883 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
15884 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
15885 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
15886 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
15887 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
15888 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
15889
15890 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
15891 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
15892 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
15893 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
15894 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
15895 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
15896 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
15897 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
15898 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
15899 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
15900 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
15901 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
15902
15903 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
15904 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
15905 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
15906 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
15907 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
15908 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
15909 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
15910 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
15911 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
15912 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
15913 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
15914 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
15915
15916 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
15917 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
15918 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
15919 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
15920 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
15921 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
15922 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
15923 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
15924 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
15925 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
15926 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
15927 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
15928
15929 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
15930 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
15931 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
15932 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
15933 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
15934 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
15935 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
15936 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
15937 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
15938 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
15939 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
15940 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
15941
15942 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
15943 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
15944 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
15945 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
15946 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
15947 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
15948 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
15949 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
15950 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
15951 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
15952 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
15953 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
15954
15955 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
15956 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
15957 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
15958 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
15959 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
15960 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
15961 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
15962 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
15963 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
15964 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
15965 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
15966 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
15967
15968 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
15969 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
15970 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
15971 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
15972 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
15973 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
15974 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
15975 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
15976 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
15977 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
15978 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
15979 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
15980
15981 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
15982 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
15983 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
15984 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
15985 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
15986 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
15987 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
15988 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
15989 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
15990 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
15991 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
15992 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
15993
15994 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
15995 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
15996 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
15997 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
15998 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
15999 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
16000 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
16001 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
16002 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
16003 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
16004 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
16005 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
16006
16007 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
16008 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
16009 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
16010 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
16011 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
16012 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
16013 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
16014 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
16015 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
16016 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
16017 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
16018 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
16019
16020 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
16021 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
16022 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
16023 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
16024 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
16025 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
16026 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
16027 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
16028 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
16029 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
16030 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
16031 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
16032
16033 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
16034 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
16035 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
16036 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
16037 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
16038 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
16039 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
16040 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
16041 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
16042 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
16043 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
16044 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
16045
16046 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
16047 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
16048 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
16049 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
16050 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
16051 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
16052 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
16053 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
16054 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
16055 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
16056 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
16057 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
16058
16059 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
16060 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
16061 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
16062 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
16063 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
16064 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
16065 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
16066 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
16067 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
16068 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
16069 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
16070 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
16071
16072 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
16073 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
16074 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
16075 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
16076 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
16077 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
16078 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
16079 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
16080 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
16081 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
16082 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
16083 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
16084
16085 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
16086 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
16087 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
16088 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
16089 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
16090 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16091 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16092 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16093 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
16094 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
16095 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
16096 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
16097
16098 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
16099 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
16100 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
16101 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
16102 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
16103 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16104 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16105 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16106 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
16107 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
16108 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
16109 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
16110
16111 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
16112 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
16113 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
16114 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
16115 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
16116 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16117 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16118 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16119 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
16120 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
16121 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
16122 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
16123
16124 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
16125 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
16126 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
16127 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
16128 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
16129 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16130 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16131 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16132 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
16133 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
16134 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
16135 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
16136
16137 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
16138 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
16139 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
16140 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
16141 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
16142 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16143 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16144 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16145 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
16146 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
16147 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
16148 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
16149
16150 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
16151 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
16152 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
16153 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
16154 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
16155 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16156 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16157 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16158 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
16159 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
16160 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
16161 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
16162
16163 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
16164 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
16165 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
16166 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
16167 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
16168 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16169 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16170 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16171 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
16172 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
16173 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
16174 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
16175
16176 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
16177 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
16178 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
16179 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
16180 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
16181 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16182 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16183 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16184 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
16185 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
16186 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
16187 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
16188
16189 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
16190 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
16191 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
16192 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
16193 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
16194 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16195 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16196 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16197 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
16198 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
16199 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
16200 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
16201
16202 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
16203 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
16204 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
16205 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
16206 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
16207 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16208 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16209 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16210 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
16211 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
16212 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
16213 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
16214
16215 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
16216 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
16217 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
16218 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
16219 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
16220 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16221 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16222 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16223 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
16224 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
16225 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
16226 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
16227
16228 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
16229 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
16230 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
16231 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
16232 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
16233 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16234 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16235 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16236 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
16237 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
16238 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
16239 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
16240
16241 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
16242 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
16243 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
16244 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
16245 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
16246 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
16247 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
16248 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
16249 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
16250 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
16251 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
16252 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
16253
16254 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
16255 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
16256 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
16257 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
16258
16259 cCL(flts, e000110, 2, (RF, RR), rn_rd),
16260 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
16261 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
16262 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
16263 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
16264 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
16265 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
16266 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
16267 cCL(flte, e080110, 2, (RF, RR), rn_rd),
16268 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
16269 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
16270 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
16271
16272 /* The implementation of the FIX instruction is broken on some
16273 assemblers, in that it accepts a precision specifier as well as a
16274 rounding specifier, despite the fact that this is meaningless.
16275 To be more compatible, we accept it as well, though of course it
16276 does not set any bits. */
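 /* For example, "fixsz r0, f1" is accepted and assembles to exactly the
    same encoding as "fixz r0, f1"; the s/d/e precision letter sets no bits,
    as the identical .value fields below show.  */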
16277 cCE(fix, e100110, 2, (RR, RF), rd_rm),
16278 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
16279 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
16280 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
16281 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
16282 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
16283 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
16284 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
16285 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
16286 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
16287 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
16288 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
16289 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
16290
16291 /* Instructions that were new with the real FPA, call them V2. */
16292 #undef ARM_VARIANT
16293 #define ARM_VARIANT &fpu_fpa_ext_v2
16294 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16295 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16296 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16297 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16298 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16299 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
16300
16301 #undef ARM_VARIANT
16302 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
16303 /* Moves and type conversions. */
16304 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
16305 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
16306 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
16307 cCE(fmstat, ef1fa10, 0, (), noargs),
16308 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
16309 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
16310 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
16311 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
16312 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
16313 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
16314 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
16315 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
16316
16317 /* Memory operations. */
16318 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
16319 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
16320 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
16321 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
16322 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
16323 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
16324 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
16325 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
16326 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
16327 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
16328 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
16329 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
16330 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
16331 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
16332 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
16333 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
16334 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
16335 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
16336
16337 /* Monadic operations. */
16338 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
16339 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
16340 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
16341
16342 /* Dyadic operations. */
16343 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16344 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16345 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16346 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16347 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16348 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16349 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16350 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16351 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
16352
16353 /* Comparisons. */
16354 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
16355 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
16356 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
16357 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
16358
16359 #undef ARM_VARIANT
16360 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
16361 /* Moves and type conversions. */
16362 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
16363 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
16364 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
16365 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
16366 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
16367 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
16368 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
16369 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
16370 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
16371 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
16372 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
16373 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
16374 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
16375
16376 /* Memory operations. */
16377 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
16378 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
16379 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
16380 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
16381 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
16382 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
16383 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
16384 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
16385 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
16386 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
16387
16388 /* Monadic operations. */
16389 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
16390 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
16391 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
16392
16393 /* Dyadic operations. */
16394 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16395 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16396 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16397 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16398 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16399 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16400 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16401 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16402 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
16403
16404 /* Comparisons. */
16405 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
16406 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
16407 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
16408 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
16409
16410 #undef ARM_VARIANT
16411 #define ARM_VARIANT &fpu_vfp_ext_v2
16412 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
16413 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
16414 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
16415 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
16416
16417 /* Instructions which may belong to either the Neon or VFP instruction sets.
16418 Individual encoder functions perform additional architecture checks. */
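 /* For illustration: "vadd.f32 s0, s1, s2" selects a VFP encoding, while
    "vadd.i32 q0, q1, q2" selects a Neon encoding; both are parsed through
    the shared vadd entry below, with the encoder deciding which is legal
    for the selected architecture.  */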
16419 #undef ARM_VARIANT
16420 #define ARM_VARIANT &fpu_vfp_ext_v1xd
16421 #undef THUMB_VARIANT
16422 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
16423 /* These mnemonics are unique to VFP. */
16424 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
16425 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
16426 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
16427 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
16428 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
16429 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
16430 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
16431 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
16432 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
16433 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
16434
16435 /* Mnemonics shared by Neon and VFP. */
16436 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
16437 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
16438 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
16439
16440 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
16441 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
16442
16443 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
16444 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
16445
16446 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16447 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16448 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16449 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16450 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16451 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
16452 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
16453 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
16454
16455 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
16456 nCEF(vcvtb, vcvt, 2, (RVS, RVS), neon_cvtb),
16457 nCEF(vcvtt, vcvt, 2, (RVS, RVS), neon_cvtt),
16458
16459
16460 /* NOTE: All VMOV encoding is special-cased! */
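 /* A few of the forms accepted here (illustrative, not exhaustive):
    "vmov r0, s1" (VFP single register to core register),
    "vmov d0, r2, r3" (core register pair to doubleword register),
    "vmov.i32 q0, #1" (Neon immediate),
    "vmov.f32 s0, #1.0" (VFP immediate).  */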
16461 NCE(vmov, 0, 1, (VMOV), neon_mov),
16462 NCE(vmovq, 0, 1, (VMOV), neon_mov),
16463
16464 #undef THUMB_VARIANT
16465 #define THUMB_VARIANT &fpu_neon_ext_v1
16466 #undef ARM_VARIANT
16467 #define ARM_VARIANT &fpu_neon_ext_v1
16468 /* Data processing with three registers of the same length. */
16469 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
16470 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
16471 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
16472 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
16473 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
16474 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
16475 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
16476 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
16477 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
16478 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
16479 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
16480 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
16481 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
16482 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
16483 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
16484 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
16485 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
16486 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
16487 /* If not immediate, fall back to neon_dyadic_i64_su.
16488 shl_imm should accept I8 I16 I32 I64,
16489 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
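 /* For example, "vshl.i32 d0, d1, #3" uses the immediate encoding handled
    here, while "vshl.s32 d0, d1, d2" (shift count in a register) takes the
    neon_dyadic_i64_su path mentioned above.  */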
16490 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
16491 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
16492 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
16493 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
16494 /* Logic ops, types optional & ignored. */
16495 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
16496 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
16497 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
16498 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
16499 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
16500 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
16501 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
16502 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
16503 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
16504 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
16505 /* Bitfield ops, untyped. */
16506 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
16507 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
16508 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
16509 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
16510 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
16511 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
16512 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
16513 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
16514 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
16515 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
16516 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
16517 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
16518 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
16519 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
16520 back to neon_dyadic_if_su. */
16521 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
16522 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
16523 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
16524 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
16525 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
16526 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
16527 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
16528 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
16529 /* Comparison. Type I8 I16 I32 F32. */
16530 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
16531 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
16532 /* As above, D registers only. */
16533 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
16534 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
16535 /* Int and float variants, signedness unimportant. */
16536 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
16537 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
16538 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
16539 /* Add/sub take types I8 I16 I32 I64 F32. */
16540 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
16541 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
16542 /* vtst takes sizes 8, 16, 32. */
16543 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
16544 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
16545 /* VMUL takes I8 I16 I32 F32 P8. */
16546 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
16547 /* VQD{R}MULH takes S16 S32. */
16548 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
16549 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
16550 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
16551 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
16552 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
16553 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
16554 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
16555 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
16556 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
16557 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
16558 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
16559 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
16560 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
16561 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
16562 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
16563 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
16564
16565 /* Two address, int/float. Types S8 S16 S32 F32. */
16566 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
16567 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
16568
16569 /* Data processing with two registers and a shift amount. */
16570 /* Right shifts, and variants with rounding.
16571 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
16572 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
16573 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
16574 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
16575 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
16576 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
16577 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
16578 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
16579 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
16580 /* Shift and insert. Sizes accepted 8 16 32 64. */
16581 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
16582 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
16583 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
16584 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
16585 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
16586 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
16587 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
16588 /* Right shift immediate, saturating & narrowing, with rounding variants.
16589 Types accepted S16 S32 S64 U16 U32 U64. */
16590 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
16591 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
16592 /* As above, unsigned. Types accepted S16 S32 S64. */
16593 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
16594 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
16595 /* Right shift narrowing. Types accepted I16 I32 I64. */
16596 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
16597 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
16598 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
16599 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
16600 /* CVT with optional immediate for fixed-point variant. */
16601 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
16602
16603 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
16604 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
16605
16606 /* Data processing, three registers of different lengths. */
16607 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
16608 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
16609 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
16610 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
16611 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
16612 /* If not scalar, fall back to neon_dyadic_long.
16613 Vector types as above, scalar types S16 S32 U16 U32. */
16614 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
16615 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
16616 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
16617 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
16618 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
16619 /* Dyadic, narrowing insns. Types I16 I32 I64. */
16620 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16621 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16622 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16623 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
16624 /* Saturating doubling multiplies. Types S16 S32. */
16625 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
16626 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
16627 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
16628 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
16629 S16 S32 U16 U32. */
16630 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
16631
16632 /* Extract. Size 8. */
16633 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
16634 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
16635
16636 /* Two registers, miscellaneous. */
16637 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
16638 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
16639 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
16640 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
16641 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
16642 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
16643 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
16644 /* Vector replicate. Sizes 8 16 32. */
16645 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
16646 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
16647 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
16648 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
16649 /* VMOVN. Types I16 I32 I64. */
16650 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
16651 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
16652 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
16653 /* VQMOVUN. Types S16 S32 S64. */
16654 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
16655 /* VZIP / VUZP. Sizes 8 16 32. */
16656 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
16657 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
16658 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
16659 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
16660 /* VQABS / VQNEG. Types S8 S16 S32. */
16661 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
16662 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
16663 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
16664 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
16665 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
16666 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
16667 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
16668 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
16669 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
16670 /* Reciprocal estimates. Types U32 F32. */
16671 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
16672 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
16673 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
16674 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
16675 /* VCLS. Types S8 S16 S32. */
16676 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
16677 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
16678 /* VCLZ. Types I8 I16 I32. */
16679 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
16680 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
16681 /* VCNT. Size 8. */
16682 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
16683 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
16684 /* Two address, untyped. */
16685 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
16686 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
16687 /* VTRN. Sizes 8 16 32. */
16688 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
16689 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
16690
16691 /* Table lookup. Size 8. */
16692 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16693 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16694
16695 #undef THUMB_VARIANT
16696 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
16697 #undef ARM_VARIANT
16698 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
16699 /* Neon element/structure load/store. */
16700 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16701 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16702 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16703 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16704 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16705 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16706 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16707 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16708
16709 #undef THUMB_VARIANT
16710 #define THUMB_VARIANT &fpu_vfp_ext_v3
16711 #undef ARM_VARIANT
16712 #define ARM_VARIANT &fpu_vfp_ext_v3
16713 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
16714 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
16715 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16716 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16717 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16718 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16719 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16720 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16721 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16722 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16723 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16724 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16725 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16726 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16727 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16728 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16729 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16730 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16731
16732 #undef THUMB_VARIANT
16733 #undef ARM_VARIANT
16734 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
16735 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16736 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16737 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16738 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16739 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16740 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16741 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
16742 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
16743
16744 #undef ARM_VARIANT
16745 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
16746 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
16747 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
16748 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
16749 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
16750 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
16751 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
16752 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
16753 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
16754 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
16755 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16756 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16757 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16758 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16759 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16760 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16761 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16762 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16763 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16764 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
16765 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
16766 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16767 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16768 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16769 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16770 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16771 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16772 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
16773 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
16774 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
16775 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
16776 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
16777 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
16778 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
16779 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
16780 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
16781 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
16782 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
16783 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16784 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16785 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16786 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16787 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16788 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16789 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16790 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16791 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16792 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
16793 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16794 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16795 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16796 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16797 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16798 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16799 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16800 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16801 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16802 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16803 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16804 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16805 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16806 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16807 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16808 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16809 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16810 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16811 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16812 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16813 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16814 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16815 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16816 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16817 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16818 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16819 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16820 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16821 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16822 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16823 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16824 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16825 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16826 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16827 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16828 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16829 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16830 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16831 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16832 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16833 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16834 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
16835 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16836 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16837 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16838 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16839 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16840 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16841 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16842 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16843 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16844 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16845 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16846 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16847 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16848 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16849 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16850 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16851 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16852 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16853 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16854 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16855 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16856 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
16857 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16858 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16859 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16860 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16861 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16862 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16863 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16864 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16865 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16866 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16867 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16868 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16869 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16870 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16871 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16872 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16873 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16874 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16875 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16876 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16877 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16878 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16879 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16880 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16881 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16882 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16883 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16884 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16885 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16886 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16887 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16888 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
16889 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
16890 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
16891 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
16892 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
16893 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
16894 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16895 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16896 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16897 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
16898 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
16899 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
16900 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
16901 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
16902 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
16903 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16904 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16905 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16906 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16907 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
16908
16909 #undef ARM_VARIANT
16910 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16911 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
16912 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
16913 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
16914 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
16915 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
16916 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
16917 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16918 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16919 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16920 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16921 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16922 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16923 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16924 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16925 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16926 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16927 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16928 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16929 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16930 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16931 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
16932 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16933 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16934 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16935 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16936 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16937 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16938 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16939 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16940 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16941 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16942 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16943 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16944 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16945 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16946 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16947 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16948 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16949 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16950 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16951 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16952 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16953 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16954 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16955 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16956 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16957 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16958 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16959 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16960 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16961 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16962 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16963 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16964 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16965 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16966 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16967 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16968
16969 #undef ARM_VARIANT
16970 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16971 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16972 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16973 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16974 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16975 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16976 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16977 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16978 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16979 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
16980 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
16981 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
16982 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
16983 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
16984 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
16985 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
16986 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
16987 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
16988 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
16989 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
16990 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
16991 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
16992 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
16993 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
16994 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
16995 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
16996 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
16997 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
16998 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
16999 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
17000 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
17001 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
17002 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
17003 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
17004 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
17005 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
17006 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
17007 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
17008 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
17009 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
17010 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
17011 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
17012 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
17013 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
17014 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
17015 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
17016 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
17017 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
17018 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
17019 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
17020 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
17021 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
17022 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
17023 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
17024 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
17025 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
17026 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
17027 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
17028 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
17029 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
17030 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
17031 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
17032 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
17033 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
17034 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
17035 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17036 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
17037 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17038 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
17039 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17040 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
17041 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17042 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
17043 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
17044 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
17045 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
17046 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
17047 };
17048 #undef ARM_VARIANT
17049 #undef THUMB_VARIANT
17050 #undef TCE
17051 #undef TCM
17052 #undef TUE
17053 #undef TUF
17054 #undef TCC
17055 #undef cCE
17056 #undef cCL
17057 #undef C3E
17058 #undef CE
17059 #undef CM
17060 #undef UE
17061 #undef UF
17062 #undef UT
17063 #undef NUF
17064 #undef nUF
17065 #undef NCE
17066 #undef nCE
17067 #undef OPS0
17068 #undef OPS1
17069 #undef OPS2
17070 #undef OPS3
17071 #undef OPS4
17072 #undef OPS5
17073 #undef OPS6
17074 #undef do_0
17075 \f
17076 /* MD interface: bits in the object file. */
17077
17078 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
17079 for use in the a.out file, and store them in the array pointed to by buf.
17080 This knows about the endian-ness of the target machine and does
17081 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
17082 2 (short) and 4 (long). Floating point numbers are put out as a series of
17083 LITTLENUMS (shorts, here at least). */
17084
17085 void
17086 md_number_to_chars (char * buf, valueT val, int n)
17087 {
17088 if (target_big_endian)
17089 number_to_chars_bigendian (buf, val, n);
17090 else
17091 number_to_chars_littleendian (buf, val, n);
17092 }
17093
17094 static valueT
17095 md_chars_to_number (char * buf, int n)
17096 {
17097 valueT result = 0;
17098 unsigned char * where = (unsigned char *) buf;
17099
17100 if (target_big_endian)
17101 {
17102 while (n--)
17103 {
17104 result <<= 8;
17105 result |= (*where++ & 255);
17106 }
17107 }
17108 else
17109 {
17110 while (n--)
17111 {
17112 result <<= 8;
17113 result |= (where[n] & 255);
17114 }
17115 }
17116
17117 return result;
17118 }
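/* A small worked example of the two helpers above (illustrative only):
   on a little-endian target, md_number_to_chars (buf, 0xe1a00000, 4)
   stores the bytes 00 00 a0 e1, and md_chars_to_number (buf, 4) reads
   them back as 0xe1a00000; a big-endian target stores and reads
   e1 a0 00 00 instead. */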
17119
17120 /* MD interface: Sections. */
17121
17122 /* Estimate the size of a frag before relaxing. Assume everything fits in
17123 2 bytes. */
17124
17125 int
17126 md_estimate_size_before_relax (fragS * fragp,
17127 segT segtype ATTRIBUTE_UNUSED)
17128 {
17129 fragp->fr_var = 2;
17130 return 2;
17131 }
17132
17133 /* Convert a machine dependent frag. */
17134
17135 void
17136 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
17137 {
17138 unsigned long insn;
17139 unsigned long old_op;
17140 char *buf;
17141 expressionS exp;
17142 fixS *fixp;
17143 int reloc_type;
17144 int pc_rel;
17145 int opcode;
17146
17147 buf = fragp->fr_literal + fragp->fr_fix;
17148
17149 old_op = bfd_get_16(abfd, buf);
17150 if (fragp->fr_symbol)
17151 {
17152 exp.X_op = O_symbol;
17153 exp.X_add_symbol = fragp->fr_symbol;
17154 }
17155 else
17156 {
17157 exp.X_op = O_constant;
17158 }
17159 exp.X_add_number = fragp->fr_offset;
17160 opcode = fragp->fr_subtype;
17161 switch (opcode)
17162 {
17163 case T_MNEM_ldr_pc:
17164 case T_MNEM_ldr_pc2:
17165 case T_MNEM_ldr_sp:
17166 case T_MNEM_str_sp:
17167 case T_MNEM_ldr:
17168 case T_MNEM_ldrb:
17169 case T_MNEM_ldrh:
17170 case T_MNEM_str:
17171 case T_MNEM_strb:
17172 case T_MNEM_strh:
17173 if (fragp->fr_var == 4)
17174 {
17175 insn = THUMB_OP32 (opcode);
17176 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
17177 {
17178 insn |= (old_op & 0x700) << 4;
17179 }
17180 else
17181 {
17182 insn |= (old_op & 7) << 12;
17183 insn |= (old_op & 0x38) << 13;
17184 }
17185 insn |= 0x00000c00;
17186 put_thumb32_insn (buf, insn);
17187 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
17188 }
17189 else
17190 {
17191 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
17192 }
17193 pc_rel = (opcode == T_MNEM_ldr_pc2);
17194 break;
17195 case T_MNEM_adr:
17196 if (fragp->fr_var == 4)
17197 {
17198 insn = THUMB_OP32 (opcode);
17199 insn |= (old_op & 0xf0) << 4;
17200 put_thumb32_insn (buf, insn);
17201 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
17202 }
17203 else
17204 {
17205 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
17206 exp.X_add_number -= 4;
17207 }
17208 pc_rel = 1;
17209 break;
17210 case T_MNEM_mov:
17211 case T_MNEM_movs:
17212 case T_MNEM_cmp:
17213 case T_MNEM_cmn:
17214 if (fragp->fr_var == 4)
17215 {
17216 int r0off = (opcode == T_MNEM_mov
17217 || opcode == T_MNEM_movs) ? 0 : 8;
17218 insn = THUMB_OP32 (opcode);
17219 insn = (insn & 0xe1ffffff) | 0x10000000;
17220 insn |= (old_op & 0x700) << r0off;
17221 put_thumb32_insn (buf, insn);
17222 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
17223 }
17224 else
17225 {
17226 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
17227 }
17228 pc_rel = 0;
17229 break;
17230 case T_MNEM_b:
17231 if (fragp->fr_var == 4)
17232 {
17233 insn = THUMB_OP32(opcode);
17234 put_thumb32_insn (buf, insn);
17235 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
17236 }
17237 else
17238 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
17239 pc_rel = 1;
17240 break;
17241 case T_MNEM_bcond:
17242 if (fragp->fr_var == 4)
17243 {
17244 insn = THUMB_OP32(opcode);
17245 insn |= (old_op & 0xf00) << 14;
17246 put_thumb32_insn (buf, insn);
17247 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
17248 }
17249 else
17250 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
17251 pc_rel = 1;
17252 break;
17253 case T_MNEM_add_sp:
17254 case T_MNEM_add_pc:
17255 case T_MNEM_inc_sp:
17256 case T_MNEM_dec_sp:
17257 if (fragp->fr_var == 4)
17258 {
17259 /* ??? Choose between add and addw. */
17260 insn = THUMB_OP32 (opcode);
17261 insn |= (old_op & 0xf0) << 4;
17262 put_thumb32_insn (buf, insn);
17263 if (opcode == T_MNEM_add_pc)
17264 reloc_type = BFD_RELOC_ARM_T32_IMM12;
17265 else
17266 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
17267 }
17268 else
17269 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
17270 pc_rel = 0;
17271 break;
17272
17273 case T_MNEM_addi:
17274 case T_MNEM_addis:
17275 case T_MNEM_subi:
17276 case T_MNEM_subis:
17277 if (fragp->fr_var == 4)
17278 {
17279 insn = THUMB_OP32 (opcode);
17280 insn |= (old_op & 0xf0) << 4;
17281 insn |= (old_op & 0xf) << 16;
17282 put_thumb32_insn (buf, insn);
17283 if (insn & (1 << 20))
17284 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
17285 else
17286 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
17287 }
17288 else
17289 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
17290 pc_rel = 0;
17291 break;
17292 default:
17293 abort ();
17294 }
17295 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
17296 reloc_type);
17297 fixp->fx_file = fragp->fr_file;
17298 fixp->fx_line = fragp->fr_line;
17299 fragp->fr_fix += fragp->fr_var;
17300 }
17301
17302 /* Return the size of a relaxable immediate operand instruction.
17303 SHIFT and SIZE specify the form of the allowable immediate. */
17304 static int
17305 relax_immediate (fragS *fragp, int size, int shift)
17306 {
17307 offsetT offset;
17308 offsetT mask;
17309 offsetT low;
17310
17311 /* ??? Should be able to do better than this. */
17312 if (fragp->fr_symbol)
17313 return 4;
17314
17315 low = (1 << shift) - 1;
17316 mask = (1 << (shift + size)) - (1 << shift);
17317 offset = fragp->fr_offset;
17318 /* Force misaligned offsets to 32-bit variant. */
17319 if (offset & low)
17320 return 4;
17321 if (offset & ~mask)
17322 return 4;
17323 return 2;
17324 }
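/* For example, the LDR/STR callers of relax_immediate use SIZE == 5,
   SHIFT == 2: low == 3 and mask == 0x7c, so only word-aligned constant
   offsets in the range 0..124 keep the 16-bit encoding; a misaligned,
   negative or larger offset (or any symbolic offset) forces the 32-bit
   form. */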
17325
17326 /* Get the address of a symbol during relaxation. */
17327 static addressT
17328 relaxed_symbol_addr (fragS *fragp, long stretch)
17329 {
17330 fragS *sym_frag;
17331 addressT addr;
17332 symbolS *sym;
17333
17334 sym = fragp->fr_symbol;
17335 sym_frag = symbol_get_frag (sym);
17336 know (S_GET_SEGMENT (sym) != absolute_section
17337 || sym_frag == &zero_address_frag);
17338 addr = S_GET_VALUE (sym) + fragp->fr_offset;
17339
17340 /* If the frag has yet to be reached on this pass, assume it will
17341 move by STRETCH just as we did. If this is not so, it will
17342 be because some frag in between grows, and that will force
17343 another pass. */
17344
17345 if (stretch != 0
17346 && sym_frag->relax_marker != fragp->relax_marker)
17347 {
17348 fragS *f;
17349
17350 /* Adjust stretch for any alignment frag. Note that if we have
17351 been expanding the earlier code, the symbol may be
17352 defined in what appears to be an earlier frag. FIXME:
17353 This doesn't handle the fr_subtype field, which specifies
17354 a maximum number of bytes to skip when doing an
17355 alignment. */
17356 for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
17357 {
17358 if (f->fr_type == rs_align || f->fr_type == rs_align_code)
17359 {
17360 if (stretch < 0)
17361 stretch = - ((- stretch)
17362 & ~ ((1 << (int) f->fr_offset) - 1));
17363 else
17364 stretch &= ~ ((1 << (int) f->fr_offset) - 1);
17365 if (stretch == 0)
17366 break;
17367 }
17368 }
17369 if (f != NULL)
17370 addr += stretch;
17371 }
17372
17373 return addr;
17374 }
17375
17376 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
17377 load. */
17378 static int
17379 relax_adr (fragS *fragp, asection *sec, long stretch)
17380 {
17381 addressT addr;
17382 offsetT val;
17383
17384 /* Assume worst case for symbols not known to be in the same section. */
17385 if (!S_IS_DEFINED (fragp->fr_symbol)
17386 || sec != S_GET_SEGMENT (fragp->fr_symbol))
17387 return 4;
17388
17389 val = relaxed_symbol_addr (fragp, stretch);
17390 addr = fragp->fr_address + fragp->fr_fix;
17391 addr = (addr + 4) & ~3;
17392 /* Force misaligned targets to 32-bit variant. */
17393 if (val & 3)
17394 return 4;
17395 val -= addr;
17396 if (val < 0 || val > 1020)
17397 return 4;
17398 return 2;
17399 }
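/* For example, a 16-bit "adr" (or PC-relative load) encodes an unsigned,
   word-aligned offset of at most 1020 bytes from Align (PC, 4), so the
   narrow form is kept only when the target is word-aligned, defined in
   the same section, and no more than 1020 bytes after that base. */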
17400
17401 /* Return the size of a relaxable add/sub immediate instruction. */
17402 static int
17403 relax_addsub (fragS *fragp, asection *sec)
17404 {
17405 char *buf;
17406 int op;
17407
17408 buf = fragp->fr_literal + fragp->fr_fix;
17409 op = bfd_get_16(sec->owner, buf);
17410 if ((op & 0xf) == ((op >> 4) & 0xf))
17411 return relax_immediate (fragp, 8, 0);
17412 else
17413 return relax_immediate (fragp, 3, 0);
17414 }
17415
17416
17417 /* Return the size of a relaxable branch instruction. BITS is the
17418 size of the offset field in the narrow instruction. */
17419
17420 static int
17421 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
17422 {
17423 addressT addr;
17424 offsetT val;
17425 offsetT limit;
17426
17427 /* Assume worst case for symbols not known to be in the same section. */
17428 if (!S_IS_DEFINED (fragp->fr_symbol)
17429 || sec != S_GET_SEGMENT (fragp->fr_symbol))
17430 return 4;
17431
17432 val = relaxed_symbol_addr (fragp, stretch);
17433 addr = fragp->fr_address + fragp->fr_fix + 4;
17434 val -= addr;
17435
17436 /* The branch offset is encoded as a signed value in units of 2 bytes,
17437 so a BITS-bit field reaches from -(1 << bits) to (1 << bits) - 2 bytes. */
17437 limit = 1 << bits;
17438 if (val >= limit || val < -limit)
17439 return 4;
17440 return 2;
17441 }
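/* For example, a conditional branch relaxes with BITS == 8: the narrow
   encoding reaches byte offsets from -256 to +254, so anything further
   away (or any undefined/external target) is widened to 4 bytes.  An
   unconditional "b" uses BITS == 11, i.e. -2048 to +2046 bytes. */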
17442
17443
17444 /* Relax a machine dependent frag. This returns the amount by which
17445 the current size of the frag should change. */
17446
17447 int
17448 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
17449 {
17450 int oldsize;
17451 int newsize;
17452
17453 oldsize = fragp->fr_var;
17454 switch (fragp->fr_subtype)
17455 {
17456 case T_MNEM_ldr_pc2:
17457 newsize = relax_adr (fragp, sec, stretch);
17458 break;
17459 case T_MNEM_ldr_pc:
17460 case T_MNEM_ldr_sp:
17461 case T_MNEM_str_sp:
17462 newsize = relax_immediate (fragp, 8, 2);
17463 break;
17464 case T_MNEM_ldr:
17465 case T_MNEM_str:
17466 newsize = relax_immediate (fragp, 5, 2);
17467 break;
17468 case T_MNEM_ldrh:
17469 case T_MNEM_strh:
17470 newsize = relax_immediate (fragp, 5, 1);
17471 break;
17472 case T_MNEM_ldrb:
17473 case T_MNEM_strb:
17474 newsize = relax_immediate (fragp, 5, 0);
17475 break;
17476 case T_MNEM_adr:
17477 newsize = relax_adr (fragp, sec, stretch);
17478 break;
17479 case T_MNEM_mov:
17480 case T_MNEM_movs:
17481 case T_MNEM_cmp:
17482 case T_MNEM_cmn:
17483 newsize = relax_immediate (fragp, 8, 0);
17484 break;
17485 case T_MNEM_b:
17486 newsize = relax_branch (fragp, sec, 11, stretch);
17487 break;
17488 case T_MNEM_bcond:
17489 newsize = relax_branch (fragp, sec, 8, stretch);
17490 break;
17491 case T_MNEM_add_sp:
17492 case T_MNEM_add_pc:
17493 newsize = relax_immediate (fragp, 8, 2);
17494 break;
17495 case T_MNEM_inc_sp:
17496 case T_MNEM_dec_sp:
17497 newsize = relax_immediate (fragp, 7, 2);
17498 break;
17499 case T_MNEM_addi:
17500 case T_MNEM_addis:
17501 case T_MNEM_subi:
17502 case T_MNEM_subis:
17503 newsize = relax_addsub (fragp, sec);
17504 break;
17505 default:
17506 abort ();
17507 }
17508
17509 fragp->fr_var = newsize;
17510 /* Freeze wide instructions that are at or before the same location as
17511 in the previous pass. This avoids infinite loops.
17512 Don't freeze them unconditionally because targets may be artificially
17513 misaligned by the expansion of preceding frags. */
17514 if (stretch <= 0 && newsize > 2)
17515 {
17516 md_convert_frag (sec->owner, sec, fragp);
17517 frag_wane (fragp);
17518 }
17519
17520 return newsize - oldsize;
17521 }
17522
17523 /* Round up a section size to the appropriate boundary. */
17524
17525 valueT
17526 md_section_align (segT segment ATTRIBUTE_UNUSED,
17527 valueT size)
17528 {
17529 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
17530 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
17531 {
17532 /* For a.out, force the section size to be aligned. If we don't do
17533 this, BFD will align it for us, but it will not write out the
17534 final bytes of the section. This may be a bug in BFD, but it is
17535 easier to fix it here since that is how the other a.out targets
17536 work. */
17537 int align;
17538
17539 align = bfd_get_section_alignment (stdoutput, segment);
17540 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
17541 }
17542 #endif
17543
17544 return size;
17545 }
17546
17547 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
17548 of an rs_align_code fragment. */
17549
17550 void
17551 arm_handle_align (fragS * fragP)
17552 {
17553 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };	/* mov r0, r0 (little-endian).  */
17554 static char const thumb_noop[2] = { 0xc0, 0x46 };		/* mov r8, r8 (little-endian).  */
17555 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };	/* mov r0, r0 (big-endian).  */
17556 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };	/* mov r8, r8 (big-endian).  */
17557
17558 int bytes, fix, noop_size;
17559 char * p;
17560 const char * noop;
17561
17562 if (fragP->fr_type != rs_align_code)
17563 return;
17564
17565 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
17566 p = fragP->fr_literal + fragP->fr_fix;
17567 fix = 0;
17568
17569 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
17570 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
17571
17572 if (fragP->tc_frag_data)
17573 {
17574 if (target_big_endian)
17575 noop = thumb_bigend_noop;
17576 else
17577 noop = thumb_noop;
17578 noop_size = sizeof (thumb_noop);
17579 }
17580 else
17581 {
17582 if (target_big_endian)
17583 noop = arm_bigend_noop;
17584 else
17585 noop = arm_noop;
17586 noop_size = sizeof (arm_noop);
17587 }
17588
17589 if (bytes & (noop_size - 1))
17590 {
17591 fix = bytes & (noop_size - 1);
17592 memset (p, 0, fix);
17593 p += fix;
17594 bytes -= fix;
17595 }
17596
17597 while (bytes >= noop_size)
17598 {
17599 memcpy (p, noop, noop_size);
17600 p += noop_size;
17601 bytes -= noop_size;
17602 fix += noop_size;
17603 }
17604
17605 fragP->fr_fix += fix;
17606 fragP->fr_var = noop_size;
17607 }
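/* For example, padding a Thumb code frag by 5 bytes emits one zero byte
   (to regain 2-byte alignment) followed by two Thumb no-ops (0x46c0),
   leaving fr_var set to the no-op size for any remaining fill. */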
17608
17609 /* Called from md_do_align. Used to create an alignment
17610 frag in a code section. */
17611
17612 void
17613 arm_frag_align_code (int n, int max)
17614 {
17615 char * p;
17616
17617 /* We assume that there will never be a requirement
17618 to support alignments greater than 32 bytes. */
17619 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
17620 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
17621
17622 p = frag_var (rs_align_code,
17623 MAX_MEM_FOR_RS_ALIGN_CODE,
17624 1,
17625 (relax_substateT) max,
17626 (symbolS *) NULL,
17627 (offsetT) n,
17628 (char *) NULL);
17629 *p = 0;
17630 }
17631
17632 /* Perform target specific initialisation of a frag. */
17633
17634 void
17635 arm_init_frag (fragS * fragP)
17636 {
17637 /* Record whether this frag is in an ARM or a THUMB area. */
17638 fragP->tc_frag_data = thumb_mode;
17639 }
17640
17641 #ifdef OBJ_ELF
17642 /* When we change sections we need to issue a new mapping symbol. */
17643
17644 void
17645 arm_elf_change_section (void)
17646 {
17647 flagword flags;
17648 segment_info_type *seginfo;
17649
17650 /* Link an unlinked unwind index table section to the .text section. */
17651 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
17652 && elf_linked_to_section (now_seg) == NULL)
17653 elf_linked_to_section (now_seg) = text_section;
17654
17655 if (!SEG_NORMAL (now_seg))
17656 return;
17657
17658 flags = bfd_get_section_flags (stdoutput, now_seg);
17659
17660 /* We can ignore sections that only contain debug info. */
17661 if ((flags & SEC_ALLOC) == 0)
17662 return;
17663
17664 seginfo = seg_info (now_seg);
17665 mapstate = seginfo->tc_segment_info_data.mapstate;
17666 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
17667 }
17668
17669 int
17670 arm_elf_section_type (const char * str, size_t len)
17671 {
17672 if (len == 5 && strncmp (str, "exidx", 5) == 0)
17673 return SHT_ARM_EXIDX;
17674
17675 return -1;
17676 }
17677 \f
17678 /* Code to deal with unwinding tables. */
17679
17680 static void add_unwind_adjustsp (offsetT);
17681
17682 /* Generate any deferred unwind frame offset. */
17683
17684 static void
17685 flush_pending_unwind (void)
17686 {
17687 offsetT offset;
17688
17689 offset = unwind.pending_offset;
17690 unwind.pending_offset = 0;
17691 if (offset != 0)
17692 add_unwind_adjustsp (offset);
17693 }
17694
17695 /* Add an opcode to this list for this function. Two-byte opcodes should
17696 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
17697 order. */
17698
17699 static void
17700 add_unwind_opcode (valueT op, int length)
17701 {
17702 /* Add any deferred stack adjustment. */
17703 if (unwind.pending_offset)
17704 flush_pending_unwind ();
17705
17706 unwind.sp_restored = 0;
17707
17708 if (unwind.opcode_count + length > unwind.opcode_alloc)
17709 {
17710 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
17711 if (unwind.opcodes)
17712 unwind.opcodes = xrealloc (unwind.opcodes,
17713 unwind.opcode_alloc);
17714 else
17715 unwind.opcodes = xmalloc (unwind.opcode_alloc);
17716 }
17717 while (length > 0)
17718 {
17719 length--;
17720 unwind.opcodes[unwind.opcode_count] = op & 0xff;
17721 op >>= 8;
17722 unwind.opcode_count++;
17723 }
17724 }
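/* For example, the two-byte EHABI opcode 0x84 0x0f ("pop {r4-r7, r14}")
   would be passed as add_unwind_opcode (0x840f, 2).  The bytes are stored
   low byte first here and come back out as 0x84, 0x0f when the list is
   reversed in create_unwind_entry. */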
17725
17726 /* Add unwind opcodes to adjust the stack pointer. */
17727
17728 static void
17729 add_unwind_adjustsp (offsetT offset)
17730 {
17731 valueT op;
17732
17733 if (offset > 0x200)
17734 {
17735 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
17736 char bytes[5];
17737 int n;
17738 valueT o;
17739
17740 /* Long form: 0xb2, uleb128. */
17741 /* This might not fit in a word so add the individual bytes,
17742 remembering the list is built in reverse order. */
17743 o = (valueT) ((offset - 0x204) >> 2);
17744 if (o == 0)
17745 add_unwind_opcode (0, 1);
17746
17747 /* Calculate the uleb128 encoding of the offset. */
17748 n = 0;
17749 while (o)
17750 {
17751 bytes[n] = o & 0x7f;
17752 o >>= 7;
17753 if (o)
17754 bytes[n] |= 0x80;
17755 n++;
17756 }
17757 /* Add the insn. */
17758 for (; n; n--)
17759 add_unwind_opcode (bytes[n - 1], 1);
17760 add_unwind_opcode (0xb2, 1);
17761 }
17762 else if (offset > 0x100)
17763 {
17764 /* Two short opcodes. */
17765 add_unwind_opcode (0x3f, 1);
17766 op = (offset - 0x104) >> 2;
17767 add_unwind_opcode (op, 1);
17768 }
17769 else if (offset > 0)
17770 {
17771 /* Short opcode. */
17772 op = (offset - 4) >> 2;
17773 add_unwind_opcode (op, 1);
17774 }
17775 else if (offset < 0)
17776 {
17777 offset = -offset;
17778 while (offset > 0x100)
17779 {
17780 add_unwind_opcode (0x7f, 1);
17781 offset -= 0x100;
17782 }
17783 op = ((offset - 4) >> 2) | 0x40;
17784 add_unwind_opcode (op, 1);
17785 }
17786 }
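/* A worked example of the long form above: for OFFSET == 0x300, o is
   (0x300 - 0x204) >> 2 == 0x3f, whose uleb128 encoding is the single
   byte 0x3f, so the opcodes 0x3f and then 0xb2 are added.  After the
   final reversal the unwinder sees 0xb2 0x3f, i.e.
   "vsp += 0x204 + (0x3f << 2)" == 0x300. */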
17787
17788 /* Finish the list of unwind opcodes for this function. */
17789 static void
17790 finish_unwind_opcodes (void)
17791 {
17792 valueT op;
17793
17794 if (unwind.fp_used)
17795 {
17796 /* Adjust sp as necessary. */
17797 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
17798 flush_pending_unwind ();
17799
17800 /* Restore sp from the frame pointer register. */
17801 op = 0x90 | unwind.fp_reg;
17802 add_unwind_opcode (op, 1);
17803 }
17804 else
17805 flush_pending_unwind ();
17806 }
17807
17808
17809 /* Start an exception table entry. If idx is nonzero this is an index table
17810 entry. */
17811
17812 static void
17813 start_unwind_section (const segT text_seg, int idx)
17814 {
17815 const char * text_name;
17816 const char * prefix;
17817 const char * prefix_once;
17818 const char * group_name;
17819 size_t prefix_len;
17820 size_t text_len;
17821 char * sec_name;
17822 size_t sec_name_len;
17823 int type;
17824 int flags;
17825 int linkonce;
17826
17827 if (idx)
17828 {
17829 prefix = ELF_STRING_ARM_unwind;
17830 prefix_once = ELF_STRING_ARM_unwind_once;
17831 type = SHT_ARM_EXIDX;
17832 }
17833 else
17834 {
17835 prefix = ELF_STRING_ARM_unwind_info;
17836 prefix_once = ELF_STRING_ARM_unwind_info_once;
17837 type = SHT_PROGBITS;
17838 }
17839
17840 text_name = segment_name (text_seg);
17841 if (streq (text_name, ".text"))
17842 text_name = "";
17843
17844 if (strncmp (text_name, ".gnu.linkonce.t.",
17845 strlen (".gnu.linkonce.t.")) == 0)
17846 {
17847 prefix = prefix_once;
17848 text_name += strlen (".gnu.linkonce.t.");
17849 }
17850
17851 prefix_len = strlen (prefix);
17852 text_len = strlen (text_name);
17853 sec_name_len = prefix_len + text_len;
17854 sec_name = xmalloc (sec_name_len + 1);
17855 memcpy (sec_name, prefix, prefix_len);
17856 memcpy (sec_name + prefix_len, text_name, text_len);
17857 sec_name[prefix_len + text_len] = '\0';
17858
17859 flags = SHF_ALLOC;
17860 linkonce = 0;
17861 group_name = 0;
17862
17863 /* Handle COMDAT group. */
17864 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
17865 {
17866 group_name = elf_group_name (text_seg);
17867 if (group_name == NULL)
17868 {
17869 as_bad (_("Group section `%s' has no group signature"),
17870 segment_name (text_seg));
17871 ignore_rest_of_line ();
17872 return;
17873 }
17874 flags |= SHF_GROUP;
17875 linkonce = 1;
17876 }
17877
17878 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
17879
17880 /* Set the section link for index tables. */
17881 if (idx)
17882 elf_linked_to_section (now_seg) = text_seg;
17883 }
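/* For example, assuming the usual prefix strings (".ARM.exidx" and
   ".ARM.extab"), a function in ".text.foo" gets its index entries in
   ".ARM.exidx.text.foo" and its table entries in ".ARM.extab.text.foo",
   while plain ".text" maps to just ".ARM.exidx" and ".ARM.extab". */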
17884
17885
17886 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
17887 personality routine data. Returns zero, or the index table value for
17888 an inline entry. */
17889
17890 static valueT
17891 create_unwind_entry (int have_data)
17892 {
17893 int size;
17894 addressT where;
17895 char *ptr;
17896 /* The current word of data. */
17897 valueT data;
17898 /* The number of bytes left in this word. */
17899 int n;
17900
17901 finish_unwind_opcodes ();
17902
17903 /* Remember the current text section. */
17904 unwind.saved_seg = now_seg;
17905 unwind.saved_subseg = now_subseg;
17906
17907 start_unwind_section (now_seg, 0);
17908
17909 if (unwind.personality_routine == NULL)
17910 {
17911 if (unwind.personality_index == -2)
17912 {
17913 if (have_data)
17914 as_bad (_("handlerdata in cantunwind frame"));
17915 return 1; /* EXIDX_CANTUNWIND. */
17916 }
17917
17918 /* Use a default personality routine if none is specified. */
17919 if (unwind.personality_index == -1)
17920 {
17921 if (unwind.opcode_count > 3)
17922 unwind.personality_index = 1;
17923 else
17924 unwind.personality_index = 0;
17925 }
17926
17927 /* Space for the personality routine entry. */
17928 if (unwind.personality_index == 0)
17929 {
17930 if (unwind.opcode_count > 3)
17931 as_bad (_("too many unwind opcodes for personality routine 0"));
17932
17933 if (!have_data)
17934 {
17935 /* All the data is inline in the index table. */
17936 data = 0x80;
17937 n = 3;
17938 while (unwind.opcode_count > 0)
17939 {
17940 unwind.opcode_count--;
17941 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17942 n--;
17943 }
17944
17945 /* Pad with "finish" opcodes. */
17946 while (n--)
17947 data = (data << 8) | 0xb0;
17948
17949 return data;
17950 }
17951 size = 0;
17952 }
17953 else
17954 /* We get two opcodes "free" in the first word. */
17955 size = unwind.opcode_count - 2;
17956 }
17957 else
17958 /* An extra byte is required for the opcode count. */
17959 size = unwind.opcode_count + 1;
17960
17961 size = (size + 3) >> 2;
17962 if (size > 0xff)
17963 as_bad (_("too many unwind opcodes"));
17964
17965 frag_align (2, 0, 0);
17966 record_alignment (now_seg, 2);
17967 unwind.table_entry = expr_build_dot ();
17968
17969 /* Allocate the table entry. */
17970 ptr = frag_more ((size << 2) + 4);
17971 where = frag_now_fix () - ((size << 2) + 4);
17972
17973 switch (unwind.personality_index)
17974 {
17975 case -1:
17976 /* ??? Should this be a PLT generating relocation? */
17977 /* Custom personality routine. */
17978 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
17979 BFD_RELOC_ARM_PREL31);
17980
17981 where += 4;
17982 ptr += 4;
17983
17984 /* Set the first byte to the number of additional words. */
17985 data = size - 1;
17986 n = 3;
17987 break;
17988
17989 /* ABI defined personality routines. */
17990 case 0:
17991 /* Three opcode bytes are packed into the first word. */
17992 data = 0x80;
17993 n = 3;
17994 break;
17995
17996 case 1:
17997 case 2:
17998 /* The size and first two opcode bytes go in the first word. */
17999 data = ((0x80 + unwind.personality_index) << 8) | size;
18000 n = 2;
18001 break;
18002
18003 default:
18004 /* Should never happen. */
18005 abort ();
18006 }
18007
18008 /* Pack the opcodes into words (MSB first), reversing the list at the same
18009 time. */
18010 while (unwind.opcode_count > 0)
18011 {
18012 if (n == 0)
18013 {
18014 md_number_to_chars (ptr, data, 4);
18015 ptr += 4;
18016 n = 4;
18017 data = 0;
18018 }
18019 unwind.opcode_count--;
18020 n--;
18021 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
18022 }
18023
18024 /* Finish off the last word. */
18025 if (n < 4)
18026 {
18027 /* Pad with "finish" opcodes. */
18028 while (n--)
18029 data = (data << 8) | 0xb0;
18030
18031 md_number_to_chars (ptr, data, 4);
18032 }
18033
18034 if (!have_data)
18035 {
18036 /* Add an empty descriptor if there is no user-specified data. */
18037 ptr = frag_more (4);
18038 md_number_to_chars (ptr, 0, 4);
18039 }
18040
18041 return 0;
18042 }
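/* A sketch of the fully inlined case handled near the top of
   create_unwind_entry: with the default personality routine 0, no handler
   data, and the (reverse-ordered) opcode list { 0x0f, 0x84 }, the word
   built is 0x80840f and the padding appends one 0xb0 "finish" byte, so
   the value returned for the index table is 0x80840fb0. */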
18043
18044
18045 /* Initialize the DWARF-2 unwind information for this procedure. */
18046
18047 void
18048 tc_arm_frame_initial_instructions (void)
18049 {
18050 cfi_add_CFA_def_cfa (REG_SP, 0);
18051 }
18052 #endif /* OBJ_ELF */
18053
18054 /* Convert REGNAME to a DWARF-2 register number. */
18055
18056 int
18057 tc_arm_regname_to_dw2regnum (char *regname)
18058 {
18059 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
18060
18061 if (reg == FAIL)
18062 return -1;
18063
18064 return reg;
18065 }
18066
18067 #ifdef TE_PE
18068 void
18069 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
18070 {
18071 expressionS expr;
18072
18073 expr.X_op = O_secrel;
18074 expr.X_add_symbol = symbol;
18075 expr.X_add_number = 0;
18076 emit_expr (&expr, size);
18077 }
18078 #endif
18079
18080 /* MD interface: Symbol and relocation handling. */
18081
18082 /* Return the address within the segment that a PC-relative fixup is
18083 relative to. For ARM, PC-relative fixups applied to instructions
18084 are generally relative to the location of the fixup plus 8 bytes.
18085 Thumb branches are offset by 4, and Thumb loads relative to PC
18086 require special handling. */
18087
18088 long
18089 md_pcrel_from_section (fixS * fixP, segT seg)
18090 {
18091 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
18092
18093 /* If this is pc-relative and we are going to emit a relocation
18094 then we just want to put out any pipeline compensation that the linker
18095 will need. Otherwise we want to use the calculated base.
18096 For WinCE we skip the bias for externals as well, since this
18097 is how the MS ARM-CE assembler behaves and we want to be compatible. */
18098 if (fixP->fx_pcrel
18099 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
18100 || (arm_force_relocation (fixP)
18101 #ifdef TE_WINCE
18102 && !S_IS_EXTERNAL (fixP->fx_addsy)
18103 #endif
18104 )))
18105 base = 0;
18106
18107 switch (fixP->fx_r_type)
18108 {
18109 /* PC relative addressing on the Thumb is slightly odd as the
18110 bottom two bits of the PC are forced to zero for the
18111 calculation. This happens *after* application of the
18112 pipeline offset. However, Thumb adrl already adjusts for
18113 this, so we need not do it again. */
18114 case BFD_RELOC_ARM_THUMB_ADD:
18115 return base & ~3;
18116
18117 case BFD_RELOC_ARM_THUMB_OFFSET:
18118 case BFD_RELOC_ARM_T32_OFFSET_IMM:
18119 case BFD_RELOC_ARM_T32_ADD_PC12:
18120 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
18121 return (base + 4) & ~3;
18122
18123 /* Thumb branches are simply offset by +4. */
18124 case BFD_RELOC_THUMB_PCREL_BRANCH7:
18125 case BFD_RELOC_THUMB_PCREL_BRANCH9:
18126 case BFD_RELOC_THUMB_PCREL_BRANCH12:
18127 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18128 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18129 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18130 case BFD_RELOC_THUMB_PCREL_BLX:
18131 return base + 4;
18132
18133 /* ARM mode branches are offset by +8. However, the Windows CE
18134 loader expects the relocation not to take this into account. */
18135 case BFD_RELOC_ARM_PCREL_BRANCH:
18136 case BFD_RELOC_ARM_PCREL_CALL:
18137 case BFD_RELOC_ARM_PCREL_JUMP:
18138 case BFD_RELOC_ARM_PCREL_BLX:
18139 case BFD_RELOC_ARM_PLT32:
18140 #ifdef TE_WINCE
18141 /* When a fixup is handled immediately, because we have already
18142 discovered the value of the symbol or the address of the frag involved,
18143 we must account for the +8 pipeline offset here, as the OS loader will
18144 never see the reloc; see fixup_segment() in write.c.
18145 The S_IS_EXTERNAL test handles global symbols, which need the
18146 calculated base, not just the pipeline compensation the linker applies.  */
18147 if (fixP->fx_pcrel
18148 && fixP->fx_addsy != NULL
18149 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
18150 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
18151 return base + 8;
18152 return base;
18153 #else
18154 return base + 8;
18155 #endif
18156
18157 /* ARM mode loads relative to PC are also offset by +8. Unlike
18158 branches, the Windows CE loader *does* expect the relocation
18159 to take this into account. */
18160 case BFD_RELOC_ARM_OFFSET_IMM:
18161 case BFD_RELOC_ARM_OFFSET_IMM8:
18162 case BFD_RELOC_ARM_HWLITERAL:
18163 case BFD_RELOC_ARM_LITERAL:
18164 case BFD_RELOC_ARM_CP_OFF_IMM:
18165 return base + 8;
18166
18167
18168 /* Other PC-relative relocations are un-offset. */
18169 default:
18170 return base;
18171 }
18172 }
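
/* Minimal illustrative sketch (not compiled in, and not used by the
   assembler) of the pipeline bias applied above, assuming the common
   case where BASE has not been zeroed because the fixup resolves
   locally: ARM-state instructions read the PC as their own address
   plus 8, while Thumb branches see their address plus 4.  */
#if 0
static long
example_arm_pcrel_bias (long insn_address, int is_thumb_branch)
{
  /* Thumb PC-relative loads and ADDs additionally force the low bits
     of the result to zero; that case is ignored here.  */
  return insn_address + (is_thumb_branch ? 4 : 8);
}
#endif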
18173
18174 /* Under ELF we need to provide a default value for the _GLOBAL_OFFSET_TABLE_
18175 symbol. Otherwise we have no need to default the values of symbols. */
18176
18177 symbolS *
18178 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
18179 {
18180 #ifdef OBJ_ELF
18181 if (name[0] == '_' && name[1] == 'G'
18182 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
18183 {
18184 if (!GOT_symbol)
18185 {
18186 if (symbol_find (name))
18187 as_bad (_("GOT already in the symbol table"));
18188
18189 GOT_symbol = symbol_new (name, undefined_section,
18190 (valueT) 0, & zero_address_frag);
18191 }
18192
18193 return GOT_symbol;
18194 }
18195 #endif
18196
18197 return 0;
18198 }
18199
18200 /* Subroutine of md_apply_fix. Check to see if an immediate can be
18201 computed as two separate immediate values, added together. We
18202 already know that this value cannot be computed by just one ARM
18203 instruction. */
18204
18205 static unsigned int
18206 validate_immediate_twopart (unsigned int val,
18207 unsigned int * highpart)
18208 {
18209 unsigned int a;
18210 unsigned int i;
18211
18212 for (i = 0; i < 32; i += 2)
18213 if (((a = rotate_left (val, i)) & 0xff) != 0)
18214 {
18215 if (a & 0xff00)
18216 {
18217 if (a & ~ 0xffff)
18218 continue;
18219 * highpart = (a >> 8) | ((i + 24) << 7);
18220 }
18221 else if (a & 0xff0000)
18222 {
18223 if (a & 0xff000000)
18224 continue;
18225 * highpart = (a >> 16) | ((i + 16) << 7);
18226 }
18227 else
18228 {
18229 assert (a & 0xff000000);
18230 * highpart = (a >> 24) | ((i + 8) << 7);
18231 }
18232
18233 return (a & 0xff) | (i << 7);
18234 }
18235
18236 return FAIL;
18237 }
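
/* Illustrative sketch (not compiled in) of the "8-bit constant rotated
   by an even amount" test that the encodings above rely on.  For
   instance, 0xff000f00 fails this single-immediate test, but is
   accepted by validate_immediate_twopart above as 0x00000f00 +
   0xff000000, each of which passes it.  */
#if 0
static int
example_is_single_arm_immediate (unsigned int val)
{
  unsigned int i;

  for (i = 0; i < 32; i += 2)
    {
      /* Rotate VAL by I bits and check whether the result fits in 8 bits.  */
      unsigned int rotated = i ? ((val >> i) | (val << (32 - i))) : val;

      if ((rotated & ~0xffu) == 0)
	return 1;
    }
  return 0;
}
#endif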
18238
18239 static int
18240 validate_offset_imm (unsigned int val, int hwse)
18241 {
18242 if ((hwse && val > 255) || val > 4095)
18243 return FAIL;
18244 return val;
18245 }
18246
18247 /* Subroutine of md_apply_fix. Do those data_ops which can take a
18248 negative immediate constant by altering the instruction. A bit of
18249 a hack really.
18250 MOV <-> MVN
18251 AND <-> BIC
18252 ADC <-> SBC
18253 by inverting the second operand, and
18254 ADD <-> SUB
18255 CMP <-> CMN
18256 by negating the second operand. */
18257
18258 static int
18259 negate_data_op (unsigned long * instruction,
18260 unsigned long value)
18261 {
18262 int op, new_inst;
18263 unsigned long negated, inverted;
18264
18265 negated = encode_arm_immediate (-value);
18266 inverted = encode_arm_immediate (~value);
18267
18268 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
18269 switch (op)
18270 {
18271 /* First negates. */
18272 case OPCODE_SUB: /* ADD <-> SUB */
18273 new_inst = OPCODE_ADD;
18274 value = negated;
18275 break;
18276
18277 case OPCODE_ADD:
18278 new_inst = OPCODE_SUB;
18279 value = negated;
18280 break;
18281
18282 case OPCODE_CMP: /* CMP <-> CMN */
18283 new_inst = OPCODE_CMN;
18284 value = negated;
18285 break;
18286
18287 case OPCODE_CMN:
18288 new_inst = OPCODE_CMP;
18289 value = negated;
18290 break;
18291
18292 /* Now Inverted ops. */
18293 case OPCODE_MOV: /* MOV <-> MVN */
18294 new_inst = OPCODE_MVN;
18295 value = inverted;
18296 break;
18297
18298 case OPCODE_MVN:
18299 new_inst = OPCODE_MOV;
18300 value = inverted;
18301 break;
18302
18303 case OPCODE_AND: /* AND <-> BIC */
18304 new_inst = OPCODE_BIC;
18305 value = inverted;
18306 break;
18307
18308 case OPCODE_BIC:
18309 new_inst = OPCODE_AND;
18310 value = inverted;
18311 break;
18312
18313 case OPCODE_ADC: /* ADC <-> SBC */
18314 new_inst = OPCODE_SBC;
18315 value = inverted;
18316 break;
18317
18318 case OPCODE_SBC:
18319 new_inst = OPCODE_ADC;
18320 value = inverted;
18321 break;
18322
18323 /* We cannot do anything. */
18324 default:
18325 return FAIL;
18326 }
18327
18328 if (value == (unsigned) FAIL)
18329 return FAIL;
18330
18331 *instruction &= OPCODE_MASK;
18332 *instruction |= new_inst << DATA_OP_SHIFT;
18333 return value;
18334 }
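
/* For example, an ADD whose immediate fixup resolves to -4 is rewritten
   above as a SUB of +4, and a MOV whose immediate resolves to -1
   (0xffffffff) becomes an MVN of 0.  */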
18335
18336 /* Like negate_data_op, but for Thumb-2. */
18337
18338 static unsigned int
18339 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
18340 {
18341 int op, new_inst;
18342 int rd;
18343 unsigned int negated, inverted;
18344
18345 negated = encode_thumb32_immediate (-value);
18346 inverted = encode_thumb32_immediate (~value);
18347
18348 rd = (*instruction >> 8) & 0xf;
18349 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
18350 switch (op)
18351 {
18352 /* ADD <-> SUB. Includes CMP <-> CMN. */
18353 case T2_OPCODE_SUB:
18354 new_inst = T2_OPCODE_ADD;
18355 value = negated;
18356 break;
18357
18358 case T2_OPCODE_ADD:
18359 new_inst = T2_OPCODE_SUB;
18360 value = negated;
18361 break;
18362
18363 /* ORR <-> ORN. Includes MOV <-> MVN. */
18364 case T2_OPCODE_ORR:
18365 new_inst = T2_OPCODE_ORN;
18366 value = inverted;
18367 break;
18368
18369 case T2_OPCODE_ORN:
18370 new_inst = T2_OPCODE_ORR;
18371 value = inverted;
18372 break;
18373
18374 /* AND <-> BIC. TST has no inverted equivalent. */
18375 case T2_OPCODE_AND:
18376 new_inst = T2_OPCODE_BIC;
18377 if (rd == 15)
18378 value = FAIL;
18379 else
18380 value = inverted;
18381 break;
18382
18383 case T2_OPCODE_BIC:
18384 new_inst = T2_OPCODE_AND;
18385 value = inverted;
18386 break;
18387
18388 /* ADC <-> SBC */
18389 case T2_OPCODE_ADC:
18390 new_inst = T2_OPCODE_SBC;
18391 value = inverted;
18392 break;
18393
18394 case T2_OPCODE_SBC:
18395 new_inst = T2_OPCODE_ADC;
18396 value = inverted;
18397 break;
18398
18399 /* We cannot do anything. */
18400 default:
18401 return FAIL;
18402 }
18403
18404 if (value == (unsigned int)FAIL)
18405 return FAIL;
18406
18407 *instruction &= T2_OPCODE_MASK;
18408 *instruction |= new_inst << T2_DATA_OP_SHIFT;
18409 return value;
18410 }
18411
18412 /* Read a 32-bit thumb instruction from buf. */
18413 static unsigned long
18414 get_thumb32_insn (char * buf)
18415 {
18416 unsigned long insn;
18417 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
18418 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18419
18420 return insn;
18421 }
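
/* A 32-bit Thumb instruction is stored as two halfwords, most
   significant halfword first, each halfword in the target's byte order;
   its counterpart put_thumb32_insn, used further down, writes the two
   halfwords back in the same order.  */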
18422
18423
18424 /* We usually want to set the low bit on the address of thumb function
18425 symbols. In particular .word foo - . should have the low bit set.
18426 Generic code tries to fold the difference of two symbols to
18427 a constant. Prevent this and force a relocation when the first symbol
18428 is a Thumb function. */
18429 int
18430 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
18431 {
18432 if (op == O_subtract
18433 && l->X_op == O_symbol
18434 && r->X_op == O_symbol
18435 && THUMB_IS_FUNC (l->X_add_symbol))
18436 {
18437 l->X_op = O_subtract;
18438 l->X_op_symbol = r->X_add_symbol;
18439 l->X_add_number -= r->X_add_number;
18440 return 1;
18441 }
18442 /* Process as normal. */
18443 return 0;
18444 }
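
/* For example, with

	.thumb_func
   foo:
	bx	lr

	.word	foo - .

   the hook above prevents "foo - ." from being folded to a bare
   constant, forcing a relocation so that the low (Thumb) bit of foo is
   preserved in the result, as described in the comment above.  */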
18445
18446 void
18447 md_apply_fix (fixS * fixP,
18448 valueT * valP,
18449 segT seg)
18450 {
18451 offsetT value = * valP;
18452 offsetT newval;
18453 unsigned int newimm;
18454 unsigned long temp;
18455 int sign;
18456 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
18457
18458 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
18459
18460 /* Note whether this will delete the relocation. */
18461
18462 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
18463 fixP->fx_done = 1;
18464
18465 /* On a 64-bit host, silently truncate 'value' to 32 bits for
18466 consistency with the behaviour on 32-bit hosts; the xor/subtract pair
18467 below sign-extends the result. Remember value for emit_reloc. */
18468 value &= 0xffffffff;
18469 value ^= 0x80000000;
18470 value -= 0x80000000;
18471
18472 *valP = value;
18473 fixP->fx_addnumber = value;
18474
18475 /* Same treatment for fixP->fx_offset. */
18476 fixP->fx_offset &= 0xffffffff;
18477 fixP->fx_offset ^= 0x80000000;
18478 fixP->fx_offset -= 0x80000000;
18479
18480 switch (fixP->fx_r_type)
18481 {
18482 case BFD_RELOC_NONE:
18483 /* This will need to go in the object file. */
18484 fixP->fx_done = 0;
18485 break;
18486
18487 case BFD_RELOC_ARM_IMMEDIATE:
18488 /* We claim that this fixup has been processed here,
18489 even if in fact we generate an error because we do
18490 not have a reloc for it, so tc_gen_reloc will reject it. */
18491 fixP->fx_done = 1;
18492
18493 if (fixP->fx_addsy
18494 && ! S_IS_DEFINED (fixP->fx_addsy))
18495 {
18496 as_bad_where (fixP->fx_file, fixP->fx_line,
18497 _("undefined symbol %s used as an immediate value"),
18498 S_GET_NAME (fixP->fx_addsy));
18499 break;
18500 }
18501
18502 newimm = encode_arm_immediate (value);
18503 temp = md_chars_to_number (buf, INSN_SIZE);
18504
18505 /* If the instruction will fail, see if we can fix things up by
18506 changing the opcode. */
18507 if (newimm == (unsigned int) FAIL
18508 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
18509 {
18510 as_bad_where (fixP->fx_file, fixP->fx_line,
18511 _("invalid constant (%lx) after fixup"),
18512 (unsigned long) value);
18513 break;
18514 }
18515
18516 newimm |= (temp & 0xfffff000);
18517 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
18518 break;
18519
18520 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
18521 {
18522 unsigned int highpart = 0;
18523 unsigned int newinsn = 0xe1a00000; /* nop. */
18524
18525 newimm = encode_arm_immediate (value);
18526 temp = md_chars_to_number (buf, INSN_SIZE);
18527
18528 /* If the instruction will fail, see if we can fix things up by
18529 changing the opcode. */
18530 if (newimm == (unsigned int) FAIL
18531 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
18532 {
18533 /* No ? OK - try using two ADD instructions to generate
18534 the value. */
18535 newimm = validate_immediate_twopart (value, & highpart);
18536
18537 /* Yes - then make sure that the second instruction is
18538 also an add. */
18539 if (newimm != (unsigned int) FAIL)
18540 newinsn = temp;
18541 /* Still No ? Try using a negated value. */
18542 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
18543 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
18544 /* Otherwise - give up. */
18545 else
18546 {
18547 as_bad_where (fixP->fx_file, fixP->fx_line,
18548 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
18549 (long) value);
18550 break;
18551 }
18552
18553 /* Replace the first operand in the 2nd instruction (which
18554 is the PC) with the destination register. We have
18555 already added in the PC in the first instruction and we
18556 do not want to do it again. */
18557 newinsn &= ~ 0xf0000;
18558 newinsn |= ((newinsn & 0x0f000) << 4);
18559 }
18560
18561 newimm |= (temp & 0xfffff000);
18562 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
18563
18564 highpart |= (newinsn & 0xfffff000);
18565 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
18566 }
18567 break;
18568
18569 case BFD_RELOC_ARM_OFFSET_IMM:
18570 if (!fixP->fx_done && seg->use_rela_p)
18571 value = 0;
18572
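      /* Fall through.  */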
18573 case BFD_RELOC_ARM_LITERAL:
18574 sign = value >= 0;
18575
18576 if (value < 0)
18577 value = - value;
18578
18579 if (validate_offset_imm (value, 0) == FAIL)
18580 {
18581 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
18582 as_bad_where (fixP->fx_file, fixP->fx_line,
18583 _("invalid literal constant: pool needs to be closer"));
18584 else
18585 as_bad_where (fixP->fx_file, fixP->fx_line,
18586 _("bad immediate value for offset (%ld)"),
18587 (long) value);
18588 break;
18589 }
18590
18591 newval = md_chars_to_number (buf, INSN_SIZE);
18592 newval &= 0xff7ff000;
18593 newval |= value | (sign ? INDEX_UP : 0);
18594 md_number_to_chars (buf, newval, INSN_SIZE);
18595 break;
18596
18597 case BFD_RELOC_ARM_OFFSET_IMM8:
18598 case BFD_RELOC_ARM_HWLITERAL:
18599 sign = value >= 0;
18600
18601 if (value < 0)
18602 value = - value;
18603
18604 if (validate_offset_imm (value, 1) == FAIL)
18605 {
18606 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
18607 as_bad_where (fixP->fx_file, fixP->fx_line,
18608 _("invalid literal constant: pool needs to be closer"));
18609 else
18610 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
18611 (long) value);
18612 break;
18613 }
18614
18615 newval = md_chars_to_number (buf, INSN_SIZE);
18616 newval &= 0xff7ff0f0;
18617 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
18618 md_number_to_chars (buf, newval, INSN_SIZE);
18619 break;
18620
18621 case BFD_RELOC_ARM_T32_OFFSET_U8:
18622 if (value < 0 || value > 1020 || value % 4 != 0)
18623 as_bad_where (fixP->fx_file, fixP->fx_line,
18624 _("bad immediate value for offset (%ld)"), (long) value);
18625 value /= 4;
18626
18627 newval = md_chars_to_number (buf+2, THUMB_SIZE);
18628 newval |= value;
18629 md_number_to_chars (buf+2, newval, THUMB_SIZE);
18630 break;
18631
18632 case BFD_RELOC_ARM_T32_OFFSET_IMM:
18633 /* This is a complicated relocation used for all varieties of Thumb32
18634 load/store instruction with immediate offset:
18635
18636 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
18637 *4, optional writeback(W)
18638 (doubleword load/store)
18639
18640 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
18641 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
18642 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
18643 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
18644 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
18645
18646 Uppercase letters indicate bits that are already encoded at
18647 this point. Lowercase letters are our problem. For the
18648 second block of instructions, the secondary opcode nybble
18649 (bits 8..11) is present, and bit 23 is zero, even if this is
18650 a PC-relative operation. */
18651 newval = md_chars_to_number (buf, THUMB_SIZE);
18652 newval <<= 16;
18653 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
18654
18655 if ((newval & 0xf0000000) == 0xe0000000)
18656 {
18657 /* Doubleword load/store: 8-bit offset, scaled by 4. */
18658 if (value >= 0)
18659 newval |= (1 << 23);
18660 else
18661 value = -value;
18662 if (value % 4 != 0)
18663 {
18664 as_bad_where (fixP->fx_file, fixP->fx_line,
18665 _("offset not a multiple of 4"));
18666 break;
18667 }
18668 value /= 4;
18669 if (value > 0xff)
18670 {
18671 as_bad_where (fixP->fx_file, fixP->fx_line,
18672 _("offset out of range"));
18673 break;
18674 }
18675 newval &= ~0xff;
18676 }
18677 else if ((newval & 0x000f0000) == 0x000f0000)
18678 {
18679 /* PC-relative, 12-bit offset. */
18680 if (value >= 0)
18681 newval |= (1 << 23);
18682 else
18683 value = -value;
18684 if (value > 0xfff)
18685 {
18686 as_bad_where (fixP->fx_file, fixP->fx_line,
18687 _("offset out of range"));
18688 break;
18689 }
18690 newval &= ~0xfff;
18691 }
18692 else if ((newval & 0x00000100) == 0x00000100)
18693 {
18694 /* Writeback: 8-bit, +/- offset. */
18695 if (value >= 0)
18696 newval |= (1 << 9);
18697 else
18698 value = -value;
18699 if (value > 0xff)
18700 {
18701 as_bad_where (fixP->fx_file, fixP->fx_line,
18702 _("offset out of range"));
18703 break;
18704 }
18705 newval &= ~0xff;
18706 }
18707 else if ((newval & 0x00000f00) == 0x00000e00)
18708 {
18709 /* T-instruction: positive 8-bit offset. */
18710 if (value < 0 || value > 0xff)
18711 {
18712 as_bad_where (fixP->fx_file, fixP->fx_line,
18713 _("offset out of range"));
18714 break;
18715 }
18716 newval &= ~0xff;
18717 newval |= value;
18718 }
18719 else
18720 {
18721 /* Positive 12-bit or negative 8-bit offset. */
18722 int limit;
18723 if (value >= 0)
18724 {
18725 newval |= (1 << 23);
18726 limit = 0xfff;
18727 }
18728 else
18729 {
18730 value = -value;
18731 limit = 0xff;
18732 }
18733 if (value > limit)
18734 {
18735 as_bad_where (fixP->fx_file, fixP->fx_line,
18736 _("offset out of range"));
18737 break;
18738 }
18739 newval &= ~limit;
18740 }
18741
18742 newval |= value;
18743 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
18744 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
18745 break;
18746
18747 case BFD_RELOC_ARM_SHIFT_IMM:
18748 newval = md_chars_to_number (buf, INSN_SIZE);
18749 if (((unsigned long) value) > 32
18750 || (value == 32
18751 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
18752 {
18753 as_bad_where (fixP->fx_file, fixP->fx_line,
18754 _("shift expression is too large"));
18755 break;
18756 }
18757
18758 if (value == 0)
18759 /* Shifts of zero must be done as lsl. */
18760 newval &= ~0x60;
18761 else if (value == 32)
18762 value = 0;
18763 newval &= 0xfffff07f;
18764 newval |= (value & 0x1f) << 7;
18765 md_number_to_chars (buf, newval, INSN_SIZE);
18766 break;
18767
18768 case BFD_RELOC_ARM_T32_IMMEDIATE:
18769 case BFD_RELOC_ARM_T32_ADD_IMM:
18770 case BFD_RELOC_ARM_T32_IMM12:
18771 case BFD_RELOC_ARM_T32_ADD_PC12:
18772 /* We claim that this fixup has been processed here,
18773 even if in fact we generate an error because we do
18774 not have a reloc for it, so tc_gen_reloc will reject it. */
18775 fixP->fx_done = 1;
18776
18777 if (fixP->fx_addsy
18778 && ! S_IS_DEFINED (fixP->fx_addsy))
18779 {
18780 as_bad_where (fixP->fx_file, fixP->fx_line,
18781 _("undefined symbol %s used as an immediate value"),
18782 S_GET_NAME (fixP->fx_addsy));
18783 break;
18784 }
18785
18786 newval = md_chars_to_number (buf, THUMB_SIZE);
18787 newval <<= 16;
18788 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
18789
18790 newimm = FAIL;
18791 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
18792 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18793 {
18794 newimm = encode_thumb32_immediate (value);
18795 if (newimm == (unsigned int) FAIL)
18796 newimm = thumb32_negate_data_op (&newval, value);
18797 }
18798 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
18799 && newimm == (unsigned int) FAIL)
18800 {
18801 /* Turn add/sub into addw/subw. */
18802 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18803 newval = (newval & 0xfeffffff) | 0x02000000;
18804
18805 /* 12 bit immediate for addw/subw. */
18806 if (value < 0)
18807 {
18808 value = -value;
18809 newval ^= 0x00a00000;
18810 }
18811 if (value > 0xfff)
18812 newimm = (unsigned int) FAIL;
18813 else
18814 newimm = value;
18815 }
18816
18817 if (newimm == (unsigned int)FAIL)
18818 {
18819 as_bad_where (fixP->fx_file, fixP->fx_line,
18820 _("invalid constant (%lx) after fixup"),
18821 (unsigned long) value);
18822 break;
18823 }
18824
18825 newval |= (newimm & 0x800) << 15;
18826 newval |= (newimm & 0x700) << 4;
18827 newval |= (newimm & 0x0ff);
18828
18829 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
18830 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
18831 break;
18832
18833 case BFD_RELOC_ARM_SMC:
18834 if (((unsigned long) value) > 0xffff)
18835 as_bad_where (fixP->fx_file, fixP->fx_line,
18836 _("invalid smc expression"));
18837 newval = md_chars_to_number (buf, INSN_SIZE);
18838 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
18839 md_number_to_chars (buf, newval, INSN_SIZE);
18840 break;
18841
18842 case BFD_RELOC_ARM_SWI:
18843 if (fixP->tc_fix_data != 0)
18844 {
18845 if (((unsigned long) value) > 0xff)
18846 as_bad_where (fixP->fx_file, fixP->fx_line,
18847 _("invalid swi expression"));
18848 newval = md_chars_to_number (buf, THUMB_SIZE);
18849 newval |= value;
18850 md_number_to_chars (buf, newval, THUMB_SIZE);
18851 }
18852 else
18853 {
18854 if (((unsigned long) value) > 0x00ffffff)
18855 as_bad_where (fixP->fx_file, fixP->fx_line,
18856 _("invalid swi expression"));
18857 newval = md_chars_to_number (buf, INSN_SIZE);
18858 newval |= value;
18859 md_number_to_chars (buf, newval, INSN_SIZE);
18860 }
18861 break;
18862
18863 case BFD_RELOC_ARM_MULTI:
18864 if (((unsigned long) value) > 0xffff)
18865 as_bad_where (fixP->fx_file, fixP->fx_line,
18866 _("invalid expression in load/store multiple"));
18867 newval = value | md_chars_to_number (buf, INSN_SIZE);
18868 md_number_to_chars (buf, newval, INSN_SIZE);
18869 break;
18870
18871 #ifdef OBJ_ELF
18872 case BFD_RELOC_ARM_PCREL_CALL:
18873 newval = md_chars_to_number (buf, INSN_SIZE);
18874 if ((newval & 0xf0000000) == 0xf0000000)
18875 temp = 1;
18876 else
18877 temp = 3;
18878 goto arm_branch_common;
18879
18880 case BFD_RELOC_ARM_PCREL_JUMP:
18881 case BFD_RELOC_ARM_PLT32:
18882 #endif
18883 case BFD_RELOC_ARM_PCREL_BRANCH:
18884 temp = 3;
18885 goto arm_branch_common;
18886
18887 case BFD_RELOC_ARM_PCREL_BLX:
18888 temp = 1;
18889 arm_branch_common:
18890 /* We are going to store the value (shifted right by two) in the
18891 instruction, in a 24 bit, signed field. Bits 26 through 32 must be
18892 either all clear or all set, and bit 0 must be clear. For B/BL bit 1
18893 must also be clear. */
18894 if (value & temp)
18895 as_bad_where (fixP->fx_file, fixP->fx_line,
18896 _("misaligned branch destination"));
18897 if ((value & (offsetT)0xfe000000) != (offsetT)0
18898 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
18899 as_bad_where (fixP->fx_file, fixP->fx_line,
18900 _("branch out of range"));
18901
18902 if (fixP->fx_done || !seg->use_rela_p)
18903 {
18904 newval = md_chars_to_number (buf, INSN_SIZE);
18905 newval |= (value >> 2) & 0x00ffffff;
18906 /* Set the H bit on BLX instructions. */
18907 if (temp == 1)
18908 {
18909 if (value & 2)
18910 newval |= 0x01000000;
18911 else
18912 newval &= ~0x01000000;
18913 }
18914 md_number_to_chars (buf, newval, INSN_SIZE);
18915 }
18916 break;
18917
18918 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
18919 /* CBZ can only branch forward. */
18920
18921 /* Attempts to use CBZ to branch to the next instruction
18922 (which, strictly speaking, are prohibited) will be turned into
18923 no-ops.
18924
18925 FIXME: It may be better to remove the instruction completely and
18926 perform relaxation. */
18927 if (value == -2)
18928 {
18929 newval = md_chars_to_number (buf, THUMB_SIZE);
18930 newval = 0xbf00; /* NOP encoding T1 */
18931 md_number_to_chars (buf, newval, THUMB_SIZE);
18932 }
18933 else
18934 {
18935 if (value & ~0x7e)
18936 as_bad_where (fixP->fx_file, fixP->fx_line,
18937 _("branch out of range"));
18938
18939 if (fixP->fx_done || !seg->use_rela_p)
18940 {
18941 newval = md_chars_to_number (buf, THUMB_SIZE);
18942 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
18943 md_number_to_chars (buf, newval, THUMB_SIZE);
18944 }
18945 }
18946 break;
18947
18948 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
18949 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
18950 as_bad_where (fixP->fx_file, fixP->fx_line,
18951 _("branch out of range"));
18952
18953 if (fixP->fx_done || !seg->use_rela_p)
18954 {
18955 newval = md_chars_to_number (buf, THUMB_SIZE);
18956 newval |= (value & 0x1ff) >> 1;
18957 md_number_to_chars (buf, newval, THUMB_SIZE);
18958 }
18959 break;
18960
18961 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
18962 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
18963 as_bad_where (fixP->fx_file, fixP->fx_line,
18964 _("branch out of range"));
18965
18966 if (fixP->fx_done || !seg->use_rela_p)
18967 {
18968 newval = md_chars_to_number (buf, THUMB_SIZE);
18969 newval |= (value & 0xfff) >> 1;
18970 md_number_to_chars (buf, newval, THUMB_SIZE);
18971 }
18972 break;
18973
18974 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18975 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
18976 as_bad_where (fixP->fx_file, fixP->fx_line,
18977 _("conditional branch out of range"));
18978
18979 if (fixP->fx_done || !seg->use_rela_p)
18980 {
18981 offsetT newval2;
18982 addressT S, J1, J2, lo, hi;
18983
18984 S = (value & 0x00100000) >> 20;
18985 J2 = (value & 0x00080000) >> 19;
18986 J1 = (value & 0x00040000) >> 18;
18987 hi = (value & 0x0003f000) >> 12;
18988 lo = (value & 0x00000ffe) >> 1;
18989
18990 newval = md_chars_to_number (buf, THUMB_SIZE);
18991 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18992 newval |= (S << 10) | hi;
18993 newval2 |= (J1 << 13) | (J2 << 11) | lo;
18994 md_number_to_chars (buf, newval, THUMB_SIZE);
18995 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18996 }
18997 break;
18998
18999 case BFD_RELOC_THUMB_PCREL_BLX:
19000 case BFD_RELOC_THUMB_PCREL_BRANCH23:
19001 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
19002 as_bad_where (fixP->fx_file, fixP->fx_line,
19003 _("branch out of range"));
19004
19005 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
19006 /* For a BLX instruction, make sure that the relocation is rounded up
19007 to a word boundary. This follows the semantics of the instruction
19008 which specifies that bit 1 of the target address will come from bit
19009 1 of the base address. */
19010 value = (value + 1) & ~ 1;
19011
19012 if (fixP->fx_done || !seg->use_rela_p)
19013 {
19014 offsetT newval2;
19015
19016 newval = md_chars_to_number (buf, THUMB_SIZE);
19017 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19018 newval |= (value & 0x7fffff) >> 12;
19019 newval2 |= (value & 0xfff) >> 1;
19020 md_number_to_chars (buf, newval, THUMB_SIZE);
19021 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
19022 }
19023 break;
19024
19025 case BFD_RELOC_THUMB_PCREL_BRANCH25:
19026 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
19027 as_bad_where (fixP->fx_file, fixP->fx_line,
19028 _("branch out of range"));
19029
19030 if (fixP->fx_done || !seg->use_rela_p)
19031 {
19032 offsetT newval2;
19033 addressT S, I1, I2, lo, hi;
19034
19035 S = (value & 0x01000000) >> 24;
19036 I1 = (value & 0x00800000) >> 23;
19037 I2 = (value & 0x00400000) >> 22;
19038 hi = (value & 0x003ff000) >> 12;
19039 lo = (value & 0x00000ffe) >> 1;
19040
19041 I1 = !(I1 ^ S);
19042 I2 = !(I2 ^ S);
19043
19044 newval = md_chars_to_number (buf, THUMB_SIZE);
19045 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19046 newval |= (S << 10) | hi;
19047 newval2 |= (I1 << 13) | (I2 << 11) | lo;
19048 md_number_to_chars (buf, newval, THUMB_SIZE);
19049 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
19050 }
19051 break;
19052
19053 case BFD_RELOC_8:
19054 if (fixP->fx_done || !seg->use_rela_p)
19055 md_number_to_chars (buf, value, 1);
19056 break;
19057
19058 case BFD_RELOC_16:
19059 if (fixP->fx_done || !seg->use_rela_p)
19060 md_number_to_chars (buf, value, 2);
19061 break;
19062
19063 #ifdef OBJ_ELF
19064 case BFD_RELOC_ARM_TLS_GD32:
19065 case BFD_RELOC_ARM_TLS_LE32:
19066 case BFD_RELOC_ARM_TLS_IE32:
19067 case BFD_RELOC_ARM_TLS_LDM32:
19068 case BFD_RELOC_ARM_TLS_LDO32:
19069 S_SET_THREAD_LOCAL (fixP->fx_addsy);
19070 /* fall through */
19071
19072 case BFD_RELOC_ARM_GOT32:
19073 case BFD_RELOC_ARM_GOTOFF:
19074 case BFD_RELOC_ARM_TARGET2:
19075 if (fixP->fx_done || !seg->use_rela_p)
19076 md_number_to_chars (buf, 0, 4);
19077 break;
19078 #endif
19079
19080 case BFD_RELOC_RVA:
19081 case BFD_RELOC_32:
19082 case BFD_RELOC_ARM_TARGET1:
19083 case BFD_RELOC_ARM_ROSEGREL32:
19084 case BFD_RELOC_ARM_SBREL32:
19085 case BFD_RELOC_32_PCREL:
19086 #ifdef TE_PE
19087 case BFD_RELOC_32_SECREL:
19088 #endif
19089 if (fixP->fx_done || !seg->use_rela_p)
19090 #ifdef TE_WINCE
19091 /* For WinCE we only do this for pcrel fixups. */
19092 if (fixP->fx_done || fixP->fx_pcrel)
19093 #endif
19094 md_number_to_chars (buf, value, 4);
19095 break;
19096
19097 #ifdef OBJ_ELF
19098 case BFD_RELOC_ARM_PREL31:
19099 if (fixP->fx_done || !seg->use_rela_p)
19100 {
19101 newval = md_chars_to_number (buf, 4) & 0x80000000;
19102 if ((value ^ (value >> 1)) & 0x40000000)
19103 {
19104 as_bad_where (fixP->fx_file, fixP->fx_line,
19105 _("rel31 relocation overflow"));
19106 }
19107 newval |= value & 0x7fffffff;
19108 md_number_to_chars (buf, newval, 4);
19109 }
19110 break;
19111 #endif
19112
19113 case BFD_RELOC_ARM_CP_OFF_IMM:
19114 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
19115 if (value < -1023 || value > 1023 || (value & 3))
19116 as_bad_where (fixP->fx_file, fixP->fx_line,
19117 _("co-processor offset out of range"));
19118 cp_off_common:
19119 sign = value >= 0;
19120 if (value < 0)
19121 value = -value;
19122 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
19123 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
19124 newval = md_chars_to_number (buf, INSN_SIZE);
19125 else
19126 newval = get_thumb32_insn (buf);
19127 newval &= 0xff7fff00;
19128 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
19129 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
19130 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
19131 md_number_to_chars (buf, newval, INSN_SIZE);
19132 else
19133 put_thumb32_insn (buf, newval);
19134 break;
19135
19136 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
19137 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
19138 if (value < -255 || value > 255)
19139 as_bad_where (fixP->fx_file, fixP->fx_line,
19140 _("co-processor offset out of range"));
19141 value *= 4;
19142 goto cp_off_common;
19143
19144 case BFD_RELOC_ARM_THUMB_OFFSET:
19145 newval = md_chars_to_number (buf, THUMB_SIZE);
19146 /* Exactly what ranges apply, and where the offset is inserted,
19147 depends on the type of instruction; we can establish this from the
19148 top 4 bits. */
19149 switch (newval >> 12)
19150 {
19151 case 4: /* PC load. */
19152 /* Thumb PC loads are somewhat odd: bit 1 of the PC is
19153 forced to zero for these loads; md_pcrel_from has already
19154 compensated for this. */
19155 if (value & 3)
19156 as_bad_where (fixP->fx_file, fixP->fx_line,
19157 _("invalid offset, target not word aligned (0x%08lX)"),
19158 (((unsigned long) fixP->fx_frag->fr_address
19159 + (unsigned long) fixP->fx_where) & ~3)
19160 + (unsigned long) value);
19161
19162 if (value & ~0x3fc)
19163 as_bad_where (fixP->fx_file, fixP->fx_line,
19164 _("invalid offset, value too big (0x%08lX)"),
19165 (long) value);
19166
19167 newval |= value >> 2;
19168 break;
19169
19170 case 9: /* SP load/store. */
19171 if (value & ~0x3fc)
19172 as_bad_where (fixP->fx_file, fixP->fx_line,
19173 _("invalid offset, value too big (0x%08lX)"),
19174 (long) value);
19175 newval |= value >> 2;
19176 break;
19177
19178 case 6: /* Word load/store. */
19179 if (value & ~0x7c)
19180 as_bad_where (fixP->fx_file, fixP->fx_line,
19181 _("invalid offset, value too big (0x%08lX)"),
19182 (long) value);
19183 newval |= value << 4; /* 6 - 2. */
19184 break;
19185
19186 case 7: /* Byte load/store. */
19187 if (value & ~0x1f)
19188 as_bad_where (fixP->fx_file, fixP->fx_line,
19189 _("invalid offset, value too big (0x%08lX)"),
19190 (long) value);
19191 newval |= value << 6;
19192 break;
19193
19194 case 8: /* Halfword load/store. */
19195 if (value & ~0x3e)
19196 as_bad_where (fixP->fx_file, fixP->fx_line,
19197 _("invalid offset, value too big (0x%08lX)"),
19198 (long) value);
19199 newval |= value << 5; /* 6 - 1. */
19200 break;
19201
19202 default:
19203 as_bad_where (fixP->fx_file, fixP->fx_line,
19204 _("Unable to process relocation for thumb opcode: %lx"),
19205 (unsigned long) newval);
19206 break;
19207 }
19208 md_number_to_chars (buf, newval, THUMB_SIZE);
19209 break;
19210
19211 case BFD_RELOC_ARM_THUMB_ADD:
19212 /* This is a complicated relocation, since we use it for all of
19213 the following immediate relocations:
19214
19215 3bit ADD/SUB
19216 8bit ADD/SUB
19217 9bit ADD/SUB SP word-aligned
19218 10bit ADD PC/SP word-aligned
19219
19220 The type of instruction being processed is encoded in the
19221 instruction field:
19222
19223 0x8000 SUB
19224 0x00F0 Rd
19225 0x000F Rs
19226 */
19227 newval = md_chars_to_number (buf, THUMB_SIZE);
19228 {
19229 int rd = (newval >> 4) & 0xf;
19230 int rs = newval & 0xf;
19231 int subtract = !!(newval & 0x8000);
19232
19233 /* Check for HI regs, only very restricted cases allowed:
19234 Adjusting SP, and using PC or SP to get an address. */
19235 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
19236 || (rs > 7 && rs != REG_SP && rs != REG_PC))
19237 as_bad_where (fixP->fx_file, fixP->fx_line,
19238 _("invalid Hi register with immediate"));
19239
19240 /* If value is negative, choose the opposite instruction. */
19241 if (value < 0)
19242 {
19243 value = -value;
19244 subtract = !subtract;
19245 if (value < 0)
19246 as_bad_where (fixP->fx_file, fixP->fx_line,
19247 _("immediate value out of range"));
19248 }
19249
19250 if (rd == REG_SP)
19251 {
19252 if (value & ~0x1fc)
19253 as_bad_where (fixP->fx_file, fixP->fx_line,
19254 _("invalid immediate for stack address calculation"));
19255 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
19256 newval |= value >> 2;
19257 }
19258 else if (rs == REG_PC || rs == REG_SP)
19259 {
19260 if (subtract || value & ~0x3fc)
19261 as_bad_where (fixP->fx_file, fixP->fx_line,
19262 _("invalid immediate for address calculation (value = 0x%08lX)"),
19263 (unsigned long) value);
19264 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
19265 newval |= rd << 8;
19266 newval |= value >> 2;
19267 }
19268 else if (rs == rd)
19269 {
19270 if (value & ~0xff)
19271 as_bad_where (fixP->fx_file, fixP->fx_line,
19272 _("immediate value out of range"));
19273 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
19274 newval |= (rd << 8) | value;
19275 }
19276 else
19277 {
19278 if (value & ~0x7)
19279 as_bad_where (fixP->fx_file, fixP->fx_line,
19280 _("immediate value out of range"));
19281 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
19282 newval |= rd | (rs << 3) | (value << 6);
19283 }
19284 }
19285 md_number_to_chars (buf, newval, THUMB_SIZE);
19286 break;
19287
19288 case BFD_RELOC_ARM_THUMB_IMM:
19289 newval = md_chars_to_number (buf, THUMB_SIZE);
19290 if (value < 0 || value > 255)
19291 as_bad_where (fixP->fx_file, fixP->fx_line,
19292 _("invalid immediate: %ld is out of range"),
19293 (long) value);
19294 newval |= value;
19295 md_number_to_chars (buf, newval, THUMB_SIZE);
19296 break;
19297
19298 case BFD_RELOC_ARM_THUMB_SHIFT:
19299 /* 5bit shift value (0..32). LSL cannot take 32. */
19300 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
19301 temp = newval & 0xf800;
19302 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
19303 as_bad_where (fixP->fx_file, fixP->fx_line,
19304 _("invalid shift value: %ld"), (long) value);
19305 /* Shifts of zero must be encoded as LSL. */
19306 if (value == 0)
19307 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
19308 /* Shifts of 32 are encoded as zero. */
19309 else if (value == 32)
19310 value = 0;
19311 newval |= value << 6;
19312 md_number_to_chars (buf, newval, THUMB_SIZE);
19313 break;
19314
19315 case BFD_RELOC_VTABLE_INHERIT:
19316 case BFD_RELOC_VTABLE_ENTRY:
19317 fixP->fx_done = 0;
19318 return;
19319
19320 case BFD_RELOC_ARM_MOVW:
19321 case BFD_RELOC_ARM_MOVT:
19322 case BFD_RELOC_ARM_THUMB_MOVW:
19323 case BFD_RELOC_ARM_THUMB_MOVT:
19324 if (fixP->fx_done || !seg->use_rela_p)
19325 {
19326 /* REL format relocations are limited to a 16-bit addend. */
19327 if (!fixP->fx_done)
19328 {
19329 if (value < -0x8000 || value > 0x7fff)
19330 as_bad_where (fixP->fx_file, fixP->fx_line,
19331 _("offset out of range"));
19332 }
19333 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
19334 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
19335 {
19336 value >>= 16;
19337 }
19338
19339 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
19340 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
19341 {
19342 newval = get_thumb32_insn (buf);
19343 newval &= 0xfbf08f00;
19344 newval |= (value & 0xf000) << 4;
19345 newval |= (value & 0x0800) << 15;
19346 newval |= (value & 0x0700) << 4;
19347 newval |= (value & 0x00ff);
19348 put_thumb32_insn (buf, newval);
19349 }
19350 else
19351 {
19352 newval = md_chars_to_number (buf, 4);
19353 newval &= 0xfff0f000;
19354 newval |= value & 0x0fff;
19355 newval |= (value & 0xf000) << 4;
19356 md_number_to_chars (buf, newval, 4);
19357 }
19358 }
19359 return;
19360
19361 case BFD_RELOC_ARM_ALU_PC_G0_NC:
19362 case BFD_RELOC_ARM_ALU_PC_G0:
19363 case BFD_RELOC_ARM_ALU_PC_G1_NC:
19364 case BFD_RELOC_ARM_ALU_PC_G1:
19365 case BFD_RELOC_ARM_ALU_PC_G2:
19366 case BFD_RELOC_ARM_ALU_SB_G0_NC:
19367 case BFD_RELOC_ARM_ALU_SB_G0:
19368 case BFD_RELOC_ARM_ALU_SB_G1_NC:
19369 case BFD_RELOC_ARM_ALU_SB_G1:
19370 case BFD_RELOC_ARM_ALU_SB_G2:
19371 assert (!fixP->fx_done);
19372 if (!seg->use_rela_p)
19373 {
19374 bfd_vma insn;
19375 bfd_vma encoded_addend;
19376 bfd_vma addend_abs = abs (value);
19377
19378 /* Check that the absolute value of the addend can be
19379 expressed as an 8-bit constant plus a rotation. */
19380 encoded_addend = encode_arm_immediate (addend_abs);
19381 if (encoded_addend == (unsigned int) FAIL)
19382 as_bad_where (fixP->fx_file, fixP->fx_line,
19383 _("the offset 0x%08lX is not representable"),
19384 (unsigned long) addend_abs);
19385
19386 /* Extract the instruction. */
19387 insn = md_chars_to_number (buf, INSN_SIZE);
19388
19389 /* If the addend is positive, use an ADD instruction.
19390 Otherwise use a SUB. Take care not to destroy the S bit. */
19391 insn &= 0xff1fffff;
19392 if (value < 0)
19393 insn |= 1 << 22;
19394 else
19395 insn |= 1 << 23;
19396
19397 /* Place the encoded addend into the first 12 bits of the
19398 instruction. */
19399 insn &= 0xfffff000;
19400 insn |= encoded_addend;
19401
19402 /* Update the instruction. */
19403 md_number_to_chars (buf, insn, INSN_SIZE);
19404 }
19405 break;
19406
19407 case BFD_RELOC_ARM_LDR_PC_G0:
19408 case BFD_RELOC_ARM_LDR_PC_G1:
19409 case BFD_RELOC_ARM_LDR_PC_G2:
19410 case BFD_RELOC_ARM_LDR_SB_G0:
19411 case BFD_RELOC_ARM_LDR_SB_G1:
19412 case BFD_RELOC_ARM_LDR_SB_G2:
19413 assert (!fixP->fx_done);
19414 if (!seg->use_rela_p)
19415 {
19416 bfd_vma insn;
19417 bfd_vma addend_abs = abs (value);
19418
19419 /* Check that the absolute value of the addend can be
19420 encoded in 12 bits. */
19421 if (addend_abs >= 0x1000)
19422 as_bad_where (fixP->fx_file, fixP->fx_line,
19423 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
19424 (unsigned long) addend_abs);
19425
19426 /* Extract the instruction. */
19427 insn = md_chars_to_number (buf, INSN_SIZE);
19428
19429 /* If the addend is negative, clear bit 23 of the instruction.
19430 Otherwise set it. */
19431 if (value < 0)
19432 insn &= ~(1 << 23);
19433 else
19434 insn |= 1 << 23;
19435
19436 /* Place the absolute value of the addend into the first 12 bits
19437 of the instruction. */
19438 insn &= 0xfffff000;
19439 insn |= addend_abs;
19440
19441 /* Update the instruction. */
19442 md_number_to_chars (buf, insn, INSN_SIZE);
19443 }
19444 break;
19445
19446 case BFD_RELOC_ARM_LDRS_PC_G0:
19447 case BFD_RELOC_ARM_LDRS_PC_G1:
19448 case BFD_RELOC_ARM_LDRS_PC_G2:
19449 case BFD_RELOC_ARM_LDRS_SB_G0:
19450 case BFD_RELOC_ARM_LDRS_SB_G1:
19451 case BFD_RELOC_ARM_LDRS_SB_G2:
19452 assert (!fixP->fx_done);
19453 if (!seg->use_rela_p)
19454 {
19455 bfd_vma insn;
19456 bfd_vma addend_abs = abs (value);
19457
19458 /* Check that the absolute value of the addend can be
19459 encoded in 8 bits. */
19460 if (addend_abs >= 0x100)
19461 as_bad_where (fixP->fx_file, fixP->fx_line,
19462 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
19463 (unsigned long) addend_abs);
19464
19465 /* Extract the instruction. */
19466 insn = md_chars_to_number (buf, INSN_SIZE);
19467
19468 /* If the addend is negative, clear bit 23 of the instruction.
19469 Otherwise set it. */
19470 if (value < 0)
19471 insn &= ~(1 << 23);
19472 else
19473 insn |= 1 << 23;
19474
19475 /* Place the first four bits of the absolute value of the addend
19476 into the first 4 bits of the instruction, and the remaining
19477 four into bits 8 .. 11. */
19478 insn &= 0xfffff0f0;
19479 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
19480
19481 /* Update the instruction. */
19482 md_number_to_chars (buf, insn, INSN_SIZE);
19483 }
19484 break;
19485
19486 case BFD_RELOC_ARM_LDC_PC_G0:
19487 case BFD_RELOC_ARM_LDC_PC_G1:
19488 case BFD_RELOC_ARM_LDC_PC_G2:
19489 case BFD_RELOC_ARM_LDC_SB_G0:
19490 case BFD_RELOC_ARM_LDC_SB_G1:
19491 case BFD_RELOC_ARM_LDC_SB_G2:
19492 assert (!fixP->fx_done);
19493 if (!seg->use_rela_p)
19494 {
19495 bfd_vma insn;
19496 bfd_vma addend_abs = abs (value);
19497
19498 /* Check that the absolute value of the addend is a multiple of
19499 four and, when divided by four, fits in 8 bits. */
19500 if (addend_abs & 0x3)
19501 as_bad_where (fixP->fx_file, fixP->fx_line,
19502 _("bad offset 0x%08lX (must be word-aligned)"),
19503 (unsigned long) addend_abs);
19504
19505 if ((addend_abs >> 2) > 0xff)
19506 as_bad_where (fixP->fx_file, fixP->fx_line,
19507 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
19508 (unsigned long) addend_abs);
19509
19510 /* Extract the instruction. */
19511 insn = md_chars_to_number (buf, INSN_SIZE);
19512
19513 /* If the addend is negative, clear bit 23 of the instruction.
19514 Otherwise set it. */
19515 if (value < 0)
19516 insn &= ~(1 << 23);
19517 else
19518 insn |= 1 << 23;
19519
19520 /* Place the addend (divided by four) into the first eight
19521 bits of the instruction. */
19522 insn &= 0xfffffff0;
19523 insn |= addend_abs >> 2;
19524
19525 /* Update the instruction. */
19526 md_number_to_chars (buf, insn, INSN_SIZE);
19527 }
19528 break;
19529
19530 case BFD_RELOC_ARM_V4BX:
19531 /* This will need to go in the object file. */
19532 fixP->fx_done = 0;
19533 break;
19534
19535 case BFD_RELOC_UNUSED:
19536 default:
19537 as_bad_where (fixP->fx_file, fixP->fx_line,
19538 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
19539 }
19540 }
19541
19542 /* Translate internal representation of relocation info to BFD target
19543 format. */
19544
19545 arelent *
19546 tc_gen_reloc (asection *section, fixS *fixp)
19547 {
19548 arelent * reloc;
19549 bfd_reloc_code_real_type code;
19550
19551 reloc = xmalloc (sizeof (arelent));
19552
19553 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
19554 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
19555 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
19556
19557 if (fixp->fx_pcrel)
19558 {
19559 if (section->use_rela_p)
19560 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
19561 else
19562 fixp->fx_offset = reloc->address;
19563 }
19564 reloc->addend = fixp->fx_offset;
19565
19566 switch (fixp->fx_r_type)
19567 {
19568 case BFD_RELOC_8:
19569 if (fixp->fx_pcrel)
19570 {
19571 code = BFD_RELOC_8_PCREL;
19572 break;
19573 }
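      /* Fall through: when the fixup is not PC-relative, this case and
	 the ones below drop through to the group further down that simply
	 takes CODE from fx_r_type.  */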
19574
19575 case BFD_RELOC_16:
19576 if (fixp->fx_pcrel)
19577 {
19578 code = BFD_RELOC_16_PCREL;
19579 break;
19580 }
19581
19582 case BFD_RELOC_32:
19583 if (fixp->fx_pcrel)
19584 {
19585 code = BFD_RELOC_32_PCREL;
19586 break;
19587 }
19588
19589 case BFD_RELOC_ARM_MOVW:
19590 if (fixp->fx_pcrel)
19591 {
19592 code = BFD_RELOC_ARM_MOVW_PCREL;
19593 break;
19594 }
19595
19596 case BFD_RELOC_ARM_MOVT:
19597 if (fixp->fx_pcrel)
19598 {
19599 code = BFD_RELOC_ARM_MOVT_PCREL;
19600 break;
19601 }
19602
19603 case BFD_RELOC_ARM_THUMB_MOVW:
19604 if (fixp->fx_pcrel)
19605 {
19606 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
19607 break;
19608 }
19609
19610 case BFD_RELOC_ARM_THUMB_MOVT:
19611 if (fixp->fx_pcrel)
19612 {
19613 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
19614 break;
19615 }
19616
19617 case BFD_RELOC_NONE:
19618 case BFD_RELOC_ARM_PCREL_BRANCH:
19619 case BFD_RELOC_ARM_PCREL_BLX:
19620 case BFD_RELOC_RVA:
19621 case BFD_RELOC_THUMB_PCREL_BRANCH7:
19622 case BFD_RELOC_THUMB_PCREL_BRANCH9:
19623 case BFD_RELOC_THUMB_PCREL_BRANCH12:
19624 case BFD_RELOC_THUMB_PCREL_BRANCH20:
19625 case BFD_RELOC_THUMB_PCREL_BRANCH23:
19626 case BFD_RELOC_THUMB_PCREL_BRANCH25:
19627 case BFD_RELOC_THUMB_PCREL_BLX:
19628 case BFD_RELOC_VTABLE_ENTRY:
19629 case BFD_RELOC_VTABLE_INHERIT:
19630 #ifdef TE_PE
19631 case BFD_RELOC_32_SECREL:
19632 #endif
19633 code = fixp->fx_r_type;
19634 break;
19635
19636 case BFD_RELOC_ARM_LITERAL:
19637 case BFD_RELOC_ARM_HWLITERAL:
19638 /* If this is called then a literal has
19639 been referenced across a section boundary. */
19640 as_bad_where (fixp->fx_file, fixp->fx_line,
19641 _("literal referenced across section boundary"));
19642 return NULL;
19643
19644 #ifdef OBJ_ELF
19645 case BFD_RELOC_ARM_GOT32:
19646 case BFD_RELOC_ARM_GOTOFF:
19647 case BFD_RELOC_ARM_PLT32:
19648 case BFD_RELOC_ARM_TARGET1:
19649 case BFD_RELOC_ARM_ROSEGREL32:
19650 case BFD_RELOC_ARM_SBREL32:
19651 case BFD_RELOC_ARM_PREL31:
19652 case BFD_RELOC_ARM_TARGET2:
19653 case BFD_RELOC_ARM_TLS_LE32:
19654 case BFD_RELOC_ARM_TLS_LDO32:
19655 case BFD_RELOC_ARM_PCREL_CALL:
19656 case BFD_RELOC_ARM_PCREL_JUMP:
19657 case BFD_RELOC_ARM_ALU_PC_G0_NC:
19658 case BFD_RELOC_ARM_ALU_PC_G0:
19659 case BFD_RELOC_ARM_ALU_PC_G1_NC:
19660 case BFD_RELOC_ARM_ALU_PC_G1:
19661 case BFD_RELOC_ARM_ALU_PC_G2:
19662 case BFD_RELOC_ARM_LDR_PC_G0:
19663 case BFD_RELOC_ARM_LDR_PC_G1:
19664 case BFD_RELOC_ARM_LDR_PC_G2:
19665 case BFD_RELOC_ARM_LDRS_PC_G0:
19666 case BFD_RELOC_ARM_LDRS_PC_G1:
19667 case BFD_RELOC_ARM_LDRS_PC_G2:
19668 case BFD_RELOC_ARM_LDC_PC_G0:
19669 case BFD_RELOC_ARM_LDC_PC_G1:
19670 case BFD_RELOC_ARM_LDC_PC_G2:
19671 case BFD_RELOC_ARM_ALU_SB_G0_NC:
19672 case BFD_RELOC_ARM_ALU_SB_G0:
19673 case BFD_RELOC_ARM_ALU_SB_G1_NC:
19674 case BFD_RELOC_ARM_ALU_SB_G1:
19675 case BFD_RELOC_ARM_ALU_SB_G2:
19676 case BFD_RELOC_ARM_LDR_SB_G0:
19677 case BFD_RELOC_ARM_LDR_SB_G1:
19678 case BFD_RELOC_ARM_LDR_SB_G2:
19679 case BFD_RELOC_ARM_LDRS_SB_G0:
19680 case BFD_RELOC_ARM_LDRS_SB_G1:
19681 case BFD_RELOC_ARM_LDRS_SB_G2:
19682 case BFD_RELOC_ARM_LDC_SB_G0:
19683 case BFD_RELOC_ARM_LDC_SB_G1:
19684 case BFD_RELOC_ARM_LDC_SB_G2:
19685 case BFD_RELOC_ARM_V4BX:
19686 code = fixp->fx_r_type;
19687 break;
19688
19689 case BFD_RELOC_ARM_TLS_GD32:
19690 case BFD_RELOC_ARM_TLS_IE32:
19691 case BFD_RELOC_ARM_TLS_LDM32:
19692 /* BFD will include the symbol's address in the addend.
19693 But we don't want that, so subtract it out again here. */
19694 if (!S_IS_COMMON (fixp->fx_addsy))
19695 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
19696 code = fixp->fx_r_type;
19697 break;
19698 #endif
19699
19700 case BFD_RELOC_ARM_IMMEDIATE:
19701 as_bad_where (fixp->fx_file, fixp->fx_line,
19702 _("internal relocation (type: IMMEDIATE) not fixed up"));
19703 return NULL;
19704
19705 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
19706 as_bad_where (fixp->fx_file, fixp->fx_line,
19707 _("ADRL used for a symbol not defined in the same file"));
19708 return NULL;
19709
19710 case BFD_RELOC_ARM_OFFSET_IMM:
19711 if (section->use_rela_p)
19712 {
19713 code = fixp->fx_r_type;
19714 break;
19715 }
19716
19717 if (fixp->fx_addsy != NULL
19718 && !S_IS_DEFINED (fixp->fx_addsy)
19719 && S_IS_LOCAL (fixp->fx_addsy))
19720 {
19721 as_bad_where (fixp->fx_file, fixp->fx_line,
19722 _("undefined local label `%s'"),
19723 S_GET_NAME (fixp->fx_addsy));
19724 return NULL;
19725 }
19726
19727 as_bad_where (fixp->fx_file, fixp->fx_line,
19728 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
19729 return NULL;
19730
19731 default:
19732 {
19733 char * type;
19734
19735 switch (fixp->fx_r_type)
19736 {
19737 case BFD_RELOC_NONE: type = "NONE"; break;
19738 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
19739 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
19740 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
19741 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
19742 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
19743 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
19744 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
19745 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
19746 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
19747 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
19748 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
19749 default: type = _("<unknown>"); break;
19750 }
19751 as_bad_where (fixp->fx_file, fixp->fx_line,
19752 _("cannot represent %s relocation in this object file format"),
19753 type);
19754 return NULL;
19755 }
19756 }
19757
19758 #ifdef OBJ_ELF
19759 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
19760 && GOT_symbol
19761 && fixp->fx_addsy == GOT_symbol)
19762 {
19763 code = BFD_RELOC_ARM_GOTPC;
19764 reloc->addend = fixp->fx_offset = reloc->address;
19765 }
19766 #endif
19767
19768 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
19769
19770 if (reloc->howto == NULL)
19771 {
19772 as_bad_where (fixp->fx_file, fixp->fx_line,
19773 _("cannot represent %s relocation in this object file format"),
19774 bfd_get_reloc_code_name (code));
19775 return NULL;
19776 }
19777
19778 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
19779 vtable entry to be used in the relocation's section offset. */
19780 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19781 reloc->address = fixp->fx_offset;
19782
19783 return reloc;
19784 }
19785
19786 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
19787
19788 void
19789 cons_fix_new_arm (fragS * frag,
19790 int where,
19791 int size,
19792 expressionS * exp)
19793 {
19794 bfd_reloc_code_real_type type;
19795 int pcrel = 0;
19796
19797 /* Pick a reloc.
19798 FIXME: @@ Should look at CPU word size. */
19799 switch (size)
19800 {
19801 case 1:
19802 type = BFD_RELOC_8;
19803 break;
19804 case 2:
19805 type = BFD_RELOC_16;
19806 break;
19807 case 4:
19808 default:
19809 type = BFD_RELOC_32;
19810 break;
19811 case 8:
19812 type = BFD_RELOC_64;
19813 break;
19814 }
19815
19816 #ifdef TE_PE
19817 if (exp->X_op == O_secrel)
19818 {
19819 exp->X_op = O_symbol;
19820 type = BFD_RELOC_32_SECREL;
19821 }
19822 #endif
19823
19824 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
19825 }
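
/* Thus a ".byte sym", ".short sym", ".word sym" or ".quad sym" data
   directive ends up with BFD_RELOC_8, _16, _32 or _64 respectively,
   while on PE targets an expression flagged O_secrel (for instance by
   the .secrel32 directive) becomes BFD_RELOC_32_SECREL.  */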
19826
19827 #if defined OBJ_COFF || defined OBJ_ELF
19828 void
19829 arm_validate_fix (fixS * fixP)
19830 {
19831 /* If the destination of the branch is a defined symbol which does not have
19832 the THUMB_FUNC attribute, then we must be calling a function which has
19833 the (interfacearm) attribute. We look for the Thumb entry point to that
19834 function and change the branch to refer to that function instead. */
19835 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
19836 && fixP->fx_addsy != NULL
19837 && S_IS_DEFINED (fixP->fx_addsy)
19838 && ! THUMB_IS_FUNC (fixP->fx_addsy))
19839 {
19840 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
19841 }
19842 }
19843 #endif
19844
19845 int
19846 arm_force_relocation (struct fix * fixp)
19847 {
19848 #if defined (OBJ_COFF) && defined (TE_PE)
19849 if (fixp->fx_r_type == BFD_RELOC_RVA)
19850 return 1;
19851 #endif
19852
19853 /* Resolve these relocations even if the symbol is extern or weak. */
19854 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
19855 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
19856 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
19857 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
19858 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19859 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
19860 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
19861 return 0;
19862
19863 /* Always leave these relocations for the linker. */
19864 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19865 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19866 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19867 return 1;
19868
19869 /* Always generate relocations against function symbols. */
19870 if (fixp->fx_r_type == BFD_RELOC_32
19871 && fixp->fx_addsy
19872 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
19873 return 1;
19874
19875 return generic_force_reloc (fixp);
19876 }
19877
19878 #if defined (OBJ_ELF) || defined (OBJ_COFF)
19879 /* Relocations against function names must be left unadjusted,
19880 so that the linker can use this information to generate interworking
19881 stubs. The MIPS version of this function
19882 also prevents relocations that are mips-16 specific, but I do not
19883 know why it does this.
19884
19885 FIXME:
19886 There is one other problem that ought to be addressed here, but
19887 which currently is not: Taking the address of a label (rather
19888 than a function) and then later jumping to that address. Such
19889 addresses also ought to have their bottom bit set (assuming that
19890 they reside in Thumb code), but at the moment they will not. */
19891
19892 bfd_boolean
19893 arm_fix_adjustable (fixS * fixP)
19894 {
19895 if (fixP->fx_addsy == NULL)
19896 return 1;
19897
19898 /* Preserve relocations against symbols with function type. */
19899 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
19900 return 0;
19901
19902 if (THUMB_IS_FUNC (fixP->fx_addsy)
19903 && fixP->fx_subsy == NULL)
19904 return 0;
19905
19906 /* We need the symbol name for the VTABLE entries. */
19907 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
19908 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19909 return 0;
19910
19911 /* Don't allow symbols to be discarded on GOT related relocs. */
19912 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
19913 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
19914 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
19915 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
19916 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
19917 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
19918 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
19919 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
19920 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
19921 return 0;
19922
19923 /* Similarly for group relocations. */
19924 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19925 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19926 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19927 return 0;
19928
19929 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
19930 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
19931 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
19932 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
19933 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
19934 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
19935 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
19936 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
19937 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
19938 return 0;
19939
19940 return 1;
19941 }
19942 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
19943
19944 #ifdef OBJ_ELF
19945
19946 const char *
19947 elf32_arm_target_format (void)
19948 {
19949 #ifdef TE_SYMBIAN
19950 return (target_big_endian
19951 ? "elf32-bigarm-symbian"
19952 : "elf32-littlearm-symbian");
19953 #elif defined (TE_VXWORKS)
19954 return (target_big_endian
19955 ? "elf32-bigarm-vxworks"
19956 : "elf32-littlearm-vxworks");
19957 #else
19958 if (target_big_endian)
19959 return "elf32-bigarm";
19960 else
19961 return "elf32-littlearm";
19962 #endif
19963 }
19964
19965 void
19966 armelf_frob_symbol (symbolS * symp,
19967 int * puntp)
19968 {
19969 elf_frob_symbol (symp, puntp);
19970 }
19971 #endif
19972
19973 /* MD interface: Finalization. */
19974
19975 /* A good place to do this, although this was probably not intended
19976 for this kind of use. We need to dump the literal pool before
19977 references are made to a null symbol pointer. */
19978
19979 void
19980 arm_cleanup (void)
19981 {
19982 literal_pool * pool;
19983
19984 for (pool = list_of_pools; pool; pool = pool->next)
19985 {
19986 /* Put it at the end of the relevant section. */
19987 subseg_set (pool->section, pool->sub_section);
19988 #ifdef OBJ_ELF
19989 arm_elf_change_section ();
19990 #endif
19991 s_ltorg (0);
19992 }
19993 }
19994
19995 /* Adjust the symbol table. This marks Thumb symbols as distinct from
19996 ARM ones. */
19997
19998 void
19999 arm_adjust_symtab (void)
20000 {
20001 #ifdef OBJ_COFF
20002 symbolS * sym;
20003
20004 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
20005 {
20006 if (ARM_IS_THUMB (sym))
20007 {
20008 if (THUMB_IS_FUNC (sym))
20009 {
20010 /* Mark the symbol as a Thumb function. */
20011 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
20012 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
20013 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
20014
20015 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
20016 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
20017 else
20018 as_bad (_("%s: unexpected function type: %d"),
20019 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
20020 }
20021 else switch (S_GET_STORAGE_CLASS (sym))
20022 {
20023 case C_EXT:
20024 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
20025 break;
20026 case C_STAT:
20027 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
20028 break;
20029 case C_LABEL:
20030 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
20031 break;
20032 default:
20033 /* Do nothing. */
20034 break;
20035 }
20036 }
20037
20038 if (ARM_IS_INTERWORK (sym))
20039 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
20040 }
20041 #endif
20042 #ifdef OBJ_ELF
20043 symbolS * sym;
20044 char bind;
20045
20046 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
20047 {
20048 if (ARM_IS_THUMB (sym))
20049 {
20050 elf_symbol_type * elf_sym;
20051
20052 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
20053 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
20054
20055 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
20056 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
20057 {
20058 /* If it's a .thumb_func, declare it as such;
20059 otherwise tag the label as .code 16. */
20060 if (THUMB_IS_FUNC (sym))
20061 elf_sym->internal_elf_sym.st_info =
20062 ELF_ST_INFO (bind, STT_ARM_TFUNC);
20063 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
20064 elf_sym->internal_elf_sym.st_info =
20065 ELF_ST_INFO (bind, STT_ARM_16BIT);
20066 }
20067 }
20068 }
20069 #endif
20070 }
20071
20072 /* MD interface: Initialization. */
20073
20074 static void
20075 set_constant_flonums (void)
20076 {
20077 int i;
20078
20079 for (i = 0; i < NUM_FLOAT_VALS; i++)
20080 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
20081 abort ();
20082 }
20083
20084 /* Auto-select Thumb mode if it's the only available instruction set for the
20085 given architecture. */
20086
20087 static void
20088 autoselect_thumb_from_cpu_variant (void)
20089 {
20090 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
20091 opcode_select (16);
20092 }
20093
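/* MD interface hook, called once after option parsing: build the opcode,
   condition, shift, PSR, register, relocation and barrier-option hash
   tables, resolve the CPU/FPU selection and record the BFD machine type.  */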
20094 void
20095 md_begin (void)
20096 {
20097 unsigned mach;
20098 unsigned int i;
20099
20100 if ( (arm_ops_hsh = hash_new ()) == NULL
20101 || (arm_cond_hsh = hash_new ()) == NULL
20102 || (arm_shift_hsh = hash_new ()) == NULL
20103 || (arm_psr_hsh = hash_new ()) == NULL
20104 || (arm_v7m_psr_hsh = hash_new ()) == NULL
20105 || (arm_reg_hsh = hash_new ()) == NULL
20106 || (arm_reloc_hsh = hash_new ()) == NULL
20107 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
20108 as_fatal (_("virtual memory exhausted"));
20109
20110 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
20111 hash_insert (arm_ops_hsh, insns[i].template, (void *) (insns + i));
20112 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
20113 hash_insert (arm_cond_hsh, conds[i].template, (void *) (conds + i));
20114 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
20115 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
20116 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
20117 hash_insert (arm_psr_hsh, psrs[i].template, (void *) (psrs + i));
20118 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
20119 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (void *) (v7m_psrs + i));
20120 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
20121 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
20122 for (i = 0;
20123 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
20124 i++)
20125 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
20126 (void *) (barrier_opt_names + i));
20127 #ifdef OBJ_ELF
20128 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
20129 hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));
20130 #endif
20131
20132 set_constant_flonums ();
20133
20134 /* Set the cpu variant based on the command-line options. We prefer
20135 -mcpu= over -march= if both are set (as for GCC); and we prefer
20136 -mfpu= over any other way of setting the floating point unit.
20137 Use of legacy options together with new options is faulted. */
20138 if (legacy_cpu)
20139 {
20140 if (mcpu_cpu_opt || march_cpu_opt)
20141 as_bad (_("use of old and new-style options to set CPU type"));
20142
20143 mcpu_cpu_opt = legacy_cpu;
20144 }
20145 else if (!mcpu_cpu_opt)
20146 mcpu_cpu_opt = march_cpu_opt;
20147
20148 if (legacy_fpu)
20149 {
20150 if (mfpu_opt)
20151 as_bad (_("use of old and new-style options to set FPU type"));
20152
20153 mfpu_opt = legacy_fpu;
20154 }
20155 else if (!mfpu_opt)
20156 {
20157 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
20158 /* Some environments specify a default FPU. If they don't, infer it
20159 from the processor. */
20160 if (mcpu_fpu_opt)
20161 mfpu_opt = mcpu_fpu_opt;
20162 else
20163 mfpu_opt = march_fpu_opt;
20164 #else
20165 mfpu_opt = &fpu_default;
20166 #endif
20167 }
20168
20169 if (!mfpu_opt)
20170 {
20171 if (mcpu_cpu_opt != NULL)
20172 mfpu_opt = &fpu_default;
20173 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
20174 mfpu_opt = &fpu_arch_vfp_v2;
20175 else
20176 mfpu_opt = &fpu_arch_fpa;
20177 }
20178
20179 #ifdef CPU_DEFAULT
20180 if (!mcpu_cpu_opt)
20181 {
20182 mcpu_cpu_opt = &cpu_default;
20183 selected_cpu = cpu_default;
20184 }
20185 #else
20186 if (mcpu_cpu_opt)
20187 selected_cpu = *mcpu_cpu_opt;
20188 else
20189 mcpu_cpu_opt = &arm_arch_any;
20190 #endif
20191
20192 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20193
20194 autoselect_thumb_from_cpu_variant ();
20195
20196 arm_arch_used = thumb_arch_used = arm_arch_none;
20197
20198 #if defined OBJ_COFF || defined OBJ_ELF
20199 {
20200 unsigned int flags = 0;
20201
20202 #if defined OBJ_ELF
20203 flags = meabi_flags;
20204
20205 switch (meabi_flags)
20206 {
20207 case EF_ARM_EABI_UNKNOWN:
20208 #endif
20209 /* Set the flags in the private structure. */
20210 if (uses_apcs_26) flags |= F_APCS26;
20211 if (support_interwork) flags |= F_INTERWORK;
20212 if (uses_apcs_float) flags |= F_APCS_FLOAT;
20213 if (pic_code) flags |= F_PIC;
20214 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
20215 flags |= F_SOFT_FLOAT;
20216
20217 switch (mfloat_abi_opt)
20218 {
20219 case ARM_FLOAT_ABI_SOFT:
20220 case ARM_FLOAT_ABI_SOFTFP:
20221 flags |= F_SOFT_FLOAT;
20222 break;
20223
20224 case ARM_FLOAT_ABI_HARD:
20225 if (flags & F_SOFT_FLOAT)
20226 as_bad (_("hard-float conflicts with specified fpu"));
20227 break;
20228 }
20229
20230 /* Using pure-endian doubles (even if soft-float). */
20231 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
20232 flags |= F_VFP_FLOAT;
20233
20234 #if defined OBJ_ELF
20235 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
20236 flags |= EF_ARM_MAVERICK_FLOAT;
20237 break;
20238
20239 case EF_ARM_EABI_VER4:
20240 case EF_ARM_EABI_VER5:
20241 /* No additional flags to set. */
20242 break;
20243
20244 default:
20245 abort ();
20246 }
20247 #endif
20248 bfd_set_private_flags (stdoutput, flags);
20249
20250 /* We have run out of flags in the COFF header to encode the
20251 status of ATPCS support, so instead we create a dummy,
20252 empty, debug section called .arm.atpcs. */
20253 if (atpcs)
20254 {
20255 asection * sec;
20256
20257 sec = bfd_make_section (stdoutput, ".arm.atpcs");
20258
20259 if (sec != NULL)
20260 {
20261 bfd_set_section_flags
20262 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
20263 bfd_set_section_size (stdoutput, sec, 0);
20264 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
20265 }
20266 }
20267 }
20268 #endif
20269
20270 /* Record the CPU type as well. */
20271 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
20272 mach = bfd_mach_arm_iWMMXt2;
20273 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
20274 mach = bfd_mach_arm_iWMMXt;
20275 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
20276 mach = bfd_mach_arm_XScale;
20277 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
20278 mach = bfd_mach_arm_ep9312;
20279 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
20280 mach = bfd_mach_arm_5TE;
20281 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
20282 {
20283 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
20284 mach = bfd_mach_arm_5T;
20285 else
20286 mach = bfd_mach_arm_5;
20287 }
20288 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
20289 {
20290 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
20291 mach = bfd_mach_arm_4T;
20292 else
20293 mach = bfd_mach_arm_4;
20294 }
20295 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
20296 mach = bfd_mach_arm_3M;
20297 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
20298 mach = bfd_mach_arm_3;
20299 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
20300 mach = bfd_mach_arm_2a;
20301 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
20302 mach = bfd_mach_arm_2;
20303 else
20304 mach = bfd_mach_arm_unknown;
20305
20306 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
20307 }
20308
20309 /* Command line processing. */
20310
20311 /* md_parse_option
20312 Invocation line includes a switch not recognized by the base assembler.
20313 See if it's a processor-specific option.
20314
20315 This routine is somewhat complicated by the need for backwards
20316 compatibility (since older releases of gcc can't be changed).
20317 The new options try to make the interface as compatible as
20318 possible with GCC.
20319
20320 New options (supported) are:
20321
20322 -mcpu=<cpu name> Assemble for selected processor
20323 -march=<architecture name> Assemble for selected architecture
20324 -mfpu=<fpu architecture> Assemble for selected FPU.
20325 -EB/-mbig-endian Big-endian
20326 -EL/-mlittle-endian Little-endian
20327 -k Generate PIC code
20328 -mthumb Start in Thumb mode
20329 -mthumb-interwork Code supports ARM/Thumb interworking
20330
20331 -m[no-]warn-deprecated Warn about deprecated features
20332
20333 For now we will also provide support for:
20334
20335 -mapcs-32 32-bit Program counter
20336 -mapcs-26 26-bit Program counter
20337 -mapcs-float Floats passed in FP registers
20338 -mapcs-reentrant Reentrant code
20339 -matpcs
20340 (at some point these will probably be replaced with -mapcs=<list of options>
20341 and -matpcs=<list of options>)
20342
20343 The remaining options are only supported for backwards compatibility.
20344 Cpu variants, the arm part is optional:
20345 -m[arm]1 Currently not supported.
20346 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
20347 -m[arm]3 Arm 3 processor
20348 -m[arm]6[xx], Arm 6 processors
20349 -m[arm]7[xx][t][[d]m] Arm 7 processors
20350 -m[arm]8[10] Arm 8 processors
20351 -m[arm]9[20][tdmi] Arm 9 processors
20352 -mstrongarm[110[0]] StrongARM processors
20353 -mxscale XScale processors
20354 -m[arm]v[2345[t[e]]] Arm architectures
20355 -mall All (except the ARM1)
20356 FP variants:
20357 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
20358 -mfpe-old (No float load/store multiples)
20359 -mvfpxd VFP Single precision
20360 -mvfp All VFP
20361 -mno-fpu Disable all floating point instructions
20362
20363 The following CPU names are recognized:
20364 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
20365 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
20366 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
20367 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
20368 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
20369 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
20370 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
20371
20372 */
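/* Illustrative examples only (not part of the original comment): typical
   new-style invocations would look like
       as -mcpu=arm7tdmi -mthumb foo.s
       as -march=armv5te -mfpu=vfpv2 -mfloat-abi=softfp foo.s
   using the option names documented above.  */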
20373
20374 const char * md_shortopts = "m:k";
20375
20376 #ifdef ARM_BI_ENDIAN
20377 #define OPTION_EB (OPTION_MD_BASE + 0)
20378 #define OPTION_EL (OPTION_MD_BASE + 1)
20379 #else
20380 #if TARGET_BYTES_BIG_ENDIAN
20381 #define OPTION_EB (OPTION_MD_BASE + 0)
20382 #else
20383 #define OPTION_EL (OPTION_MD_BASE + 1)
20384 #endif
20385 #endif
20386 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
20387
20388 struct option md_longopts[] =
20389 {
20390 #ifdef OPTION_EB
20391 {"EB", no_argument, NULL, OPTION_EB},
20392 #endif
20393 #ifdef OPTION_EL
20394 {"EL", no_argument, NULL, OPTION_EL},
20395 #endif
20396 {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
20397 {NULL, no_argument, NULL, 0}
20398 };
20399
20400 size_t md_longopts_size = sizeof (md_longopts);
20401
20402 struct arm_option_table
20403 {
20404 char *option; /* Option name to match. */
20405 char *help; /* Help information. */
20406 int *var; /* Variable to change. */
20407 int value; /* What to change it to. */
20408 char *deprecated; /* If non-null, print this message. */
20409 };
20410
20411 struct arm_option_table arm_opts[] =
20412 {
20413 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
20414 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
20415 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
20416 &support_interwork, 1, NULL},
20417 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
20418 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
20419 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
20420 1, NULL},
20421 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
20422 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
20423 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
20424 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
20425 NULL},
20426
20427 /* These are recognized by the assembler, but have no effect on code. */
20428 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
20429 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
20430
20431 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
20432 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
20433 &warn_on_deprecated, 0, NULL},
20434 {NULL, NULL, NULL, 0, NULL}
20435 };
20436
20437 struct arm_legacy_option_table
20438 {
20439 char *option; /* Option name to match. */
20440 const arm_feature_set **var; /* Variable to change. */
20441 const arm_feature_set value; /* What to change it to. */
20442 char *deprecated; /* If non-null, print this message. */
20443 };
20444
20445 const struct arm_legacy_option_table arm_legacy_opts[] =
20446 {
20447 /* DON'T add any new processors to this list -- we want the whole list
20448 to go away... Add them to the processors table instead. */
20449 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
20450 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
20451 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
20452 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
20453 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
20454 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
20455 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
20456 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
20457 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
20458 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
20459 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
20460 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
20461 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
20462 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
20463 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
20464 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
20465 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
20466 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
20467 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
20468 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
20469 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
20470 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
20471 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
20472 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
20473 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
20474 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
20475 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
20476 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
20477 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
20478 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
20479 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
20480 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
20481 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
20482 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
20483 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
20484 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
20485 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
20486 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
20487 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
20488 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
20489 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
20490 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
20491 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
20492 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
20493 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
20494 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
20495 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20496 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20497 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20498 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
20499 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
20500 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
20501 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
20502 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
20503 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
20504 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
20505 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
20506 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
20507 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
20508 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
20509 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
20510 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
20511 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
20512 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
20513 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
20514 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
20515 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
20516 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
20517 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
20518 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
20519 N_("use -mcpu=strongarm110")},
20520 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
20521 N_("use -mcpu=strongarm1100")},
20522 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
20523 N_("use -mcpu=strongarm1110")},
20524 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
20525 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
20526 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
20527
20528 /* Architecture variants -- don't add any more to this list either. */
20529 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
20530 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
20531 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
20532 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
20533 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
20534 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
20535 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
20536 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
20537 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
20538 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
20539 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
20540 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
20541 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
20542 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
20543 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
20544 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
20545 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
20546 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
20547
20548 /* Floating point variants -- don't add any more to this list either. */
20549 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
20550 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
20551 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
20552 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
20553 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
20554
20555 {NULL, NULL, ARM_ARCH_NONE, NULL}
20556 };
20557
20558 struct arm_cpu_option_table
20559 {
20560 char *name;
20561 const arm_feature_set value;
20562 /* For some CPUs we assume an FPU unless the user explicitly sets
20563 -mfpu=... */
20564 const arm_feature_set default_fpu;
20565 /* The canonical name of the CPU, or NULL to use NAME converted to upper
20566 case. */
20567 const char *canonical_name;
20568 };
20569
20570 /* This list should, at a minimum, contain all the cpu names
20571 recognized by GCC. */
20572 static const struct arm_cpu_option_table arm_cpus[] =
20573 {
20574 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
20575 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
20576 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
20577 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
20578 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
20579 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20580 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20581 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20582 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20583 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20584 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20585 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
20586 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20587 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
20588 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20589 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
20590 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20591 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20592 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20593 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20594 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20595 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20596 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20597 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20598 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20599 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20600 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20601 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
20602 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20603 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20604 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20605 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20606 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20607 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20608 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20609 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20610 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20611 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20612 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20613 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
20614 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20615 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20616 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20617 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
20618 {"fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20619 {"fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
20620 /* For V5 or later processors we default to using VFP; but the user
20621 should really set the FPU type explicitly. */
20622 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
20623 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20624 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
20625 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
20626 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
20627 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
20628 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
20629 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20630 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
20631 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
20632 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20633 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20634 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
20635 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
20636 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20637 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
20638 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
20639 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20640 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20641 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
20642 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
20643 {"fa626te", ARM_ARCH_V5TE, FPU_NONE, NULL},
20644 {"fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
20645 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
20646 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
20647 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
20648 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
20649 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
20650 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
20651 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
20652 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
20653 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
20654 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
20655 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
20656 | FPU_NEON_EXT_V1),
20657 NULL},
20658 {"cortex-a9", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
20659 | FPU_NEON_EXT_V1),
20660 NULL},
20661 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
20662 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
20663 {"cortex-m1", ARM_ARCH_V6M, FPU_NONE, NULL},
20664 /* ??? XSCALE is really an architecture. */
20665 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
20666 /* ??? iwmmxt is not a processor. */
20667 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
20668 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
20669 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
20670 /* Maverick */
20671 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
20672 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
20673 };
20674
20675 struct arm_arch_option_table
20676 {
20677 char *name;
20678 const arm_feature_set value;
20679 const arm_feature_set default_fpu;
20680 };
20681
20682 /* This list should, at a minimum, contain all the architecture names
20683 recognized by GCC. */
20684 static const struct arm_arch_option_table arm_archs[] =
20685 {
20686 {"all", ARM_ANY, FPU_ARCH_FPA},
20687 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
20688 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
20689 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
20690 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
20691 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
20692 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
20693 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
20694 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
20695 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
20696 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
20697 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
20698 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
20699 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
20700 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
20701 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
20702 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
20703 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
20704 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
20705 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
20706 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
20707 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
20708 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
20709 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
20710 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
20711 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
20712 {"armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP},
20713 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
20714 /* The official spelling of the ARMv7 profile variants is the dashed form.
20715 Accept the non-dashed form for compatibility with old toolchains. */
20716 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
20717 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
20718 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
20719 {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
20720 {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
20721 {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
20722 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
20723 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
20724 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
20725 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
20726 };
20727
20728 /* ISA extensions in the co-processor space. */
20729 struct arm_option_cpu_value_table
20730 {
20731 char *name;
20732 const arm_feature_set value;
20733 };
20734
20735 static const struct arm_option_cpu_value_table arm_extensions[] =
20736 {
20737 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
20738 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
20739 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
20740 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
20741 {NULL, ARM_ARCH_NONE}
20742 };
20743
20744 /* This list should, at a minimum, contain all the fpu names
20745 recognized by GCC. */
20746 static const struct arm_option_cpu_value_table arm_fpus[] =
20747 {
20748 {"softfpa", FPU_NONE},
20749 {"fpe", FPU_ARCH_FPE},
20750 {"fpe2", FPU_ARCH_FPE},
20751 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
20752 {"fpa", FPU_ARCH_FPA},
20753 {"fpa10", FPU_ARCH_FPA},
20754 {"fpa11", FPU_ARCH_FPA},
20755 {"arm7500fe", FPU_ARCH_FPA},
20756 {"softvfp", FPU_ARCH_VFP},
20757 {"softvfp+vfp", FPU_ARCH_VFP_V2},
20758 {"vfp", FPU_ARCH_VFP_V2},
20759 {"vfp9", FPU_ARCH_VFP_V2},
20760 {"vfp3", FPU_ARCH_VFP_V3}, /* For backwards compatbility. */
20761 {"vfp10", FPU_ARCH_VFP_V2},
20762 {"vfp10-r0", FPU_ARCH_VFP_V1},
20763 {"vfpxd", FPU_ARCH_VFP_V1xD},
20764 {"vfpv2", FPU_ARCH_VFP_V2},
20765 {"vfpv3", FPU_ARCH_VFP_V3},
20766 {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
20767 {"arm1020t", FPU_ARCH_VFP_V1},
20768 {"arm1020e", FPU_ARCH_VFP_V2},
20769 {"arm1136jfs", FPU_ARCH_VFP_V2},
20770 {"arm1136jf-s", FPU_ARCH_VFP_V2},
20771 {"maverick", FPU_ARCH_MAVERICK},
20772 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
20773 {"neon-fp16", FPU_ARCH_NEON_FP16},
20774 {NULL, ARM_ARCH_NONE}
20775 };
20776
20777 struct arm_option_value_table
20778 {
20779 char *name;
20780 long value;
20781 };
20782
20783 static const struct arm_option_value_table arm_float_abis[] =
20784 {
20785 {"hard", ARM_FLOAT_ABI_HARD},
20786 {"softfp", ARM_FLOAT_ABI_SOFTFP},
20787 {"soft", ARM_FLOAT_ABI_SOFT},
20788 {NULL, 0}
20789 };
20790
20791 #ifdef OBJ_ELF
20792 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
20793 static const struct arm_option_value_table arm_eabis[] =
20794 {
20795 {"gnu", EF_ARM_EABI_UNKNOWN},
20796 {"4", EF_ARM_EABI_VER4},
20797 {"5", EF_ARM_EABI_VER5},
20798 {NULL, 0}
20799 };
20800 #endif
20801
20802 struct arm_long_option_table
20803 {
20804 char * option; /* Substring to match. */
20805 char * help; /* Help information. */
20806 int (* func) (char * subopt); /* Function to decode sub-option. */
20807 char * deprecated; /* If non-null, print this message. */
20808 };
20809
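/* Parse a string of "+<extension>" suffixes (for example "+iwmmxt" or
   "+maverick", as listed in arm_extensions above) and merge each named
   feature set into *OPT_P.  Returns nonzero on success, zero on error.  */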
20810 static int
20811 arm_parse_extension (char * str, const arm_feature_set **opt_p)
20812 {
20813 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
20814
20815 /* Copy the feature set, so that we can modify it. */
20816 *ext_set = **opt_p;
20817 *opt_p = ext_set;
20818
20819 while (str != NULL && *str != 0)
20820 {
20821 const struct arm_option_cpu_value_table * opt;
20822 char * ext;
20823 int optlen;
20824
20825 if (*str != '+')
20826 {
20827 as_bad (_("invalid architectural extension"));
20828 return 0;
20829 }
20830
20831 str++;
20832 ext = strchr (str, '+');
20833
20834 if (ext != NULL)
20835 optlen = ext - str;
20836 else
20837 optlen = strlen (str);
20838
20839 if (optlen == 0)
20840 {
20841 as_bad (_("missing architectural extension"));
20842 return 0;
20843 }
20844
20845 for (opt = arm_extensions; opt->name != NULL; opt++)
20846 if (strncmp (opt->name, str, optlen) == 0)
20847 {
20848 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
20849 break;
20850 }
20851
20852 if (opt->name == NULL)
20853 {
20854 as_bad (_("unknown architectural extension `%s'"), str);
20855 return 0;
20856 }
20857
20858 str = ext;
20859 }
20860
20861 return 1;
20862 }
20863
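/* Handle the argument to -mcpu=, e.g. "arm920t"; any trailing
   "+<extension>" suffixes are handed on to arm_parse_extension.  */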
20864 static int
20865 arm_parse_cpu (char * str)
20866 {
20867 const struct arm_cpu_option_table * opt;
20868 char * ext = strchr (str, '+');
20869 int optlen;
20870
20871 if (ext != NULL)
20872 optlen = ext - str;
20873 else
20874 optlen = strlen (str);
20875
20876 if (optlen == 0)
20877 {
20878 as_bad (_("missing cpu name `%s'"), str);
20879 return 0;
20880 }
20881
20882 for (opt = arm_cpus; opt->name != NULL; opt++)
20883 if (strncmp (opt->name, str, optlen) == 0)
20884 {
20885 mcpu_cpu_opt = &opt->value;
20886 mcpu_fpu_opt = &opt->default_fpu;
20887 if (opt->canonical_name)
20888 strcpy (selected_cpu_name, opt->canonical_name);
20889 else
20890 {
20891 int i;
20892 for (i = 0; i < optlen; i++)
20893 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20894 selected_cpu_name[i] = 0;
20895 }
20896
20897 if (ext != NULL)
20898 return arm_parse_extension (ext, &mcpu_cpu_opt);
20899
20900 return 1;
20901 }
20902
20903 as_bad (_("unknown cpu `%s'"), str);
20904 return 0;
20905 }
20906
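/* Handle the argument to -march=, e.g. "armv5te", again allowing
   "+<extension>" suffixes.  */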
20907 static int
20908 arm_parse_arch (char * str)
20909 {
20910 const struct arm_arch_option_table *opt;
20911 char *ext = strchr (str, '+');
20912 int optlen;
20913
20914 if (ext != NULL)
20915 optlen = ext - str;
20916 else
20917 optlen = strlen (str);
20918
20919 if (optlen == 0)
20920 {
20921 as_bad (_("missing architecture name `%s'"), str);
20922 return 0;
20923 }
20924
20925 for (opt = arm_archs; opt->name != NULL; opt++)
20926 if (streq (opt->name, str))
20927 {
20928 march_cpu_opt = &opt->value;
20929 march_fpu_opt = &opt->default_fpu;
20930 strcpy (selected_cpu_name, opt->name);
20931
20932 if (ext != NULL)
20933 return arm_parse_extension (ext, &march_cpu_opt);
20934
20935 return 1;
20936 }
20937
20938 as_bad (_("unknown architecture `%s'\n"), str);
20939 return 0;
20940 }
20941
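/* Handle the argument to -mfpu=, e.g. "vfpv3" or "neon" (see arm_fpus).  */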
20942 static int
20943 arm_parse_fpu (char * str)
20944 {
20945 const struct arm_option_cpu_value_table * opt;
20946
20947 for (opt = arm_fpus; opt->name != NULL; opt++)
20948 if (streq (opt->name, str))
20949 {
20950 mfpu_opt = &opt->value;
20951 return 1;
20952 }
20953
20954 as_bad (_("unknown floating point format `%s'\n"), str);
20955 return 0;
20956 }
20957
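/* Handle the argument to -mfloat-abi=: one of "hard", "softfp" or "soft".  */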
20958 static int
20959 arm_parse_float_abi (char * str)
20960 {
20961 const struct arm_option_value_table * opt;
20962
20963 for (opt = arm_float_abis; opt->name != NULL; opt++)
20964 if (streq (opt->name, str))
20965 {
20966 mfloat_abi_opt = opt->value;
20967 return 1;
20968 }
20969
20970 as_bad (_("unknown floating point abi `%s'\n"), str);
20971 return 0;
20972 }
20973
20974 #ifdef OBJ_ELF
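/* Handle the argument to -meabi=: "gnu", "4" or "5" (see arm_eabis).  */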
20975 static int
20976 arm_parse_eabi (char * str)
20977 {
20978 const struct arm_option_value_table *opt;
20979
20980 for (opt = arm_eabis; opt->name != NULL; opt++)
20981 if (streq (opt->name, str))
20982 {
20983 meabi_flags = opt->value;
20984 return 1;
20985 }
20986 as_bad (_("unknown EABI `%s'\n"), str);
20987 return 0;
20988 }
20989 #endif
20990
20991 struct arm_long_option_table arm_long_opts[] =
20992 {
20993 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
20994 arm_parse_cpu, NULL},
20995 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
20996 arm_parse_arch, NULL},
20997 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
20998 arm_parse_fpu, NULL},
20999 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
21000 arm_parse_float_abi, NULL},
21001 #ifdef OBJ_ELF
21002 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
21003 arm_parse_eabi, NULL},
21004 #endif
21005 {NULL, NULL, 0, NULL}
21006 };
21007
21008 int
21009 md_parse_option (int c, char * arg)
21010 {
21011 struct arm_option_table *opt;
21012 const struct arm_legacy_option_table *fopt;
21013 struct arm_long_option_table *lopt;
21014
21015 switch (c)
21016 {
21017 #ifdef OPTION_EB
21018 case OPTION_EB:
21019 target_big_endian = 1;
21020 break;
21021 #endif
21022
21023 #ifdef OPTION_EL
21024 case OPTION_EL:
21025 target_big_endian = 0;
21026 break;
21027 #endif
21028
21029 case OPTION_FIX_V4BX:
21030 fix_v4bx = TRUE;
21031 break;
21032
21033 case 'a':
21034 /* Listing option. Just ignore these; we don't support additional
21035 ones. */
21036 return 0;
21037
21038 default:
21039 for (opt = arm_opts; opt->option != NULL; opt++)
21040 {
21041 if (c == opt->option[0]
21042 && ((arg == NULL && opt->option[1] == 0)
21043 || streq (arg, opt->option + 1)))
21044 {
21045 /* If the option is deprecated, tell the user. */
21046 if (warn_on_deprecated && opt->deprecated != NULL)
21047 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
21048 arg ? arg : "", _(opt->deprecated));
21049
21050 if (opt->var != NULL)
21051 *opt->var = opt->value;
21052
21053 return 1;
21054 }
21055 }
21056
21057 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
21058 {
21059 if (c == fopt->option[0]
21060 && ((arg == NULL && fopt->option[1] == 0)
21061 || streq (arg, fopt->option + 1)))
21062 {
21063 /* If the option is deprecated, tell the user. */
21064 if (warn_on_deprecated && fopt->deprecated != NULL)
21065 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
21066 arg ? arg : "", _(fopt->deprecated));
21067
21068 if (fopt->var != NULL)
21069 *fopt->var = &fopt->value;
21070
21071 return 1;
21072 }
21073 }
21074
21075 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
21076 {
21077 /* These options are expected to have an argument. */
21078 if (c == lopt->option[0]
21079 && arg != NULL
21080 && strncmp (arg, lopt->option + 1,
21081 strlen (lopt->option + 1)) == 0)
21082 {
21083 /* If the option is deprecated, tell the user. */
21084 if (warn_on_deprecated && lopt->deprecated != NULL)
21085 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
21086 _(lopt->deprecated));
21087
21088 /* Call the sub-option parser. */
21089 return lopt->func (arg + strlen (lopt->option) - 1);
21090 }
21091 }
21092
21093 return 0;
21094 }
21095
21096 return 1;
21097 }
21098
21099 void
21100 md_show_usage (FILE * fp)
21101 {
21102 struct arm_option_table *opt;
21103 struct arm_long_option_table *lopt;
21104
21105 fprintf (fp, _(" ARM-specific assembler options:\n"));
21106
21107 for (opt = arm_opts; opt->option != NULL; opt++)
21108 if (opt->help != NULL)
21109 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
21110
21111 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
21112 if (lopt->help != NULL)
21113 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
21114
21115 #ifdef OPTION_EB
21116 fprintf (fp, _("\
21117 -EB assemble code for a big-endian cpu\n"));
21118 #endif
21119
21120 #ifdef OPTION_EL
21121 fprintf (fp, _("\
21122 -EL assemble code for a little-endian cpu\n"));
21123 #endif
21124
21125 fprintf (fp, _("\
21126 --fix-v4bx Allow BX in ARMv4 code\n"));
21127 }
21128
21129
21130 #ifdef OBJ_ELF
21131 typedef struct
21132 {
21133 int val;
21134 arm_feature_set flags;
21135 } cpu_arch_ver_table;
21136
21137 /* Mapping from CPU features to EABI CPU arch values. The table must be
21138 sorted so that entries with the fewest features come first. */
21139 static const cpu_arch_ver_table cpu_arch_ver[] =
21140 {
21141 {1, ARM_ARCH_V4},
21142 {2, ARM_ARCH_V4T},
21143 {3, ARM_ARCH_V5},
21144 {3, ARM_ARCH_V5T},
21145 {4, ARM_ARCH_V5TE},
21146 {5, ARM_ARCH_V5TEJ},
21147 {6, ARM_ARCH_V6},
21148 {7, ARM_ARCH_V6Z},
21149 {9, ARM_ARCH_V6K},
21150 {11, ARM_ARCH_V6M},
21151 {8, ARM_ARCH_V6T2},
21152 {10, ARM_ARCH_V7A},
21153 {10, ARM_ARCH_V7R},
21154 {10, ARM_ARCH_V7M},
21155 {0, ARM_ARCH_NONE}
21156 };
21157
21158 /* Set an attribute if it has not already been set by the user. */
21159 static void
21160 aeabi_set_attribute_int (int tag, int value)
21161 {
21162 if (tag < 1
21163 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
21164 || !attributes_set_explicitly[tag])
21165 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
21166 }
21167
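/* Likewise, but for string-valued attributes.  */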
21168 static void
21169 aeabi_set_attribute_string (int tag, const char *value)
21170 {
21171 if (tag < 1
21172 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
21173 || !attributes_set_explicitly[tag])
21174 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
21175 }
21176
21177 /* Set the public EABI object attributes. */
21178 static void
21179 aeabi_set_public_attributes (void)
21180 {
21181 int arch;
21182 arm_feature_set flags;
21183 arm_feature_set tmp;
21184 const cpu_arch_ver_table *p;
21185
21186 /* Choose the architecture based on the capabilities of the requested cpu
21187 (if any) and/or the instructions actually used. */
21188 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
21189 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
21190 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
21191 /* Allow the user to override the reported architecture. */
21192 if (object_arch)
21193 {
21194 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
21195 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
21196 }
21197
21198 tmp = flags;
21199 arch = 0;
21200 for (p = cpu_arch_ver; p->val; p++)
21201 {
21202 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
21203 {
21204 arch = p->val;
21205 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
21206 }
21207 }
21208
21209 /* Tag_CPU_name. */
21210 if (selected_cpu_name[0])
21211 {
21212 char *p;
21213
21214 p = selected_cpu_name;
21215 if (strncmp (p, "armv", 4) == 0)
21216 {
21217 int i;
21218
21219 p += 4;
21220 for (i = 0; p[i]; i++)
21221 p[i] = TOUPPER (p[i]);
21222 }
21223 aeabi_set_attribute_string (Tag_CPU_name, p);
21224 }
21225 /* Tag_CPU_arch. */
21226 aeabi_set_attribute_int (Tag_CPU_arch, arch);
21227 /* Tag_CPU_arch_profile. */
21228 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
21229 aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
21230 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
21231 aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
21232 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
21233 aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');
21234 /* Tag_ARM_ISA_use. */
21235 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
21236 || arch == 0)
21237 aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
21238 /* Tag_THUMB_ISA_use. */
21239 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
21240 || arch == 0)
21241 aeabi_set_attribute_int (Tag_THUMB_ISA_use,
21242 ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
21243 /* Tag_VFP_arch. */
21244 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
21245 aeabi_set_attribute_int (Tag_VFP_arch, 3);
21246 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3))
21247 aeabi_set_attribute_int (Tag_VFP_arch, 4);
21248 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
21249 aeabi_set_attribute_int (Tag_VFP_arch, 2);
21250 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
21251 || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
21252 aeabi_set_attribute_int (Tag_VFP_arch, 1);
21253 /* Tag_WMMX_arch. */
21254 if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
21255 aeabi_set_attribute_int (Tag_WMMX_arch, 2);
21256 else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
21257 aeabi_set_attribute_int (Tag_WMMX_arch, 1);
21258 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
21259 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
21260 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
21261 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
21262 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_fp16))
21263 aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
21264 }
21265
21266 /* Add the default contents for the .ARM.attributes section. */
21267 void
21268 arm_md_end (void)
21269 {
21270 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
21271 return;
21272
21273 aeabi_set_public_attributes ();
21274 }
21275 #endif /* OBJ_ELF */
21276
21277
21278 /* Parse a .cpu directive. */
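/* Illustrative example (not in the original source): ".cpu arm926ej-s"
   selects the matching arm_cpus entry and re-merges the active feature
   set via ARM_MERGE_FEATURE_SETS below.  */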
21279
21280 static void
21281 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
21282 {
21283 const struct arm_cpu_option_table *opt;
21284 char *name;
21285 char saved_char;
21286
21287 name = input_line_pointer;
21288 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
21289 input_line_pointer++;
21290 saved_char = *input_line_pointer;
21291 *input_line_pointer = 0;
21292
21293 /* Skip the first "all" entry. */
21294 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
21295 if (streq (opt->name, name))
21296 {
21297 mcpu_cpu_opt = &opt->value;
21298 selected_cpu = opt->value;
21299 if (opt->canonical_name)
21300 strcpy (selected_cpu_name, opt->canonical_name);
21301 else
21302 {
21303 int i;
21304 for (i = 0; opt->name[i]; i++)
21305 selected_cpu_name[i] = TOUPPER (opt->name[i]);
21306 selected_cpu_name[i] = 0;
21307 }
21308 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
21309 *input_line_pointer = saved_char;
21310 demand_empty_rest_of_line ();
21311 return;
21312 }
21313 as_bad (_("unknown cpu `%s'"), name);
21314 *input_line_pointer = saved_char;
21315 ignore_rest_of_line ();
21316 }
21317
21318
21319 /* Parse a .arch directive. */
21320
21321 static void
21322 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
21323 {
21324 const struct arm_arch_option_table *opt;
21325 char saved_char;
21326 char *name;
21327
21328 name = input_line_pointer;
21329 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
21330 input_line_pointer++;
21331 saved_char = *input_line_pointer;
21332 *input_line_pointer = 0;
21333
21334 /* Skip the first "all" entry. */
21335 for (opt = arm_archs + 1; opt->name != NULL; opt++)
21336 if (streq (opt->name, name))
21337 {
21338 mcpu_cpu_opt = &opt->value;
21339 selected_cpu = opt->value;
21340 strcpy (selected_cpu_name, opt->name);
21341 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
21342 *input_line_pointer = saved_char;
21343 demand_empty_rest_of_line ();
21344 return;
21345 }
21346
21347 as_bad (_("unknown architecture `%s'\n"), name);
21348 *input_line_pointer = saved_char;
21349 ignore_rest_of_line ();
21350 }
21351
21352
21353 /* Parse a .object_arch directive. */
21354
21355 static void
21356 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
21357 {
21358 const struct arm_arch_option_table *opt;
21359 char saved_char;
21360 char *name;
21361
21362 name = input_line_pointer;
21363 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
21364 input_line_pointer++;
21365 saved_char = *input_line_pointer;
21366 *input_line_pointer = 0;
21367
21368 /* Skip the first "all" entry. */
21369 for (opt = arm_archs + 1; opt->name != NULL; opt++)
21370 if (streq (opt->name, name))
21371 {
21372 object_arch = &opt->value;
21373 *input_line_pointer = saved_char;
21374 demand_empty_rest_of_line ();
21375 return;
21376 }
21377
21378 as_bad (_("unknown architecture `%s'\n"), name);
21379 *input_line_pointer = saved_char;
21380 ignore_rest_of_line ();
21381 }
21382
21383 /* Parse a .fpu directive. */
21384
21385 static void
21386 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
21387 {
21388 const struct arm_option_cpu_value_table *opt;
21389 char saved_char;
21390 char *name;
21391
21392 name = input_line_pointer;
21393 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
21394 input_line_pointer++;
21395 saved_char = *input_line_pointer;
21396 *input_line_pointer = 0;
21397
21398 for (opt = arm_fpus; opt->name != NULL; opt++)
21399 if (streq (opt->name, name))
21400 {
21401 mfpu_opt = &opt->value;
21402 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
21403 *input_line_pointer = saved_char;
21404 demand_empty_rest_of_line ();
21405 return;
21406 }
21407
21408 as_bad (_("unknown floating point format `%s'\n"), name);
21409 *input_line_pointer = saved_char;
21410 ignore_rest_of_line ();
21411 }
21412
21413 /* Copy symbol information. */
21414
21415 void
21416 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
21417 {
21418 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
21419 }
21420
21421 #ifdef OBJ_ELF
21422 /* Given a symbolic attribute NAME, return the proper integer value.
21423 Returns -1 if the attribute is not known. */
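/* For example, passing "Tag_CPU_name" returns the integer tag of the same
   name from the attribute_table below.  */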
21424
21425 int
21426 arm_convert_symbolic_attribute (const char *name)
21427 {
21428 static const struct
21429 {
21430 const char * name;
21431 const int tag;
21432 }
21433 attribute_table[] =
21434 {
21435 /* When you modify this table you should
21436 also modify the list in doc/c-arm.texi. */
21437 #define T(tag) {#tag, tag}
21438 T (Tag_CPU_raw_name),
21439 T (Tag_CPU_name),
21440 T (Tag_CPU_arch),
21441 T (Tag_CPU_arch_profile),
21442 T (Tag_ARM_ISA_use),
21443 T (Tag_THUMB_ISA_use),
21444 T (Tag_VFP_arch),
21445 T (Tag_WMMX_arch),
21446 T (Tag_Advanced_SIMD_arch),
21447 T (Tag_PCS_config),
21448 T (Tag_ABI_PCS_R9_use),
21449 T (Tag_ABI_PCS_RW_data),
21450 T (Tag_ABI_PCS_RO_data),
21451 T (Tag_ABI_PCS_GOT_use),
21452 T (Tag_ABI_PCS_wchar_t),
21453 T (Tag_ABI_FP_rounding),
21454 T (Tag_ABI_FP_denormal),
21455 T (Tag_ABI_FP_exceptions),
21456 T (Tag_ABI_FP_user_exceptions),
21457 T (Tag_ABI_FP_number_model),
21458 T (Tag_ABI_align8_needed),
21459 T (Tag_ABI_align8_preserved),
21460 T (Tag_ABI_enum_size),
21461 T (Tag_ABI_HardFP_use),
21462 T (Tag_ABI_VFP_args),
21463 T (Tag_ABI_WMMX_args),
21464 T (Tag_ABI_optimization_goals),
21465 T (Tag_ABI_FP_optimization_goals),
21466 T (Tag_compatibility),
21467 T (Tag_CPU_unaligned_access),
21468 T (Tag_VFP_HP_extension),
21469 T (Tag_ABI_FP_16bit_format),
21470 T (Tag_nodefaults),
21471 T (Tag_also_compatible_with),
21472 T (Tag_conformance),
21473 T (Tag_T2EE_use),
21474 T (Tag_Virtualization_use),
21475 T (Tag_MPextension_use)
21476 #undef T
21477 };
21478 unsigned int i;
21479
21480 if (name == NULL)
21481 return -1;
21482
21483 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
21484 if (strcmp (name, attribute_table[i].name) == 0)
21485 return attribute_table[i].tag;
21486
21487 return -1;
21488 }
21489 #endif /* OBJ_ELF */