1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <limits.h>
29 #define NO_RELOC 0
30 #include "as.h"
31 #include "safe-ctype.h"
32 #include "subsegs.h"
33 #include "obstack.h"
34
35 #include "opcode/arm.h"
36
37 #ifdef OBJ_ELF
38 #include "elf/arm.h"
39 #include "dwarf2dbg.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 /* XXX Set this to 1 after the next binutils release. */
44 #define WARN_DEPRECATED 0
45
46 #ifdef OBJ_ELF
47 /* Must be at least the size of the largest unwind opcode (currently two). */
48 #define ARM_OPCODE_CHUNK_SIZE 8
49
50 /* This structure holds the unwinding state. */
51
52 static struct
53 {
54 symbolS * proc_start;
55 symbolS * table_entry;
56 symbolS * personality_routine;
57 int personality_index;
58 /* The segment containing the function. */
59 segT saved_seg;
60 subsegT saved_subseg;
61 /* Opcodes generated from this function. */
62 unsigned char * opcodes;
63 int opcode_count;
64 int opcode_alloc;
65 /* The number of bytes pushed to the stack. */
66 offsetT frame_size;
67 /* We don't add stack adjustment opcodes immediately so that we can merge
68 multiple adjustments. We can also omit the final adjustment
69 when using a frame pointer. */
70 offsetT pending_offset;
71 /* These two fields are set by both unwind_movsp and unwind_setfp. They
72 hold the reg+offset to use when restoring sp from a frame pointer. */
73 offsetT fp_offset;
74 int fp_reg;
75 /* Nonzero if an unwind_setfp directive has been seen. */
76 unsigned fp_used:1;
77 /* Nonzero if the last opcode restores sp from fp_reg. */
78 unsigned sp_restored:1;
79 } unwind;
80
81 /* Bit N indicates that an R_ARM_NONE relocation has been output for
82 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
83 emitted only once per section, to save unnecessary bloat. */
84 static unsigned int marked_pr_dependency = 0;
85
86 #endif /* OBJ_ELF */
87
88 enum arm_float_abi
89 {
90 ARM_FLOAT_ABI_HARD,
91 ARM_FLOAT_ABI_SOFTFP,
92 ARM_FLOAT_ABI_SOFT
93 };
94
95 /* Types of processor to assemble for. */
96 #ifndef CPU_DEFAULT
97 #if defined __XSCALE__
98 #define CPU_DEFAULT ARM_ARCH_XSCALE
99 #else
100 #if defined __thumb__
101 #define CPU_DEFAULT ARM_ARCH_V5T
102 #endif
103 #endif
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136
137 /* Variables that we set while parsing command-line options. Once all
138 options have been read we re-process these values to set the real
139 assembly flags. */
140 static const arm_feature_set *legacy_cpu = NULL;
141 static const arm_feature_set *legacy_fpu = NULL;
142
143 static const arm_feature_set *mcpu_cpu_opt = NULL;
144 static const arm_feature_set *mcpu_fpu_opt = NULL;
145 static const arm_feature_set *march_cpu_opt = NULL;
146 static const arm_feature_set *march_fpu_opt = NULL;
147 static const arm_feature_set *mfpu_opt = NULL;
148
149 /* Constants for known architecture features. */
150 static const arm_feature_set fpu_default = FPU_DEFAULT;
151 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
152 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
153 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
154 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
155 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
156 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
157 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
158 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
159
160 #ifdef CPU_DEFAULT
161 static const arm_feature_set cpu_default = CPU_DEFAULT;
162 #endif
163
164 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
165 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
166 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
167 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
168 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
169 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
170 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
171 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
172 static const arm_feature_set arm_ext_v4t_5 =
173 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
174 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
175 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
176 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
177 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
178 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
179 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
180 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
181 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
182 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
183 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
184 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
185 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
186 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
187 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
188
189 static const arm_feature_set arm_arch_any = ARM_ANY;
190 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
191 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
192 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
193
194 static const arm_feature_set arm_cext_iwmmxt =
195 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
196 static const arm_feature_set arm_cext_xscale =
197 ARM_FEATURE (0, ARM_CEXT_XSCALE);
198 static const arm_feature_set arm_cext_maverick =
199 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
200 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
201 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
202 static const arm_feature_set fpu_vfp_ext_v1xd =
203 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
204 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
205 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
206 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
207 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
208 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
209 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
210
211 static int mfloat_abi_opt = -1;
212 /* Record user cpu selection for object attributes. */
213 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
214 /* Must be long enough to hold any of the names in arm_cpus. */
215 static char selected_cpu_name[16];
216 #ifdef OBJ_ELF
217 # ifdef EABI_DEFAULT
218 static int meabi_flags = EABI_DEFAULT;
219 # else
220 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
221 # endif
222 #endif
223
224 #ifdef OBJ_ELF
225 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
226 symbolS * GOT_symbol;
227 #endif
228
229 /* 0: assemble for ARM,
230 1: assemble for Thumb,
231 2: assemble for Thumb even though target CPU does not support thumb
232 instructions. */
233 static int thumb_mode = 0;
234
235 /* If unified_syntax is true, we are processing the new unified
236 ARM/Thumb syntax. Important differences from the old ARM mode:
237
238 - Immediate operands do not require a # prefix.
239 - Conditional affixes always appear at the end of the
240 instruction. (For backward compatibility, those instructions
241 that formerly had them in the middle, continue to accept them
242 there.)
243 - The IT instruction may appear, and if it does is validated
244 against subsequent conditional affixes. It does not generate
245 machine code.
246
247 Important differences from the old Thumb mode:
248
249 - Immediate operands do not require a # prefix.
250 - Most of the V6T2 instructions are only available in unified mode.
251 - The .N and .W suffixes are recognized and honored (it is an error
252 if they cannot be honored).
253 - All instructions set the flags if and only if they have an 's' affix.
254 - Conditional affixes may be used. They are validated against
255 preceding IT instructions. Unlike ARM mode, you cannot use a
256 conditional affix except in the scope of an IT instruction. */
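/* Illustrative example (added for clarity, not from the original sources):
   with unified syntax selected, the Thumb-2 sequence

       it    eq               @ IT block covering one instruction
       addeq r0, r0, 1        @ condition affix at the end, '#' optional

   is accepted, and the "eq" affix is checked against the preceding IT
   instruction as described above.  */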
257
258 static bfd_boolean unified_syntax = FALSE;
259
260 enum neon_el_type
261 {
262 NT_invtype,
263 NT_untyped,
264 NT_integer,
265 NT_float,
266 NT_poly,
267 NT_signed,
268 NT_unsigned
269 };
270
271 struct neon_type_el
272 {
273 enum neon_el_type type;
274 unsigned size;
275 };
276
277 #define NEON_MAX_TYPE_ELS 4
278
279 struct neon_type
280 {
281 struct neon_type_el el[NEON_MAX_TYPE_ELS];
282 unsigned elems;
283 };
284
285 struct arm_it
286 {
287 const char * error;
288 unsigned long instruction;
289 int size;
290 int size_req;
291 int cond;
292 struct neon_type vectype;
293 /* Set to the opcode if the instruction needs relaxation.
294 Zero if the instruction is not relaxed. */
295 unsigned long relax;
296 struct
297 {
298 bfd_reloc_code_real_type type;
299 expressionS exp;
300 int pc_rel;
301 } reloc;
302
303 struct
304 {
305 unsigned reg;
306 signed int imm;
307 struct neon_type_el vectype;
308 unsigned present : 1; /* Operand present. */
309 unsigned isreg : 1; /* Operand was a register. */
310 unsigned immisreg : 1; /* .imm field is a second register. */
311 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
312 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
313 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
314 instructions. This allows us to disambiguate ARM <-> vector insns. */
315 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
316 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
317 unsigned hasreloc : 1; /* Operand has relocation suffix. */
318 unsigned writeback : 1; /* Operand has trailing ! */
319 unsigned preind : 1; /* Preindexed address. */
320 unsigned postind : 1; /* Postindexed address. */
321 unsigned negative : 1; /* Index register was negated. */
322 unsigned shifted : 1; /* Shift applied to operation. */
323 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
324 } operands[6];
325 };
326
327 static struct arm_it inst;
328
329 #define NUM_FLOAT_VALS 8
330
331 const char * fp_const[] =
332 {
333 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
334 };
335
336 /* Number of littlenums required to hold an extended precision number. */
337 #define MAX_LITTLENUMS 6
338
339 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
340
341 #define FAIL (-1)
342 #define SUCCESS (0)
343
344 #define SUFF_S 1
345 #define SUFF_D 2
346 #define SUFF_E 3
347 #define SUFF_P 4
348
349 #define CP_T_X 0x00008000
350 #define CP_T_Y 0x00400000
351
352 #define CONDS_BIT 0x00100000
353 #define LOAD_BIT 0x00100000
354
355 #define DOUBLE_LOAD_FLAG 0x00000001
356
357 struct asm_cond
358 {
359 const char * template;
360 unsigned long value;
361 };
362
363 #define COND_ALWAYS 0xE
364
365 struct asm_psr
366 {
367 const char *template;
368 unsigned long field;
369 };
370
371 struct asm_barrier_opt
372 {
373 const char *template;
374 unsigned long value;
375 };
376
377 /* The bit that distinguishes CPSR and SPSR. */
378 #define SPSR_BIT (1 << 22)
379
380 /* The individual PSR flag bits. */
381 #define PSR_c (1 << 16)
382 #define PSR_x (1 << 17)
383 #define PSR_s (1 << 18)
384 #define PSR_f (1 << 19)
385
386 struct reloc_entry
387 {
388 char *name;
389 bfd_reloc_code_real_type reloc;
390 };
391
392 enum vfp_reg_pos
393 {
394 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
395 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
396 };
397
398 enum vfp_ldstm_type
399 {
400 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
401 };
402
403 /* Bits for DEFINED field in neon_typed_alias. */
404 #define NTA_HASTYPE 1
405 #define NTA_HASINDEX 2
406
407 struct neon_typed_alias
408 {
409 unsigned char defined;
410 unsigned char index;
411 struct neon_type_el eltype;
412 };
413
414 /* ARM register categories. This includes coprocessor numbers and various
415 architecture extensions' registers. */
416 enum arm_reg_type
417 {
418 REG_TYPE_RN,
419 REG_TYPE_CP,
420 REG_TYPE_CN,
421 REG_TYPE_FN,
422 REG_TYPE_VFS,
423 REG_TYPE_VFD,
424 REG_TYPE_NQ,
425 REG_TYPE_NDQ,
426 REG_TYPE_VFC,
427 REG_TYPE_MVF,
428 REG_TYPE_MVD,
429 REG_TYPE_MVFX,
430 REG_TYPE_MVDX,
431 REG_TYPE_MVAX,
432 REG_TYPE_DSPSC,
433 REG_TYPE_MMXWR,
434 REG_TYPE_MMXWC,
435 REG_TYPE_MMXWCG,
436 REG_TYPE_XSCALE,
437 };
438
439 /* Structure for a hash table entry for a register.
440 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
441 information which states whether a vector type or index is specified (for a
442 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
443 struct reg_entry
444 {
445 const char *name;
446 unsigned char number;
447 unsigned char type;
448 unsigned char builtin;
449 struct neon_typed_alias *neon;
450 };
451
452 /* Diagnostics used when we don't get a register of the expected type. */
453 const char *const reg_expected_msgs[] =
454 {
455 N_("ARM register expected"),
456 N_("bad or missing co-processor number"),
457 N_("co-processor register expected"),
458 N_("FPA register expected"),
459 N_("VFP single precision register expected"),
460 N_("VFP/Neon double precision register expected"),
461 N_("Neon quad precision register expected"),
462 N_("Neon double or quad precision register expected"),
463 N_("VFP system register expected"),
464 N_("Maverick MVF register expected"),
465 N_("Maverick MVD register expected"),
466 N_("Maverick MVFX register expected"),
467 N_("Maverick MVDX register expected"),
468 N_("Maverick MVAX register expected"),
469 N_("Maverick DSPSC register expected"),
470 N_("iWMMXt data register expected"),
471 N_("iWMMXt control register expected"),
472 N_("iWMMXt scalar register expected"),
473 N_("XScale accumulator register expected"),
474 };
475
476 /* Some well known registers that we refer to directly elsewhere. */
477 #define REG_SP 13
478 #define REG_LR 14
479 #define REG_PC 15
480
481 /* ARM instructions take 4 bytes in the object file, Thumb instructions
482 take 2: */
483 #define INSN_SIZE 4
484
485 struct asm_opcode
486 {
487 /* Basic string to match. */
488 const char *template;
489
490 /* Parameters to instruction. */
491 unsigned char operands[8];
492
493 /* Conditional tag - see opcode_lookup. */
494 unsigned int tag : 4;
495
496 /* Basic instruction code. */
497 unsigned int avalue : 28;
498
499 /* Thumb-format instruction code. */
500 unsigned int tvalue;
501
502 /* Which architecture variant provides this instruction. */
503 const arm_feature_set *avariant;
504 const arm_feature_set *tvariant;
505
506 /* Function to call to encode instruction in ARM format. */
507 void (* aencode) (void);
508
509 /* Function to call to encode instruction in Thumb format. */
510 void (* tencode) (void);
511 };
512
513 /* Defines for various bits that we will want to toggle. */
514 #define INST_IMMEDIATE 0x02000000
515 #define OFFSET_REG 0x02000000
516 #define HWOFFSET_IMM 0x00400000
517 #define SHIFT_BY_REG 0x00000010
518 #define PRE_INDEX 0x01000000
519 #define INDEX_UP 0x00800000
520 #define WRITE_BACK 0x00200000
521 #define LDM_TYPE_2_OR_3 0x00400000
522
523 #define LITERAL_MASK 0xf000f000
524 #define OPCODE_MASK 0xfe1fffff
525 #define V4_STR_BIT 0x00000020
526
527 #define DATA_OP_SHIFT 21
528
529 #define T2_OPCODE_MASK 0xfe1fffff
530 #define T2_DATA_OP_SHIFT 21
531
532 /* Codes to distinguish the arithmetic instructions. */
533 #define OPCODE_AND 0
534 #define OPCODE_EOR 1
535 #define OPCODE_SUB 2
536 #define OPCODE_RSB 3
537 #define OPCODE_ADD 4
538 #define OPCODE_ADC 5
539 #define OPCODE_SBC 6
540 #define OPCODE_RSC 7
541 #define OPCODE_TST 8
542 #define OPCODE_TEQ 9
543 #define OPCODE_CMP 10
544 #define OPCODE_CMN 11
545 #define OPCODE_ORR 12
546 #define OPCODE_MOV 13
547 #define OPCODE_BIC 14
548 #define OPCODE_MVN 15
549
550 #define T2_OPCODE_AND 0
551 #define T2_OPCODE_BIC 1
552 #define T2_OPCODE_ORR 2
553 #define T2_OPCODE_ORN 3
554 #define T2_OPCODE_EOR 4
555 #define T2_OPCODE_ADD 8
556 #define T2_OPCODE_ADC 10
557 #define T2_OPCODE_SBC 11
558 #define T2_OPCODE_SUB 13
559 #define T2_OPCODE_RSB 14
560
561 #define T_OPCODE_MUL 0x4340
562 #define T_OPCODE_TST 0x4200
563 #define T_OPCODE_CMN 0x42c0
564 #define T_OPCODE_NEG 0x4240
565 #define T_OPCODE_MVN 0x43c0
566
567 #define T_OPCODE_ADD_R3 0x1800
568 #define T_OPCODE_SUB_R3 0x1a00
569 #define T_OPCODE_ADD_HI 0x4400
570 #define T_OPCODE_ADD_ST 0xb000
571 #define T_OPCODE_SUB_ST 0xb080
572 #define T_OPCODE_ADD_SP 0xa800
573 #define T_OPCODE_ADD_PC 0xa000
574 #define T_OPCODE_ADD_I8 0x3000
575 #define T_OPCODE_SUB_I8 0x3800
576 #define T_OPCODE_ADD_I3 0x1c00
577 #define T_OPCODE_SUB_I3 0x1e00
578
579 #define T_OPCODE_ASR_R 0x4100
580 #define T_OPCODE_LSL_R 0x4080
581 #define T_OPCODE_LSR_R 0x40c0
582 #define T_OPCODE_ROR_R 0x41c0
583 #define T_OPCODE_ASR_I 0x1000
584 #define T_OPCODE_LSL_I 0x0000
585 #define T_OPCODE_LSR_I 0x0800
586
587 #define T_OPCODE_MOV_I8 0x2000
588 #define T_OPCODE_CMP_I8 0x2800
589 #define T_OPCODE_CMP_LR 0x4280
590 #define T_OPCODE_MOV_HR 0x4600
591 #define T_OPCODE_CMP_HR 0x4500
592
593 #define T_OPCODE_LDR_PC 0x4800
594 #define T_OPCODE_LDR_SP 0x9800
595 #define T_OPCODE_STR_SP 0x9000
596 #define T_OPCODE_LDR_IW 0x6800
597 #define T_OPCODE_STR_IW 0x6000
598 #define T_OPCODE_LDR_IH 0x8800
599 #define T_OPCODE_STR_IH 0x8000
600 #define T_OPCODE_LDR_IB 0x7800
601 #define T_OPCODE_STR_IB 0x7000
602 #define T_OPCODE_LDR_RW 0x5800
603 #define T_OPCODE_STR_RW 0x5000
604 #define T_OPCODE_LDR_RH 0x5a00
605 #define T_OPCODE_STR_RH 0x5200
606 #define T_OPCODE_LDR_RB 0x5c00
607 #define T_OPCODE_STR_RB 0x5400
608
609 #define T_OPCODE_PUSH 0xb400
610 #define T_OPCODE_POP 0xbc00
611
612 #define T_OPCODE_BRANCH 0xe000
613
614 #define THUMB_SIZE 2 /* Size of thumb instruction. */
615 #define THUMB_PP_PC_LR 0x0100
616 #define THUMB_LOAD_BIT 0x0800
617 #define THUMB2_LOAD_BIT 0x00100000
618
619 #define BAD_ARGS _("bad arguments to instruction")
620 #define BAD_PC _("r15 not allowed here")
621 #define BAD_COND _("instruction cannot be conditional")
622 #define BAD_OVERLAP _("registers may not be the same")
623 #define BAD_HIREG _("lo register required")
624 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
625 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
626 #define BAD_BRANCH _("branch must be last instruction in IT block")
627 #define BAD_NOT_IT _("instruction not allowed in IT block")
628
629 static struct hash_control *arm_ops_hsh;
630 static struct hash_control *arm_cond_hsh;
631 static struct hash_control *arm_shift_hsh;
632 static struct hash_control *arm_psr_hsh;
633 static struct hash_control *arm_v7m_psr_hsh;
634 static struct hash_control *arm_reg_hsh;
635 static struct hash_control *arm_reloc_hsh;
636 static struct hash_control *arm_barrier_opt_hsh;
637
638 /* Stuff needed to resolve the label ambiguity
639 As:
640 ...
641 label: <insn>
642 may differ from:
643 ...
644 label:
645 <insn>
646 */
647
648 symbolS * last_label_seen;
649 static int label_is_thumb_function_name = FALSE;
650 \f
651 /* Literal pool structure. Held on a per-section
652 and per-sub-section basis. */
653
654 #define MAX_LITERAL_POOL_SIZE 1024
655 typedef struct literal_pool
656 {
657 expressionS literals [MAX_LITERAL_POOL_SIZE];
658 unsigned int next_free_entry;
659 unsigned int id;
660 symbolS * symbol;
661 segT section;
662 subsegT sub_section;
663 struct literal_pool * next;
664 } literal_pool;
665
666 /* Pointer to a linked list of literal pools. */
667 literal_pool * list_of_pools = NULL;
668
669 /* State variables for IT block handling. */
670 static int current_it_mask = 0;
671 static int current_cc;
672
673 \f
674 /* Pure syntax. */
675
676 /* This array holds the chars that always start a comment. If the
677 pre-processor is disabled, these aren't very useful. */
678 const char comment_chars[] = "@";
679
680 /* This array holds the chars that only start a comment at the beginning of
681 a line. If the line seems to have the form '# 123 filename'
682 .line and .file directives will appear in the pre-processed output. */
683 /* Note that input_file.c hand checks for '#' at the beginning of the
684 first line of the input file. This is because the compiler outputs
685 #NO_APP at the beginning of its output. */
686 /* Also note that comments like this one will always work. */
687 const char line_comment_chars[] = "#";
688
689 const char line_separator_chars[] = ";";
690
691 /* Chars that can be used to separate mant
692 from exp in floating point numbers. */
693 const char EXP_CHARS[] = "eE";
694
695 /* Chars that mean this number is a floating point constant. */
696 /* As in 0f12.456 */
697 /* or 0d1.2345e12 */
698
699 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
700
701 /* Prefix characters that indicate the start of an immediate
702 value. */
703 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
704
705 /* Separator character handling. */
706
707 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
708
709 static inline int
710 skip_past_char (char ** str, char c)
711 {
712 if (**str == c)
713 {
714 (*str)++;
715 return SUCCESS;
716 }
717 else
718 return FAIL;
719 }
720 #define skip_past_comma(str) skip_past_char (str, ',')
721
722 /* Arithmetic expressions (possibly involving symbols). */
723
724 /* Return TRUE if anything in the expression is a bignum. */
725
726 static int
727 walk_no_bignums (symbolS * sp)
728 {
729 if (symbol_get_value_expression (sp)->X_op == O_big)
730 return 1;
731
732 if (symbol_get_value_expression (sp)->X_add_symbol)
733 {
734 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
735 || (symbol_get_value_expression (sp)->X_op_symbol
736 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
737 }
738
739 return 0;
740 }
741
742 static int in_my_get_expression = 0;
743
744 /* Third argument to my_get_expression. */
745 #define GE_NO_PREFIX 0
746 #define GE_IMM_PREFIX 1
747 #define GE_OPT_PREFIX 2
748 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
749 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
750 #define GE_OPT_PREFIX_BIG 3
751
752 static int
753 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
754 {
755 char * save_in;
756 segT seg;
757
758 /* In unified syntax, all prefixes are optional. */
759 if (unified_syntax)
760 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
761 : GE_OPT_PREFIX;
762
763 switch (prefix_mode)
764 {
765 case GE_NO_PREFIX: break;
766 case GE_IMM_PREFIX:
767 if (!is_immediate_prefix (**str))
768 {
769 inst.error = _("immediate expression requires a # prefix");
770 return FAIL;
771 }
772 (*str)++;
773 break;
774 case GE_OPT_PREFIX:
775 case GE_OPT_PREFIX_BIG:
776 if (is_immediate_prefix (**str))
777 (*str)++;
778 break;
779 default: abort ();
780 }
781
782 memset (ep, 0, sizeof (expressionS));
783
784 save_in = input_line_pointer;
785 input_line_pointer = *str;
786 in_my_get_expression = 1;
787 seg = expression (ep);
788 in_my_get_expression = 0;
789
790 if (ep->X_op == O_illegal)
791 {
792 /* We found a bad expression in md_operand(). */
793 *str = input_line_pointer;
794 input_line_pointer = save_in;
795 if (inst.error == NULL)
796 inst.error = _("bad expression");
797 return 1;
798 }
799
800 #ifdef OBJ_AOUT
801 if (seg != absolute_section
802 && seg != text_section
803 && seg != data_section
804 && seg != bss_section
805 && seg != undefined_section)
806 {
807 inst.error = _("bad segment");
808 *str = input_line_pointer;
809 input_line_pointer = save_in;
810 return 1;
811 }
812 #endif
813
814 /* Get rid of any bignums now, so that we don't generate an error for which
815 we can't establish a line number later on. Big numbers are never valid
816 in instructions, which is where this routine is always called. */
817 if (prefix_mode != GE_OPT_PREFIX_BIG
818 && (ep->X_op == O_big
819 || (ep->X_add_symbol
820 && (walk_no_bignums (ep->X_add_symbol)
821 || (ep->X_op_symbol
822 && walk_no_bignums (ep->X_op_symbol))))))
823 {
824 inst.error = _("invalid constant");
825 *str = input_line_pointer;
826 input_line_pointer = save_in;
827 return 1;
828 }
829
830 *str = input_line_pointer;
831 input_line_pointer = save_in;
832 return 0;
833 }
834
835 /* Turn a string in input_line_pointer into a floating point constant
836 of type TYPE, and store the appropriate bytes in *LITP. The number
837 of LITTLENUMS emitted is stored in *SIZEP. An error message is
838 returned, or NULL on OK.
839
840 Note that fp constants aren't represented in the normal way on the ARM.
841 In big endian mode, things are as expected. However, in little endian
842 mode fp constants are big-endian word-wise, and little-endian byte-wise
843 within the words. For example, (double) 1.1 in big endian mode is
844 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
845 the byte sequence 99 99 f1 3f 9a 99 99 99.
846
847 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
848
849 char *
850 md_atof (int type, char * litP, int * sizeP)
851 {
852 int prec;
853 LITTLENUM_TYPE words[MAX_LITTLENUMS];
854 char *t;
855 int i;
856
857 switch (type)
858 {
859 case 'f':
860 case 'F':
861 case 's':
862 case 'S':
863 prec = 2;
864 break;
865
866 case 'd':
867 case 'D':
868 case 'r':
869 case 'R':
870 prec = 4;
871 break;
872
873 case 'x':
874 case 'X':
875 prec = 6;
876 break;
877
878 case 'p':
879 case 'P':
880 prec = 6;
881 break;
882
883 default:
884 *sizeP = 0;
885 return _("bad call to MD_ATOF()");
886 }
887
888 t = atof_ieee (input_line_pointer, type, words);
889 if (t)
890 input_line_pointer = t;
891 *sizeP = prec * 2;
892
893 if (target_big_endian)
894 {
895 for (i = 0; i < prec; i++)
896 {
897 md_number_to_chars (litP, (valueT) words[i], 2);
898 litP += 2;
899 }
900 }
901 else
902 {
903 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
904 for (i = prec - 1; i >= 0; i--)
905 {
906 md_number_to_chars (litP, (valueT) words[i], 2);
907 litP += 2;
908 }
909 else
910 /* For a 4 byte float the order of elements in `words' is 1 0.
911 For an 8 byte float the order is 1 0 3 2. */
912 for (i = 0; i < prec; i += 2)
913 {
914 md_number_to_chars (litP, (valueT) words[i + 1], 2);
915 md_number_to_chars (litP + 2, (valueT) words[i], 2);
916 litP += 4;
917 }
918 }
919
920 return 0;
921 }
922
923 /* We handle all bad expressions here, so that we can report the faulty
924 instruction in the error message. */
925 void
926 md_operand (expressionS * expr)
927 {
928 if (in_my_get_expression)
929 expr->X_op = O_illegal;
930 }
931
932 /* Immediate values. */
933
934 /* Generic immediate-value read function for use in directives.
935 Accepts anything that 'expression' can fold to a constant.
936 *val receives the number. */
937 #ifdef OBJ_ELF
938 static int
939 immediate_for_directive (int *val)
940 {
941 expressionS exp;
942 exp.X_op = O_illegal;
943
944 if (is_immediate_prefix (*input_line_pointer))
945 {
946 input_line_pointer++;
947 expression (&exp);
948 }
949
950 if (exp.X_op != O_constant)
951 {
952 as_bad (_("expected #constant"));
953 ignore_rest_of_line ();
954 return FAIL;
955 }
956 *val = exp.X_add_number;
957 return SUCCESS;
958 }
959 #endif
960
961 /* Register parsing. */
962
963 /* Generic register parser. CCP points to what should be the
964 beginning of a register name. If it is indeed a valid register
965 name, advance CCP over it and return the reg_entry structure;
966 otherwise return NULL. Does not issue diagnostics. */
967
968 static struct reg_entry *
969 arm_reg_parse_multi (char **ccp)
970 {
971 char *start = *ccp;
972 char *p;
973 struct reg_entry *reg;
974
975 #ifdef REGISTER_PREFIX
976 if (*start != REGISTER_PREFIX)
977 return NULL;
978 start++;
979 #endif
980 #ifdef OPTIONAL_REGISTER_PREFIX
981 if (*start == OPTIONAL_REGISTER_PREFIX)
982 start++;
983 #endif
984
985 p = start;
986 if (!ISALPHA (*p) || !is_name_beginner (*p))
987 return NULL;
988
989 do
990 p++;
991 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
992
993 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
994
995 if (!reg)
996 return NULL;
997
998 *ccp = p;
999 return reg;
1000 }
1001
1002 static int
1003 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1004 enum arm_reg_type type)
1005 {
1006 /* Alternative syntaxes are accepted for a few register classes. */
1007 switch (type)
1008 {
1009 case REG_TYPE_MVF:
1010 case REG_TYPE_MVD:
1011 case REG_TYPE_MVFX:
1012 case REG_TYPE_MVDX:
1013 /* Generic coprocessor register names are allowed for these. */
1014 if (reg && reg->type == REG_TYPE_CN)
1015 return reg->number;
1016 break;
1017
1018 case REG_TYPE_CP:
1019 /* For backward compatibility, a bare number is valid here. */
1020 {
1021 unsigned long processor = strtoul (start, ccp, 10);
1022 if (*ccp != start && processor <= 15)
1023 return processor;
1024 }
1025
1026 case REG_TYPE_MMXWC:
1027 /* WC includes WCG. ??? I'm not sure this is true for all
1028 instructions that take WC registers. */
1029 if (reg && reg->type == REG_TYPE_MMXWCG)
1030 return reg->number;
1031 break;
1032
1033 default:
1034 break;
1035 }
1036
1037 return FAIL;
1038 }
1039
1040 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1041 return value is the register number or FAIL. */
1042
1043 static int
1044 arm_reg_parse (char **ccp, enum arm_reg_type type)
1045 {
1046 char *start = *ccp;
1047 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1048 int ret;
1049
1050 /* Do not allow a scalar (reg+index) to parse as a register. */
1051 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1052 return FAIL;
1053
1054 if (reg && reg->type == type)
1055 return reg->number;
1056
1057 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1058 return ret;
1059
1060 *ccp = start;
1061 return FAIL;
1062 }
1063
1064 /* Parse a Neon type specifier. *STR should point at the leading '.'
1065 character. Does no verification at this stage that the type fits the opcode
1066 properly. E.g.,
1067
1068 .i32.i32.s16
1069 .s32.f32
1070 .u16
1071
1072 Can all be legally parsed by this function.
1073
1074 Fills in neon_type struct pointer with parsed information, and updates STR
1075 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1076 type, FAIL if not. */
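/* For instance (illustration), ".s32.f32" from the example above fills in
   two elements: el[0] = { NT_signed, 32 } and el[1] = { NT_float, 32 },
   with elems set to 2.  */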
1077
1078 static int
1079 parse_neon_type (struct neon_type *type, char **str)
1080 {
1081 char *ptr = *str;
1082
1083 if (type)
1084 type->elems = 0;
1085
1086 while (type->elems < NEON_MAX_TYPE_ELS)
1087 {
1088 enum neon_el_type thistype = NT_untyped;
1089 unsigned thissize = -1u;
1090
1091 if (*ptr != '.')
1092 break;
1093
1094 ptr++;
1095
1096 /* Just a size without an explicit type. */
1097 if (ISDIGIT (*ptr))
1098 goto parsesize;
1099
1100 switch (TOLOWER (*ptr))
1101 {
1102 case 'i': thistype = NT_integer; break;
1103 case 'f': thistype = NT_float; break;
1104 case 'p': thistype = NT_poly; break;
1105 case 's': thistype = NT_signed; break;
1106 case 'u': thistype = NT_unsigned; break;
1107 default:
1108 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1109 return FAIL;
1110 }
1111
1112 ptr++;
1113
1114 /* .f is an abbreviation for .f32. */
1115 if (thistype == NT_float && !ISDIGIT (*ptr))
1116 thissize = 32;
1117 else
1118 {
1119 parsesize:
1120 thissize = strtoul (ptr, &ptr, 10);
1121
1122 if (thissize != 8 && thissize != 16 && thissize != 32
1123 && thissize != 64)
1124 {
1125 as_bad (_("bad size %d in type specifier"), thissize);
1126 return FAIL;
1127 }
1128 }
1129
1130 if (type)
1131 {
1132 type->el[type->elems].type = thistype;
1133 type->el[type->elems].size = thissize;
1134 type->elems++;
1135 }
1136 }
1137
1138 /* Empty/missing type is not a successful parse. */
1139 if (type->elems == 0)
1140 return FAIL;
1141
1142 *str = ptr;
1143
1144 return SUCCESS;
1145 }
1146
1147 /* Errors may be set multiple times during parsing or bit encoding
1148 (particularly in the Neon bits), but usually the earliest error which is set
1149 will be the most meaningful. Avoid overwriting it with later (cascading)
1150 errors by calling this function. */
1151
1152 static void
1153 first_error (const char *err)
1154 {
1155 if (!inst.error)
1156 inst.error = err;
1157 }
1158
1159 /* Parse a single type, e.g. ".s32", leading period included. */
1160 static int
1161 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1162 {
1163 char *str = *ccp;
1164 struct neon_type optype;
1165
1166 if (*str == '.')
1167 {
1168 if (parse_neon_type (&optype, &str) == SUCCESS)
1169 {
1170 if (optype.elems == 1)
1171 *vectype = optype.el[0];
1172 else
1173 {
1174 first_error (_("only one type should be specified for operand"));
1175 return FAIL;
1176 }
1177 }
1178 else
1179 {
1180 first_error (_("vector type expected"));
1181 return FAIL;
1182 }
1183 }
1184 else
1185 return FAIL;
1186
1187 *ccp = str;
1188
1189 return SUCCESS;
1190 }
1191
1192 /* Special meanings for indices (which have a range of 0-7), which will fit into
1193 a 4-bit integer. */
1194
1195 #define NEON_ALL_LANES 15
1196 #define NEON_INTERLEAVE_LANES 14
1197
1198 /* Parse either a register or a scalar, with an optional type. Return the
1199 register number, and optionally fill in the actual type of the register
1200 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1201 type/index information in *TYPEINFO. */
1202
1203 static int
1204 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1205 enum arm_reg_type *rtype,
1206 struct neon_typed_alias *typeinfo)
1207 {
1208 char *str = *ccp;
1209 struct reg_entry *reg = arm_reg_parse_multi (&str);
1210 struct neon_typed_alias atype;
1211 struct neon_type_el parsetype;
1212
1213 atype.defined = 0;
1214 atype.index = -1;
1215 atype.eltype.type = NT_invtype;
1216 atype.eltype.size = -1;
1217
1218 /* Try alternate syntax for some types of register. Note these are mutually
1219 exclusive with the Neon syntax extensions. */
1220 if (reg == NULL)
1221 {
1222 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1223 if (altreg != FAIL)
1224 *ccp = str;
1225 if (typeinfo)
1226 *typeinfo = atype;
1227 return altreg;
1228 }
1229
1230 /* Undo polymorphism for Neon D and Q registers. */
1231 if (type == REG_TYPE_NDQ
1232 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1233 type = reg->type;
1234
1235 if (type != reg->type)
1236 return FAIL;
1237
1238 if (reg->neon)
1239 atype = *reg->neon;
1240
1241 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1242 {
1243 if ((atype.defined & NTA_HASTYPE) != 0)
1244 {
1245 first_error (_("can't redefine type for operand"));
1246 return FAIL;
1247 }
1248 atype.defined |= NTA_HASTYPE;
1249 atype.eltype = parsetype;
1250 }
1251
1252 if (skip_past_char (&str, '[') == SUCCESS)
1253 {
1254 if (type != REG_TYPE_VFD)
1255 {
1256 first_error (_("only D registers may be indexed"));
1257 return FAIL;
1258 }
1259
1260 if ((atype.defined & NTA_HASINDEX) != 0)
1261 {
1262 first_error (_("can't change index for operand"));
1263 return FAIL;
1264 }
1265
1266 atype.defined |= NTA_HASINDEX;
1267
1268 if (skip_past_char (&str, ']') == SUCCESS)
1269 atype.index = NEON_ALL_LANES;
1270 else
1271 {
1272 expressionS exp;
1273
1274 my_get_expression (&exp, &str, GE_NO_PREFIX);
1275
1276 if (exp.X_op != O_constant)
1277 {
1278 first_error (_("constant expression required"));
1279 return FAIL;
1280 }
1281
1282 if (skip_past_char (&str, ']') == FAIL)
1283 return FAIL;
1284
1285 atype.index = exp.X_add_number;
1286 }
1287 }
1288
1289 if (typeinfo)
1290 *typeinfo = atype;
1291
1292 if (rtype)
1293 *rtype = type;
1294
1295 *ccp = str;
1296
1297 return reg->number;
1298 }
1299
1300 /* Like arm_reg_parse, but allow the following extra features:
1301 - If RTYPE is non-zero, return the (possibly restricted) type of the
1302 register (e.g. Neon double or quad reg when either has been requested).
1303 - If this is a Neon vector type with additional type information, fill
1304 in the struct pointed to by VECTYPE (if non-NULL).
1305 This function will fault on encountering a scalar.
1306 */
1307
1308 static int
1309 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1310 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1311 {
1312 struct neon_typed_alias atype;
1313 char *str = *ccp;
1314 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1315
1316 if (reg == FAIL)
1317 return FAIL;
1318
1319 /* Do not allow a scalar (reg+index) to parse as a register. */
1320 if ((atype.defined & NTA_HASINDEX) != 0)
1321 {
1322 first_error (_("register operand expected, but got scalar"));
1323 return FAIL;
1324 }
1325
1326 if (vectype)
1327 *vectype = atype.eltype;
1328
1329 *ccp = str;
1330
1331 return reg;
1332 }
1333
1334 #define NEON_SCALAR_REG(X) ((X) >> 4)
1335 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1336
1337 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1338 have enough information to be able to do a good job bounds-checking. So, we
1339 just do easy checks here, and do further checks later. */
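/* For example (illustration): "d3[1]" parses as register 3 with index 1,
   so the value returned below is 3 * 16 + 1 = 49; NEON_SCALAR_REG (49)
   recovers 3 and NEON_SCALAR_INDEX (49) recovers 1.  */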
1340
1341 static int
1342 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1343 {
1344 int reg;
1345 char *str = *ccp;
1346 struct neon_typed_alias atype;
1347
1348 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1349
1350 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1351 return FAIL;
1352
1353 if (atype.index == NEON_ALL_LANES)
1354 {
1355 first_error (_("scalar must have an index"));
1356 return FAIL;
1357 }
1358 else if (atype.index >= 64 / elsize)
1359 {
1360 first_error (_("scalar index out of range"));
1361 return FAIL;
1362 }
1363
1364 if (type)
1365 *type = atype.eltype;
1366
1367 *ccp = str;
1368
1369 return reg * 16 + atype.index;
1370 }
1371
1372 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
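/* For example (illustration): "{r0-r3, r12, lr}" yields the bitmask
   0x0000500f -- bits 0-3 for r0-r3, bit 12 for r12, bit 14 for lr (r14).  */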
1373 static long
1374 parse_reg_list (char ** strp)
1375 {
1376 char * str = * strp;
1377 long range = 0;
1378 int another_range;
1379
1380 /* We come back here if we get ranges concatenated by '+' or '|'. */
1381 do
1382 {
1383 another_range = 0;
1384
1385 if (*str == '{')
1386 {
1387 int in_range = 0;
1388 int cur_reg = -1;
1389
1390 str++;
1391 do
1392 {
1393 int reg;
1394
1395 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1396 {
1397 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1398 return FAIL;
1399 }
1400
1401 if (in_range)
1402 {
1403 int i;
1404
1405 if (reg <= cur_reg)
1406 {
1407 first_error (_("bad range in register list"));
1408 return FAIL;
1409 }
1410
1411 for (i = cur_reg + 1; i < reg; i++)
1412 {
1413 if (range & (1 << i))
1414 as_tsktsk
1415 (_("Warning: duplicated register (r%d) in register list"),
1416 i);
1417 else
1418 range |= 1 << i;
1419 }
1420 in_range = 0;
1421 }
1422
1423 if (range & (1 << reg))
1424 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1425 reg);
1426 else if (reg <= cur_reg)
1427 as_tsktsk (_("Warning: register range not in ascending order"));
1428
1429 range |= 1 << reg;
1430 cur_reg = reg;
1431 }
1432 while (skip_past_comma (&str) != FAIL
1433 || (in_range = 1, *str++ == '-'));
1434 str--;
1435
1436 if (*str++ != '}')
1437 {
1438 first_error (_("missing `}'"));
1439 return FAIL;
1440 }
1441 }
1442 else
1443 {
1444 expressionS expr;
1445
1446 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1447 return FAIL;
1448
1449 if (expr.X_op == O_constant)
1450 {
1451 if (expr.X_add_number
1452 != (expr.X_add_number & 0x0000ffff))
1453 {
1454 inst.error = _("invalid register mask");
1455 return FAIL;
1456 }
1457
1458 if ((range & expr.X_add_number) != 0)
1459 {
1460 int regno = range & expr.X_add_number;
1461
1462 regno &= -regno;
1463 regno = (1 << regno) - 1;
1464 as_tsktsk
1465 (_("Warning: duplicated register (r%d) in register list"),
1466 regno);
1467 }
1468
1469 range |= expr.X_add_number;
1470 }
1471 else
1472 {
1473 if (inst.reloc.type != 0)
1474 {
1475 inst.error = _("expression too complex");
1476 return FAIL;
1477 }
1478
1479 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1480 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1481 inst.reloc.pc_rel = 0;
1482 }
1483 }
1484
1485 if (*str == '|' || *str == '+')
1486 {
1487 str++;
1488 another_range = 1;
1489 }
1490 }
1491 while (another_range);
1492
1493 *strp = str;
1494 return range;
1495 }
1496
1497 /* Types of registers in a list. */
1498
1499 enum reg_list_els
1500 {
1501 REGLIST_VFP_S,
1502 REGLIST_VFP_D,
1503 REGLIST_NEON_D
1504 };
1505
1506 /* Parse a VFP register list. If the string is invalid return FAIL.
1507 Otherwise return the number of registers, and set PBASE to the first
1508 register. Parses registers of type ETYPE.
1509 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1510 - Q registers can be used to specify pairs of D registers
1511 - { } can be omitted from around a singleton register list
1512 FIXME: This is not implemented, as it would require backtracking in
1513 some cases, e.g.:
1514 vtbl.8 d3,d4,d5
1515 This could be done (the meaning isn't really ambiguous), but doesn't
1516 fit in well with the current parsing framework.
1517 - 32 D registers may be used (also true for VFPv3).
1518 FIXME: Types are ignored in these register lists, which is probably a
1519 bug. */
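/* For example (illustration): "{d0-d3}" parses as four registers with
   *PBASE set to 0, and "{s4, s5, s6}" as three registers with *PBASE set
   to 4; a gap in either list would be rejected by the contiguity check at
   the end of the function.  */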
1520
1521 static int
1522 parse_vfp_reg_list (char **str, unsigned int *pbase, enum reg_list_els etype)
1523 {
1524 int base_reg;
1525 int new_base;
1526 enum arm_reg_type regtype = 0;
1527 int max_regs = 0;
1528 int count = 0;
1529 int warned = 0;
1530 unsigned long mask = 0;
1531 int i;
1532
1533 if (**str != '{')
1534 {
1535 inst.error = _("expecting {");
1536 return FAIL;
1537 }
1538
1539 (*str)++;
1540
1541 switch (etype)
1542 {
1543 case REGLIST_VFP_S:
1544 regtype = REG_TYPE_VFS;
1545 max_regs = 32;
1546 break;
1547
1548 case REGLIST_VFP_D:
1549 regtype = REG_TYPE_VFD;
1550 break;
1551
1552 case REGLIST_NEON_D:
1553 regtype = REG_TYPE_NDQ;
1554 break;
1555 }
1556
1557 if (etype != REGLIST_VFP_S)
1558 {
1559 /* VFPv3 allows 32 D registers. */
1560 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1561 {
1562 max_regs = 32;
1563 if (thumb_mode)
1564 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1565 fpu_vfp_ext_v3);
1566 else
1567 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1568 fpu_vfp_ext_v3);
1569 }
1570 else
1571 max_regs = 16;
1572 }
1573
1574 base_reg = max_regs;
1575
1576 do
1577 {
1578 int setmask = 1, addregs = 1;
1579
1580 new_base = arm_typed_reg_parse (str, regtype, &regtype, NULL);
1581
1582 if (new_base == FAIL)
1583 {
1584 first_error (_(reg_expected_msgs[regtype]));
1585 return FAIL;
1586 }
1587
1588 if (new_base >= max_regs)
1589 {
1590 first_error (_("register out of range in list"));
1591 return FAIL;
1592 }
1593
1594 /* Note: a value of 2 * n is returned for the register Q<n>. */
1595 if (regtype == REG_TYPE_NQ)
1596 {
1597 setmask = 3;
1598 addregs = 2;
1599 }
1600
1601 if (new_base < base_reg)
1602 base_reg = new_base;
1603
1604 if (mask & (setmask << new_base))
1605 {
1606 first_error (_("invalid register list"));
1607 return FAIL;
1608 }
1609
1610 if ((mask >> new_base) != 0 && ! warned)
1611 {
1612 as_tsktsk (_("register list not in ascending order"));
1613 warned = 1;
1614 }
1615
1616 mask |= setmask << new_base;
1617 count += addregs;
1618
1619 if (**str == '-') /* We have the start of a range expression */
1620 {
1621 int high_range;
1622
1623 (*str)++;
1624
1625 if ((high_range = arm_typed_reg_parse (str, regtype, NULL, NULL))
1626 == FAIL)
1627 {
1628 inst.error = gettext (reg_expected_msgs[regtype]);
1629 return FAIL;
1630 }
1631
1632 if (high_range >= max_regs)
1633 {
1634 first_error (_("register out of range in list"));
1635 return FAIL;
1636 }
1637
1638 if (regtype == REG_TYPE_NQ)
1639 high_range = high_range + 1;
1640
1641 if (high_range <= new_base)
1642 {
1643 inst.error = _("register range not in ascending order");
1644 return FAIL;
1645 }
1646
1647 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1648 {
1649 if (mask & (setmask << new_base))
1650 {
1651 inst.error = _("invalid register list");
1652 return FAIL;
1653 }
1654
1655 mask |= setmask << new_base;
1656 count += addregs;
1657 }
1658 }
1659 }
1660 while (skip_past_comma (str) != FAIL);
1661
1662 (*str)++;
1663
1664 /* Sanity check -- should have raised a parse error above. */
1665 if (count == 0 || count > max_regs)
1666 abort ();
1667
1668 *pbase = base_reg;
1669
1670 /* Final test -- the registers must be consecutive. */
1671 mask >>= base_reg;
1672 for (i = 0; i < count; i++)
1673 {
1674 if ((mask & (1u << i)) == 0)
1675 {
1676 inst.error = _("non-contiguous register range");
1677 return FAIL;
1678 }
1679 }
1680
1681 return count;
1682 }
1683
1684 /* True if two alias types are the same. */
1685
1686 static int
1687 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1688 {
1689 if (!a && !b)
1690 return 1;
1691
1692 if (!a || !b)
1693 return 0;
1694
1695 if (a->defined != b->defined)
1696 return 0;
1697
1698 if ((a->defined & NTA_HASTYPE) != 0
1699 && (a->eltype.type != b->eltype.type
1700 || a->eltype.size != b->eltype.size))
1701 return 0;
1702
1703 if ((a->defined & NTA_HASINDEX) != 0
1704 && (a->index != b->index))
1705 return 0;
1706
1707 return 1;
1708 }
1709
1710 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1711 The base register is put in *PBASE.
1712 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1713 the return value.
1714 The register stride (minus one) is put in bit 4 of the return value.
1715 Bits [6:5] encode the list length (minus one).
1716 The type of the list elements is put in *ELTYPE, if non-NULL. */
1717
1718 #define NEON_LANE(X) ((X) & 0xf)
1719 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1720 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
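/* For example (illustration): "{d0-d3}" gives base register 0, unit stride,
   length four and no lane, so the value returned below is
   NEON_INTERLEAVE_LANES | (0 << 4) | (3 << 5) == 0x6e, while "{d0[2], d2[2]}"
   gives base 0, stride 2, length 2 and lane 2, i.e. 2 | (1 << 4) | (1 << 5)
   == 0x32.  */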
1721
1722 static int
1723 parse_neon_el_struct_list (char **str, unsigned *pbase,
1724 struct neon_type_el *eltype)
1725 {
1726 char *ptr = *str;
1727 int base_reg = -1;
1728 int reg_incr = -1;
1729 int count = 0;
1730 int lane = -1;
1731 int leading_brace = 0;
1732 enum arm_reg_type rtype = REG_TYPE_NDQ;
1733 int addregs = 1;
1734 const char *const incr_error = "register stride must be 1 or 2";
1735 const char *const type_error = "mismatched element/structure types in list";
1736 struct neon_typed_alias firsttype;
1737
1738 if (skip_past_char (&ptr, '{') == SUCCESS)
1739 leading_brace = 1;
1740
1741 do
1742 {
1743 struct neon_typed_alias atype;
1744 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1745
1746 if (getreg == FAIL)
1747 {
1748 first_error (_(reg_expected_msgs[rtype]));
1749 return FAIL;
1750 }
1751
1752 if (base_reg == -1)
1753 {
1754 base_reg = getreg;
1755 if (rtype == REG_TYPE_NQ)
1756 {
1757 reg_incr = 1;
1758 addregs = 2;
1759 }
1760 firsttype = atype;
1761 }
1762 else if (reg_incr == -1)
1763 {
1764 reg_incr = getreg - base_reg;
1765 if (reg_incr < 1 || reg_incr > 2)
1766 {
1767 first_error (_(incr_error));
1768 return FAIL;
1769 }
1770 }
1771 else if (getreg != base_reg + reg_incr * count)
1772 {
1773 first_error (_(incr_error));
1774 return FAIL;
1775 }
1776
1777 if (!neon_alias_types_same (&atype, &firsttype))
1778 {
1779 first_error (_(type_error));
1780 return FAIL;
1781 }
1782
1783 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1784 modes. */
1785 if (ptr[0] == '-')
1786 {
1787 struct neon_typed_alias htype;
1788 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1789 if (lane == -1)
1790 lane = NEON_INTERLEAVE_LANES;
1791 else if (lane != NEON_INTERLEAVE_LANES)
1792 {
1793 first_error (_(type_error));
1794 return FAIL;
1795 }
1796 if (reg_incr == -1)
1797 reg_incr = 1;
1798 else if (reg_incr != 1)
1799 {
1800 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1801 return FAIL;
1802 }
1803 ptr++;
1804 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1805 if (hireg == FAIL)
1806 {
1807 first_error (_(reg_expected_msgs[rtype]));
1808 return FAIL;
1809 }
1810 if (!neon_alias_types_same (&htype, &firsttype))
1811 {
1812 first_error (_(type_error));
1813 return FAIL;
1814 }
1815 count += hireg + dregs - getreg;
1816 continue;
1817 }
1818
1819 /* If we're using Q registers, we can't use [] or [n] syntax. */
1820 if (rtype == REG_TYPE_NQ)
1821 {
1822 count += 2;
1823 continue;
1824 }
1825
1826 if ((atype.defined & NTA_HASINDEX) != 0)
1827 {
1828 if (lane == -1)
1829 lane = atype.index;
1830 else if (lane != atype.index)
1831 {
1832 first_error (_(type_error));
1833 return FAIL;
1834 }
1835 }
1836 else if (lane == -1)
1837 lane = NEON_INTERLEAVE_LANES;
1838 else if (lane != NEON_INTERLEAVE_LANES)
1839 {
1840 first_error (_(type_error));
1841 return FAIL;
1842 }
1843 count++;
1844 }
1845 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1846
1847 /* No lane set by [x]. We must be interleaving structures. */
1848 if (lane == -1)
1849 lane = NEON_INTERLEAVE_LANES;
1850
1851 /* Sanity check. */
1852 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1853 || (count > 1 && reg_incr == -1))
1854 {
1855 first_error (_("error parsing element/structure list"));
1856 return FAIL;
1857 }
1858
1859 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1860 {
1861 first_error (_("expected }"));
1862 return FAIL;
1863 }
1864
1865 if (reg_incr == -1)
1866 reg_incr = 1;
1867
1868 if (eltype)
1869 *eltype = firsttype.eltype;
1870
1871 *pbase = base_reg;
1872 *str = ptr;
1873
1874 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1875 }
1876
1877 /* Parse an explicit relocation suffix on an expression. This is
1878 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1879 arm_reloc_hsh contains no entries, so this function can only
1880 succeed if there is no () after the word. Returns -1 on error,
1881 BFD_RELOC_UNUSED if there wasn't any suffix. */
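/* For example (illustration): an operand followed by "(got)" would reach
   this parser with *STR pointing at the '('; the word between the
   parentheses is looked up in arm_reloc_hsh, so only suffixes that were
   registered in that table (populated elsewhere, and only for ELF) are
   accepted -- "(got)" is merely a plausible entry, not a guarantee.  */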
1882 static int
1883 parse_reloc (char **str)
1884 {
1885 struct reloc_entry *r;
1886 char *p, *q;
1887
1888 if (**str != '(')
1889 return BFD_RELOC_UNUSED;
1890
1891 p = *str + 1;
1892 q = p;
1893
1894 while (*q && *q != ')' && *q != ',')
1895 q++;
1896 if (*q != ')')
1897 return -1;
1898
1899 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1900 return -1;
1901
1902 *str = q + 1;
1903 return r->reloc;
1904 }
1905
1906 /* Directives: register aliases. */
1907
1908 static struct reg_entry *
1909 insert_reg_alias (char *str, int number, int type)
1910 {
1911 struct reg_entry *new;
1912 const char *name;
1913
1914 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1915 {
1916 if (new->builtin)
1917 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1918
1919 /* Only warn about a redefinition if it's not defined as the
1920 same register. */
1921 else if (new->number != number || new->type != type)
1922 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1923
1924 return 0;
1925 }
1926
1927 name = xstrdup (str);
1928 new = xmalloc (sizeof (struct reg_entry));
1929
1930 new->name = name;
1931 new->number = number;
1932 new->type = type;
1933 new->builtin = FALSE;
1934 new->neon = NULL;
1935
1936 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1937 abort ();
1938
1939 return new;
1940 }
1941
1942 static void
1943 insert_neon_reg_alias (char *str, int number, int type,
1944 struct neon_typed_alias *atype)
1945 {
1946 struct reg_entry *reg = insert_reg_alias (str, number, type);
1947
1948 if (!reg)
1949 {
1950 first_error (_("attempt to redefine typed alias"));
1951 return;
1952 }
1953
1954 if (atype)
1955 {
1956 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
1957 *reg->neon = *atype;
1958 }
1959 }
1960
1961 /* Look for the .req directive. This is of the form:
1962
1963 new_register_name .req existing_register_name
1964
1965 If we find one, or if it looks sufficiently like one that we want to
1966 handle any error here, return non-zero. Otherwise return zero. */
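/* For example (illustration): the line "Base .req r4" creates the alias
   "Base" exactly as written, plus the all-uppercase "BASE" and
   all-lowercase "base" variants, as done at the end of this function.  */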
1967
1968 static int
1969 create_register_alias (char * newname, char *p)
1970 {
1971 struct reg_entry *old;
1972 char *oldname, *nbuf;
1973 size_t nlen;
1974
1975 /* The input scrubber ensures that whitespace after the mnemonic is
1976 collapsed to single spaces. */
1977 oldname = p;
1978 if (strncmp (oldname, " .req ", 6) != 0)
1979 return 0;
1980
1981 oldname += 6;
1982 if (*oldname == '\0')
1983 return 0;
1984
1985 old = hash_find (arm_reg_hsh, oldname);
1986 if (!old)
1987 {
1988 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1989 return 1;
1990 }
1991
1992 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1993 the desired alias name, and p points to its end. If not, then
1994 the desired alias name is in the global original_case_string. */
1995 #ifdef TC_CASE_SENSITIVE
1996 nlen = p - newname;
1997 #else
1998 newname = original_case_string;
1999 nlen = strlen (newname);
2000 #endif
2001
2002 nbuf = alloca (nlen + 1);
2003 memcpy (nbuf, newname, nlen);
2004 nbuf[nlen] = '\0';
2005
2006 /* Create aliases under the new name as stated; an all-lowercase
2007 version of the new name; and an all-uppercase version of the new
2008 name. */
2009 insert_reg_alias (nbuf, old->number, old->type);
2010
2011 for (p = nbuf; *p; p++)
2012 *p = TOUPPER (*p);
2013
2014 if (strncmp (nbuf, newname, nlen))
2015 insert_reg_alias (nbuf, old->number, old->type);
2016
2017 for (p = nbuf; *p; p++)
2018 *p = TOLOWER (*p);
2019
2020 if (strncmp (nbuf, newname, nlen))
2021 insert_reg_alias (nbuf, old->number, old->type);
2022
2023 return 1;
2024 }
2025
2026 /* Create a Neon typed/indexed register alias using directives, e.g.:
2027 X .dn d5.s32[1]
2028 Y .qn 6.s16
2029 Z .dn d7
2030 T .dn Z[0]
2031 These typed registers can be used instead of the types specified after the
2032 Neon mnemonic, so long as all operands given have types. Types can also be
2033 specified directly, e.g.:
2034 vadd d0.s32, d1.s32, d2.s32
2035 */
2036
2037 static int
2038 create_neon_reg_alias (char *newname, char *p)
2039 {
2040 enum arm_reg_type basetype;
2041 struct reg_entry *basereg;
2042 struct reg_entry mybasereg;
2043 struct neon_type ntype;
2044 struct neon_typed_alias typeinfo;
2045 char *namebuf, *nameend;
2046 int namelen;
2047
2048 typeinfo.defined = 0;
2049 typeinfo.eltype.type = NT_invtype;
2050 typeinfo.eltype.size = -1;
2051 typeinfo.index = -1;
2052
2053 nameend = p;
2054
2055 if (strncmp (p, " .dn ", 5) == 0)
2056 basetype = REG_TYPE_VFD;
2057 else if (strncmp (p, " .qn ", 5) == 0)
2058 basetype = REG_TYPE_NQ;
2059 else
2060 return 0;
2061
2062 p += 5;
2063
2064 if (*p == '\0')
2065 return 0;
2066
2067 basereg = arm_reg_parse_multi (&p);
2068
2069 if (basereg && basereg->type != basetype)
2070 {
2071 as_bad (_("bad type for register"));
2072 return 0;
2073 }
2074
2075 if (basereg == NULL)
2076 {
2077 expressionS exp;
2078 /* Try parsing as an integer. */
2079 my_get_expression (&exp, &p, GE_NO_PREFIX);
2080 if (exp.X_op != O_constant)
2081 {
2082 as_bad (_("expression must be constant"));
2083 return 0;
2084 }
2085 basereg = &mybasereg;
2086 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2087 : exp.X_add_number;
2088 basereg->neon = 0;
2089 }
2090
2091 if (basereg->neon)
2092 typeinfo = *basereg->neon;
2093
2094 if (parse_neon_type (&ntype, &p) == SUCCESS)
2095 {
2096 /* We got a type. */
2097 if (typeinfo.defined & NTA_HASTYPE)
2098 {
2099 as_bad (_("can't redefine the type of a register alias"));
2100 return 0;
2101 }
2102
2103 typeinfo.defined |= NTA_HASTYPE;
2104 if (ntype.elems != 1)
2105 {
2106 as_bad (_("you must specify a single type only"));
2107 return 0;
2108 }
2109 typeinfo.eltype = ntype.el[0];
2110 }
2111
2112 if (skip_past_char (&p, '[') == SUCCESS)
2113 {
2114 expressionS exp;
2115 /* We got a scalar index. */
2116
2117 if (typeinfo.defined & NTA_HASINDEX)
2118 {
2119 as_bad (_("can't redefine the index of a scalar alias"));
2120 return 0;
2121 }
2122
2123 my_get_expression (&exp, &p, GE_NO_PREFIX);
2124
2125 if (exp.X_op != O_constant)
2126 {
2127 as_bad (_("scalar index must be constant"));
2128 return 0;
2129 }
2130
2131 typeinfo.defined |= NTA_HASINDEX;
2132 typeinfo.index = exp.X_add_number;
2133
2134 if (skip_past_char (&p, ']') == FAIL)
2135 {
2136 as_bad (_("expecting ]"));
2137 return 0;
2138 }
2139 }
2140
2141 namelen = nameend - newname;
2142 namebuf = alloca (namelen + 1);
2143 strncpy (namebuf, newname, namelen);
2144 namebuf[namelen] = '\0';
2145
2146 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2147 typeinfo.defined != 0 ? &typeinfo : NULL);
2148
2149 /* Insert name in all uppercase. */
2150 for (p = namebuf; *p; p++)
2151 *p = TOUPPER (*p);
2152
2153 if (strncmp (namebuf, newname, namelen))
2154 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2155 typeinfo.defined != 0 ? &typeinfo : NULL);
2156
2157 /* Insert name in all lowercase. */
2158 for (p = namebuf; *p; p++)
2159 *p = TOLOWER (*p);
2160
2161 if (strncmp (namebuf, newname, namelen))
2162 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2163 typeinfo.defined != 0 ? &typeinfo : NULL);
2164
2165 return 1;
2166 }
2167
2168 /* Should never be called, as .req goes between the alias and the
2169 register name, not at the beginning of the line. */
2170 static void
2171 s_req (int a ATTRIBUTE_UNUSED)
2172 {
2173 as_bad (_("invalid syntax for .req directive"));
2174 }
2175
2176 static void
2177 s_dn (int a ATTRIBUTE_UNUSED)
2178 {
2179 as_bad (_("invalid syntax for .dn directive"));
2180 }
2181
2182 static void
2183 s_qn (int a ATTRIBUTE_UNUSED)
2184 {
2185 as_bad (_("invalid syntax for .qn directive"));
2186 }
2187
2188 /* The .unreq directive deletes an alias which was previously defined
2189 by .req. For example:
2190
2191 my_alias .req r11
2192 .unreq my_alias */
2193
2194 static void
2195 s_unreq (int a ATTRIBUTE_UNUSED)
2196 {
2197 char * name;
2198 char saved_char;
2199
2200 name = input_line_pointer;
2201
2202 while (*input_line_pointer != 0
2203 && *input_line_pointer != ' '
2204 && *input_line_pointer != '\n')
2205 ++input_line_pointer;
2206
2207 saved_char = *input_line_pointer;
2208 *input_line_pointer = 0;
2209
2210 if (!*name)
2211 as_bad (_("invalid syntax for .unreq directive"));
2212 else
2213 {
2214 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2215
2216 if (!reg)
2217 as_bad (_("unknown register alias '%s'"), name);
2218 else if (reg->builtin)
2219 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2220 name);
2221 else
2222 {
2223 hash_delete (arm_reg_hsh, name);
2224 free ((char *) reg->name);
2225 if (reg->neon)
2226 free (reg->neon);
2227 free (reg);
2228 }
2229 }
2230
2231 *input_line_pointer = saved_char;
2232 demand_empty_rest_of_line ();
2233 }
2234
2235 /* Directives: Instruction set selection. */
2236
2237 #ifdef OBJ_ELF
2238 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2239 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2240 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2241 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
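/* For illustration only: a section that interleaves ARM code, literal
   data and Thumb code ends up with local, untyped symbols of the form

        $a      at the first byte of a run of ARM instructions
        $d      at the first byte of a run of data (e.g. a literal pool)
        $t      at the first byte of a run of Thumb instructions

   and each mapping symbol covers everything up to the next one.  */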
2242
2243 static enum mstate mapstate = MAP_UNDEFINED;
2244
2245 static void
2246 mapping_state (enum mstate state)
2247 {
2248 symbolS * symbolP;
2249 const char * symname;
2250 int type;
2251
2252 if (mapstate == state)
2253 /* The mapping symbol has already been emitted.
2254 There is nothing else to do. */
2255 return;
2256
2257 mapstate = state;
2258
2259 switch (state)
2260 {
2261 case MAP_DATA:
2262 symname = "$d";
2263 type = BSF_NO_FLAGS;
2264 break;
2265 case MAP_ARM:
2266 symname = "$a";
2267 type = BSF_NO_FLAGS;
2268 break;
2269 case MAP_THUMB:
2270 symname = "$t";
2271 type = BSF_NO_FLAGS;
2272 break;
2273 case MAP_UNDEFINED:
2274 return;
2275 default:
2276 abort ();
2277 }
2278
2279 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2280
2281 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2282 symbol_table_insert (symbolP);
2283 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2284
2285 switch (state)
2286 {
2287 case MAP_ARM:
2288 THUMB_SET_FUNC (symbolP, 0);
2289 ARM_SET_THUMB (symbolP, 0);
2290 ARM_SET_INTERWORK (symbolP, support_interwork);
2291 break;
2292
2293 case MAP_THUMB:
2294 THUMB_SET_FUNC (symbolP, 1);
2295 ARM_SET_THUMB (symbolP, 1);
2296 ARM_SET_INTERWORK (symbolP, support_interwork);
2297 break;
2298
2299 case MAP_DATA:
2300 default:
2301 return;
2302 }
2303 }
2304 #else
2305 #define mapping_state(x) /* nothing */
2306 #endif
2307
2308 /* Find the real, Thumb encoded start of a Thumb function. */
2309
2310 static symbolS *
2311 find_real_start (symbolS * symbolP)
2312 {
2313 char * real_start;
2314 const char * name = S_GET_NAME (symbolP);
2315 symbolS * new_target;
2316
2317 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2318 #define STUB_NAME ".real_start_of"
2319
2320 if (name == NULL)
2321 abort ();
2322
2323 /* The compiler may generate BL instructions to local labels because
2324 it needs to perform a branch to a far away location. These labels
2325 do not have a corresponding ".real_start_of" label. We check
2326 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2327 the ".real_start_of" convention for nonlocal branches. */
2328 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2329 return symbolP;
2330
2331 real_start = ACONCAT ((STUB_NAME, name, NULL));
2332 new_target = symbol_find (real_start);
2333
2334 if (new_target == NULL)
2335 {
2336 as_warn ("Failed to find real start of function: %s\n", name);
2337 new_target = symbolP;
2338 }
2339
2340 return new_target;
2341 }
2342
2343 static void
2344 opcode_select (int width)
2345 {
2346 switch (width)
2347 {
2348 case 16:
2349 if (! thumb_mode)
2350 {
2351 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2352 as_bad (_("selected processor does not support THUMB opcodes"));
2353
2354 thumb_mode = 1;
2355 /* No need to force the alignment, since we will have been
2356 coming from ARM mode, which is word-aligned. */
2357 record_alignment (now_seg, 1);
2358 }
2359 mapping_state (MAP_THUMB);
2360 break;
2361
2362 case 32:
2363 if (thumb_mode)
2364 {
2365 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2366 as_bad (_("selected processor does not support ARM opcodes"));
2367
2368 thumb_mode = 0;
2369
2370 if (!need_pass_2)
2371 frag_align (2, 0, 0);
2372
2373 record_alignment (now_seg, 1);
2374 }
2375 mapping_state (MAP_ARM);
2376 break;
2377
2378 default:
2379 as_bad (_("invalid instruction size selected (%d)"), width);
2380 }
2381 }
2382
2383 static void
2384 s_arm (int ignore ATTRIBUTE_UNUSED)
2385 {
2386 opcode_select (32);
2387 demand_empty_rest_of_line ();
2388 }
2389
2390 static void
2391 s_thumb (int ignore ATTRIBUTE_UNUSED)
2392 {
2393 opcode_select (16);
2394 demand_empty_rest_of_line ();
2395 }
2396
2397 static void
2398 s_code (int unused ATTRIBUTE_UNUSED)
2399 {
2400 int temp;
2401
2402 temp = get_absolute_expression ();
2403 switch (temp)
2404 {
2405 case 16:
2406 case 32:
2407 opcode_select (temp);
2408 break;
2409
2410 default:
2411 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2412 }
2413 }
2414
2415 static void
2416 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2417 {
2418 /* If we are not already in thumb mode go into it, EVEN if
2419 the target processor does not support thumb instructions.
2420 This is used by gcc/config/arm/lib1funcs.asm for example
2421 to compile interworking support functions even if the
2422 target processor should not support interworking. */
2423 if (! thumb_mode)
2424 {
2425 thumb_mode = 2;
2426 record_alignment (now_seg, 1);
2427 }
2428
2429 demand_empty_rest_of_line ();
2430 }
2431
2432 static void
2433 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2434 {
2435 s_thumb (0);
2436
2437 /* The following label is the name/address of the start of a Thumb function.
2438 We need to know this for the interworking support. */
2439 label_is_thumb_function_name = TRUE;
2440 }
2441
2442 /* Perform a .set directive, but also mark the alias as
2443 being a thumb function. */
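/* Typical use (symbol names are illustrative):

        .thumb_set alias_func, real_func

   behaves like ".set alias_func, real_func" but additionally marks
   alias_func as a Thumb function for interworking purposes.  */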
2444
2445 static void
2446 s_thumb_set (int equiv)
2447 {
2448 /* XXX the following is a duplicate of the code for s_set() in read.c
2449 We cannot just call that code as we need to get at the symbol that
2450 is created. */
2451 char * name;
2452 char delim;
2453 char * end_name;
2454 symbolS * symbolP;
2455
2456 /* Especial apologies for the random logic:
2457 This just grew, and could be parsed much more simply!
2458 Dean - in haste. */
2459 name = input_line_pointer;
2460 delim = get_symbol_end ();
2461 end_name = input_line_pointer;
2462 *end_name = delim;
2463
2464 if (*input_line_pointer != ',')
2465 {
2466 *end_name = 0;
2467 as_bad (_("expected comma after name \"%s\""), name);
2468 *end_name = delim;
2469 ignore_rest_of_line ();
2470 return;
2471 }
2472
2473 input_line_pointer++;
2474 *end_name = 0;
2475
2476 if (name[0] == '.' && name[1] == '\0')
2477 {
2478 /* XXX - this should not happen to .thumb_set. */
2479 abort ();
2480 }
2481
2482 if ((symbolP = symbol_find (name)) == NULL
2483 && (symbolP = md_undefined_symbol (name)) == NULL)
2484 {
2485 #ifndef NO_LISTING
2486 /* When doing symbol listings, play games with dummy fragments living
2487 outside the normal fragment chain to record the file and line info
2488 for this symbol. */
2489 if (listing & LISTING_SYMBOLS)
2490 {
2491 extern struct list_info_struct * listing_tail;
2492 fragS * dummy_frag = xmalloc (sizeof (fragS));
2493
2494 memset (dummy_frag, 0, sizeof (fragS));
2495 dummy_frag->fr_type = rs_fill;
2496 dummy_frag->line = listing_tail;
2497 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2498 dummy_frag->fr_symbol = symbolP;
2499 }
2500 else
2501 #endif
2502 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2503
2504 #ifdef OBJ_COFF
2505 /* "set" symbols are local unless otherwise specified. */
2506 SF_SET_LOCAL (symbolP);
2507 #endif /* OBJ_COFF */
2508 } /* Make a new symbol. */
2509
2510 symbol_table_insert (symbolP);
2511
2512 * end_name = delim;
2513
2514 if (equiv
2515 && S_IS_DEFINED (symbolP)
2516 && S_GET_SEGMENT (symbolP) != reg_section)
2517 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2518
2519 pseudo_set (symbolP);
2520
2521 demand_empty_rest_of_line ();
2522
2523 /* XXX Now we come to the Thumb specific bit of code. */
2524
2525 THUMB_SET_FUNC (symbolP, 1);
2526 ARM_SET_THUMB (symbolP, 1);
2527 #if defined OBJ_ELF || defined OBJ_COFF
2528 ARM_SET_INTERWORK (symbolP, support_interwork);
2529 #endif
2530 }
2531
2532 /* Directives: Mode selection. */
2533
2534 /* .syntax [unified|divided] - choose the new unified syntax
2535 (same for Arm and Thumb encoding, modulo slight differences in what
2536 can be represented) or the old divergent syntax for each mode. */
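/* For example, a source file written for the unified syntax would start
   with:

        .syntax unified

   and ".syntax divided" switches back to the old per-mode syntax.  */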
2537 static void
2538 s_syntax (int unused ATTRIBUTE_UNUSED)
2539 {
2540 char *name, delim;
2541
2542 name = input_line_pointer;
2543 delim = get_symbol_end ();
2544
2545 if (!strcasecmp (name, "unified"))
2546 unified_syntax = TRUE;
2547 else if (!strcasecmp (name, "divided"))
2548 unified_syntax = FALSE;
2549 else
2550 {
2551 as_bad (_("unrecognized syntax mode \"%s\""), name);
2552 return;
2553 }
2554 *input_line_pointer = delim;
2555 demand_empty_rest_of_line ();
2556 }
2557
2558 /* Directives: sectioning and alignment. */
2559
2560 /* Same as s_align_ptwo but align 0 => align 2. */
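/* So, for example, ".align 3" here requests a 2^3 = 8 byte boundary,
   while ".align 0" is treated as ".align 2", i.e. a 4-byte boundary.  */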
2561
2562 static void
2563 s_align (int unused ATTRIBUTE_UNUSED)
2564 {
2565 int temp;
2566 long temp_fill;
2567 long max_alignment = 15;
2568
2569 temp = get_absolute_expression ();
2570 if (temp > max_alignment)
2571 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2572 else if (temp < 0)
2573 {
2574 as_bad (_("alignment negative. 0 assumed."));
2575 temp = 0;
2576 }
2577
2578 if (*input_line_pointer == ',')
2579 {
2580 input_line_pointer++;
2581 temp_fill = get_absolute_expression ();
2582 }
2583 else
2584 temp_fill = 0;
2585
2586 if (!temp)
2587 temp = 2;
2588
2589 /* Only make a frag if we HAVE to. */
2590 if (temp && !need_pass_2)
2591 frag_align (temp, (int) temp_fill, 0);
2592 demand_empty_rest_of_line ();
2593
2594 record_alignment (now_seg, temp);
2595 }
2596
2597 static void
2598 s_bss (int ignore ATTRIBUTE_UNUSED)
2599 {
2600 /* We don't support putting frags in the BSS segment, we fake it by
2601 marking in_bss, then looking at s_skip for clues. */
2602 subseg_set (bss_section, 0);
2603 demand_empty_rest_of_line ();
2604 mapping_state (MAP_DATA);
2605 }
2606
2607 static void
2608 s_even (int ignore ATTRIBUTE_UNUSED)
2609 {
2610 /* Never make a frag if we expect an extra pass. */
2611 if (!need_pass_2)
2612 frag_align (1, 0, 0);
2613
2614 record_alignment (now_seg, 1);
2615
2616 demand_empty_rest_of_line ();
2617 }
2618
2619 /* Directives: Literal pools. */
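/* Literal pools collect the constants requested by the "ldr Rd, =expr"
   pseudo instruction; ".ltorg" (or ".pool") flushes the current pool as
   word data.  Illustrative use (the value is invented):

        ldr     r0, =0x12345678         @ constant deferred to the pool
        bx      lr
        .ltorg                          @ pool emitted here as .word entries
*/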
2620
2621 static literal_pool *
2622 find_literal_pool (void)
2623 {
2624 literal_pool * pool;
2625
2626 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2627 {
2628 if (pool->section == now_seg
2629 && pool->sub_section == now_subseg)
2630 break;
2631 }
2632
2633 return pool;
2634 }
2635
2636 static literal_pool *
2637 find_or_make_literal_pool (void)
2638 {
2639 /* Next literal pool ID number. */
2640 static unsigned int latest_pool_num = 1;
2641 literal_pool * pool;
2642
2643 pool = find_literal_pool ();
2644
2645 if (pool == NULL)
2646 {
2647 /* Create a new pool. */
2648 pool = xmalloc (sizeof (* pool));
2649 if (! pool)
2650 return NULL;
2651
2652 pool->next_free_entry = 0;
2653 pool->section = now_seg;
2654 pool->sub_section = now_subseg;
2655 pool->next = list_of_pools;
2656 pool->symbol = NULL;
2657
2658 /* Add it to the list. */
2659 list_of_pools = pool;
2660 }
2661
2662 /* New pools, and emptied pools, will have a NULL symbol. */
2663 if (pool->symbol == NULL)
2664 {
2665 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2666 (valueT) 0, &zero_address_frag);
2667 pool->id = latest_pool_num ++;
2668 }
2669
2670 /* Done. */
2671 return pool;
2672 }
2673
2674 /* Add the literal in the global 'inst'
2675 structure to the relevant literal pool. */
2676
2677 static int
2678 add_to_lit_pool (void)
2679 {
2680 literal_pool * pool;
2681 unsigned int entry;
2682
2683 pool = find_or_make_literal_pool ();
2684
2685 /* Check if this literal value is already in the pool. */
2686 for (entry = 0; entry < pool->next_free_entry; entry ++)
2687 {
2688 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2689 && (inst.reloc.exp.X_op == O_constant)
2690 && (pool->literals[entry].X_add_number
2691 == inst.reloc.exp.X_add_number)
2692 && (pool->literals[entry].X_unsigned
2693 == inst.reloc.exp.X_unsigned))
2694 break;
2695
2696 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2697 && (inst.reloc.exp.X_op == O_symbol)
2698 && (pool->literals[entry].X_add_number
2699 == inst.reloc.exp.X_add_number)
2700 && (pool->literals[entry].X_add_symbol
2701 == inst.reloc.exp.X_add_symbol)
2702 && (pool->literals[entry].X_op_symbol
2703 == inst.reloc.exp.X_op_symbol))
2704 break;
2705 }
2706
2707 /* Do we need to create a new entry? */
2708 if (entry == pool->next_free_entry)
2709 {
2710 if (entry >= MAX_LITERAL_POOL_SIZE)
2711 {
2712 inst.error = _("literal pool overflow");
2713 return FAIL;
2714 }
2715
2716 pool->literals[entry] = inst.reloc.exp;
2717 pool->next_free_entry += 1;
2718 }
2719
2720 inst.reloc.exp.X_op = O_symbol;
2721 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2722 inst.reloc.exp.X_add_symbol = pool->symbol;
2723
2724 return SUCCESS;
2725 }
2726
2727 /* Can't use symbol_new here, so have to create a symbol and then at
2728 a later date assign it a value. That's what these functions do. */
2729
2730 static void
2731 symbol_locate (symbolS * symbolP,
2732 const char * name, /* It is copied, the caller can modify. */
2733 segT segment, /* Segment identifier (SEG_<something>). */
2734 valueT valu, /* Symbol value. */
2735 fragS * frag) /* Associated fragment. */
2736 {
2737 unsigned int name_length;
2738 char * preserved_copy_of_name;
2739
2740 name_length = strlen (name) + 1; /* +1 for \0. */
2741 obstack_grow (&notes, name, name_length);
2742 preserved_copy_of_name = obstack_finish (&notes);
2743
2744 #ifdef tc_canonicalize_symbol_name
2745 preserved_copy_of_name =
2746 tc_canonicalize_symbol_name (preserved_copy_of_name);
2747 #endif
2748
2749 S_SET_NAME (symbolP, preserved_copy_of_name);
2750
2751 S_SET_SEGMENT (symbolP, segment);
2752 S_SET_VALUE (symbolP, valu);
2753 symbol_clear_list_pointers (symbolP);
2754
2755 symbol_set_frag (symbolP, frag);
2756
2757 /* Link to end of symbol chain. */
2758 {
2759 extern int symbol_table_frozen;
2760
2761 if (symbol_table_frozen)
2762 abort ();
2763 }
2764
2765 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2766
2767 obj_symbol_new_hook (symbolP);
2768
2769 #ifdef tc_symbol_new_hook
2770 tc_symbol_new_hook (symbolP);
2771 #endif
2772
2773 #ifdef DEBUG_SYMS
2774 verify_symbol_chain (symbol_rootP, symbol_lastP);
2775 #endif /* DEBUG_SYMS */
2776 }
2777
2778
2779 static void
2780 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2781 {
2782 unsigned int entry;
2783 literal_pool * pool;
2784 char sym_name[20];
2785
2786 pool = find_literal_pool ();
2787 if (pool == NULL
2788 || pool->symbol == NULL
2789 || pool->next_free_entry == 0)
2790 return;
2791
2792 mapping_state (MAP_DATA);
2793
2794 /* Align the pool, since it is about to be emitted as word (4-byte) data.
2795 Only make a frag if we have to. */
2796 if (!need_pass_2)
2797 frag_align (2, 0, 0);
2798
2799 record_alignment (now_seg, 2);
2800
2801 sprintf (sym_name, "$$lit_\002%x", pool->id);
2802
2803 symbol_locate (pool->symbol, sym_name, now_seg,
2804 (valueT) frag_now_fix (), frag_now);
2805 symbol_table_insert (pool->symbol);
2806
2807 ARM_SET_THUMB (pool->symbol, thumb_mode);
2808
2809 #if defined OBJ_COFF || defined OBJ_ELF
2810 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2811 #endif
2812
2813 for (entry = 0; entry < pool->next_free_entry; entry ++)
2814 /* First output the expression in the instruction to the pool. */
2815 emit_expr (&(pool->literals[entry]), 4); /* .word */
2816
2817 /* Mark the pool as empty. */
2818 pool->next_free_entry = 0;
2819 pool->symbol = NULL;
2820 }
2821
2822 #ifdef OBJ_ELF
2823 /* Forward declarations for functions below, in the MD interface
2824 section. */
2825 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2826 static valueT create_unwind_entry (int);
2827 static void start_unwind_section (const segT, int);
2828 static void add_unwind_opcode (valueT, int);
2829 static void flush_pending_unwind (void);
2830
2831 /* Directives: Data. */
2832
2833 static void
2834 s_arm_elf_cons (int nbytes)
2835 {
2836 expressionS exp;
2837
2838 #ifdef md_flush_pending_output
2839 md_flush_pending_output ();
2840 #endif
2841
2842 if (is_it_end_of_statement ())
2843 {
2844 demand_empty_rest_of_line ();
2845 return;
2846 }
2847
2848 #ifdef md_cons_align
2849 md_cons_align (nbytes);
2850 #endif
2851
2852 mapping_state (MAP_DATA);
2853 do
2854 {
2855 int reloc;
2856 char *base = input_line_pointer;
2857
2858 expression (& exp);
2859
2860 if (exp.X_op != O_symbol)
2861 emit_expr (&exp, (unsigned int) nbytes);
2862 else
2863 {
2864 char *before_reloc = input_line_pointer;
2865 reloc = parse_reloc (&input_line_pointer);
2866 if (reloc == -1)
2867 {
2868 as_bad (_("unrecognized relocation suffix"));
2869 ignore_rest_of_line ();
2870 return;
2871 }
2872 else if (reloc == BFD_RELOC_UNUSED)
2873 emit_expr (&exp, (unsigned int) nbytes);
2874 else
2875 {
2876 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2877 int size = bfd_get_reloc_size (howto);
2878
2879 if (reloc == BFD_RELOC_ARM_PLT32)
2880 {
2881 as_bad (_("(plt) is only valid on branch targets"));
2882 reloc = BFD_RELOC_UNUSED;
2883 size = 0;
2884 }
2885
2886 if (size > nbytes)
2887 as_bad (_("%s relocations do not fit in %d bytes"),
2888 howto->name, nbytes);
2889 else
2890 {
2891 /* We've parsed an expression stopping at O_symbol.
2892 But there may be more expression left now that we
2893 have parsed the relocation marker. Parse it again.
2894 XXX Surely there is a cleaner way to do this. */
2895 char *p = input_line_pointer;
2896 int offset;
2897 char *save_buf = alloca (input_line_pointer - base);
2898 memcpy (save_buf, base, input_line_pointer - base);
2899 memmove (base + (input_line_pointer - before_reloc),
2900 base, before_reloc - base);
2901
2902 input_line_pointer = base + (input_line_pointer-before_reloc);
2903 expression (&exp);
2904 memcpy (base, save_buf, p - base);
2905
2906 offset = nbytes - size;
2907 p = frag_more ((int) nbytes);
2908 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2909 size, &exp, 0, reloc);
2910 }
2911 }
2912 }
2913 }
2914 while (*input_line_pointer++ == ',');
2915
2916 /* Put terminator back into stream. */
2917 input_line_pointer --;
2918 demand_empty_rest_of_line ();
2919 }
2920
2921
2922 /* Parse a .rel31 directive. */
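/* Illustrative use (the symbol name is invented):

        .rel31 0, some_function

   emits one 32-bit word whose low 31 bits are filled in by a
   place-relative (BFD_RELOC_ARM_PREL31) fixup against some_function;
   the leading 0 or 1 supplies bit 31 of the word.  */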
2923
2924 static void
2925 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2926 {
2927 expressionS exp;
2928 char *p;
2929 valueT highbit;
2930
2931 highbit = 0;
2932 if (*input_line_pointer == '1')
2933 highbit = 0x80000000;
2934 else if (*input_line_pointer != '0')
2935 as_bad (_("expected 0 or 1"));
2936
2937 input_line_pointer++;
2938 if (*input_line_pointer != ',')
2939 as_bad (_("missing comma"));
2940 input_line_pointer++;
2941
2942 #ifdef md_flush_pending_output
2943 md_flush_pending_output ();
2944 #endif
2945
2946 #ifdef md_cons_align
2947 md_cons_align (4);
2948 #endif
2949
2950 mapping_state (MAP_DATA);
2951
2952 expression (&exp);
2953
2954 p = frag_more (4);
2955 md_number_to_chars (p, highbit, 4);
2956 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
2957 BFD_RELOC_ARM_PREL31);
2958
2959 demand_empty_rest_of_line ();
2960 }
2961
2962 /* Directives: AEABI stack-unwind tables. */
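/* A typical annotated function looks something like this (registers and
   offsets are illustrative only):

        .fnstart
        push    {r4, r5, lr}
        .save   {r4, r5, lr}
        add     fp, sp, #0
        .setfp  fp, sp, #0
        ...
        pop     {r4, r5, pc}
        .fnend

   The remaining directives (.cantunwind, .personality,
   .personalityindex, .handlerdata, .movsp, .pad, .unwind_raw) refine
   the generated table entry and are handled by the parsers below.  */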
2963
2964 /* Parse an unwind_fnstart directive. Simply records the current location. */
2965
2966 static void
2967 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
2968 {
2969 demand_empty_rest_of_line ();
2970 /* Mark the start of the function. */
2971 unwind.proc_start = expr_build_dot ();
2972
2973 /* Reset the rest of the unwind info. */
2974 unwind.opcode_count = 0;
2975 unwind.table_entry = NULL;
2976 unwind.personality_routine = NULL;
2977 unwind.personality_index = -1;
2978 unwind.frame_size = 0;
2979 unwind.fp_offset = 0;
2980 unwind.fp_reg = 13;
2981 unwind.fp_used = 0;
2982 unwind.sp_restored = 0;
2983 }
2984
2985
2986 /* Parse a handlerdata directive. Creates the exception handling table entry
2987 for the function. */
2988
2989 static void
2990 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
2991 {
2992 demand_empty_rest_of_line ();
2993 if (unwind.table_entry)
2994 as_bad (_("dupicate .handlerdata directive"));
2995
2996 create_unwind_entry (1);
2997 }
2998
2999 /* Parse an unwind_fnend directive. Generates the index table entry. */
3000
3001 static void
3002 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3003 {
3004 long where;
3005 char *ptr;
3006 valueT val;
3007
3008 demand_empty_rest_of_line ();
3009
3010 /* Add eh table entry. */
3011 if (unwind.table_entry == NULL)
3012 val = create_unwind_entry (0);
3013 else
3014 val = 0;
3015
3016 /* Add index table entry. This is two words. */
3017 start_unwind_section (unwind.saved_seg, 1);
3018 frag_align (2, 0, 0);
3019 record_alignment (now_seg, 2);
3020
3021 ptr = frag_more (8);
3022 where = frag_now_fix () - 8;
3023
3024 /* Self relative offset of the function start. */
3025 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3026 BFD_RELOC_ARM_PREL31);
3027
3028 /* Indicate dependency on EHABI-defined personality routines to the
3029 linker, if it hasn't been done already. */
3030 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3031 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3032 {
3033 static const char *const name[] = {
3034 "__aeabi_unwind_cpp_pr0",
3035 "__aeabi_unwind_cpp_pr1",
3036 "__aeabi_unwind_cpp_pr2"
3037 };
3038 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3039 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3040 marked_pr_dependency |= 1 << unwind.personality_index;
3041 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3042 = marked_pr_dependency;
3043 }
3044
3045 if (val)
3046 /* Inline exception table entry. */
3047 md_number_to_chars (ptr + 4, val, 4);
3048 else
3049 /* Self relative offset of the table entry. */
3050 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3051 BFD_RELOC_ARM_PREL31);
3052
3053 /* Restore the original section. */
3054 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3055 }
3056
3057
3058 /* Parse an unwind_cantunwind directive. */
3059
3060 static void
3061 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3062 {
3063 demand_empty_rest_of_line ();
3064 if (unwind.personality_routine || unwind.personality_index != -1)
3065 as_bad (_("personality routine specified for cantunwind frame"));
3066
3067 unwind.personality_index = -2;
3068 }
3069
3070
3071 /* Parse a personalityindex directive. */
3072
3073 static void
3074 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3075 {
3076 expressionS exp;
3077
3078 if (unwind.personality_routine || unwind.personality_index != -1)
3079 as_bad (_("duplicate .personalityindex directive"));
3080
3081 expression (&exp);
3082
3083 if (exp.X_op != O_constant
3084 || exp.X_add_number < 0 || exp.X_add_number > 15)
3085 {
3086 as_bad (_("bad personality routine number"));
3087 ignore_rest_of_line ();
3088 return;
3089 }
3090
3091 unwind.personality_index = exp.X_add_number;
3092
3093 demand_empty_rest_of_line ();
3094 }
3095
3096
3097 /* Parse a personality directive. */
3098
3099 static void
3100 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3101 {
3102 char *name, *p, c;
3103
3104 if (unwind.personality_routine || unwind.personality_index != -1)
3105 as_bad (_("duplicate .personality directive"));
3106
3107 name = input_line_pointer;
3108 c = get_symbol_end ();
3109 p = input_line_pointer;
3110 unwind.personality_routine = symbol_find_or_make (name);
3111 *p = c;
3112 demand_empty_rest_of_line ();
3113 }
3114
3115
3116 /* Parse a directive saving core registers. */
3117
3118 static void
3119 s_arm_unwind_save_core (void)
3120 {
3121 valueT op;
3122 long range;
3123 int n;
3124
3125 range = parse_reg_list (&input_line_pointer);
3126 if (range == FAIL)
3127 {
3128 as_bad (_("expected register list"));
3129 ignore_rest_of_line ();
3130 return;
3131 }
3132
3133 demand_empty_rest_of_line ();
3134
3135 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3136 into .unwind_save {..., sp...}. We aren't bothered about the value of
3137 ip because it is clobbered by calls. */
3138 if (unwind.sp_restored && unwind.fp_reg == 12
3139 && (range & 0x3000) == 0x1000)
3140 {
3141 unwind.opcode_count--;
3142 unwind.sp_restored = 0;
3143 range = (range | 0x2000) & ~0x1000;
3144 unwind.pending_offset = 0;
3145 }
3146
3147 /* Pop r4-r15. */
3148 if (range & 0xfff0)
3149 {
3150 /* See if we can use the short opcodes. These pop a block of up to 8
3151 registers starting with r4, plus maybe r14. */
3152 for (n = 0; n < 8; n++)
3153 {
3154 /* Break at the first non-saved register. */
3155 if ((range & (1 << (n + 4))) == 0)
3156 break;
3157 }
3158 /* See if there are any other bits set. */
3159 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3160 {
3161 /* Use the long form. */
3162 op = 0x8000 | ((range >> 4) & 0xfff);
3163 add_unwind_opcode (op, 2);
3164 }
3165 else
3166 {
3167 /* Use the short form. */
3168 if (range & 0x4000)
3169 op = 0xa8; /* Pop r14. */
3170 else
3171 op = 0xa0; /* Do not pop r14. */
3172 op |= (n - 1);
3173 add_unwind_opcode (op, 1);
3174 }
3175 }
3176
3177 /* Pop r0-r3. */
3178 if (range & 0xf)
3179 {
3180 op = 0xb100 | (range & 0xf);
3181 add_unwind_opcode (op, 2);
3182 }
3183
3184 /* Record the number of bytes pushed. */
3185 for (n = 0; n < 16; n++)
3186 {
3187 if (range & (1 << n))
3188 unwind.frame_size += 4;
3189 }
3190 }
3191
3192
3193 /* Parse a directive saving FPA registers. */
3194
3195 static void
3196 s_arm_unwind_save_fpa (int reg)
3197 {
3198 expressionS exp;
3199 int num_regs;
3200 valueT op;
3201
3202 /* Get the number of registers to transfer. */
3203 if (skip_past_comma (&input_line_pointer) != FAIL)
3204 expression (&exp);
3205 else
3206 exp.X_op = O_illegal;
3207
3208 if (exp.X_op != O_constant)
3209 {
3210 as_bad (_("expected , <constant>"));
3211 ignore_rest_of_line ();
3212 return;
3213 }
3214
3215 num_regs = exp.X_add_number;
3216
3217 if (num_regs < 1 || num_regs > 4)
3218 {
3219 as_bad (_("number of registers must be in the range [1:4]"));
3220 ignore_rest_of_line ();
3221 return;
3222 }
3223
3224 demand_empty_rest_of_line ();
3225
3226 if (reg == 4)
3227 {
3228 /* Short form. */
3229 op = 0xb4 | (num_regs - 1);
3230 add_unwind_opcode (op, 1);
3231 }
3232 else
3233 {
3234 /* Long form. */
3235 op = 0xc800 | (reg << 4) | (num_regs - 1);
3236 add_unwind_opcode (op, 2);
3237 }
3238 unwind.frame_size += num_regs * 12;
3239 }
3240
3241
3242 /* Parse a directive saving VFP registers. */
3243
3244 static void
3245 s_arm_unwind_save_vfp (void)
3246 {
3247 int count;
3248 unsigned int reg;
3249 valueT op;
3250
3251 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3252 if (count == FAIL)
3253 {
3254 as_bad (_("expected register list"));
3255 ignore_rest_of_line ();
3256 return;
3257 }
3258
3259 demand_empty_rest_of_line ();
3260
3261 if (reg == 8)
3262 {
3263 /* Short form. */
3264 op = 0xb8 | (count - 1);
3265 add_unwind_opcode (op, 1);
3266 }
3267 else
3268 {
3269 /* Long form. */
3270 op = 0xb300 | (reg << 4) | (count - 1);
3271 add_unwind_opcode (op, 2);
3272 }
3273 unwind.frame_size += count * 8 + 4;
3274 }
3275
3276
3277 /* Parse a directive saving iWMMXt data registers. */
3278
3279 static void
3280 s_arm_unwind_save_mmxwr (void)
3281 {
3282 int reg;
3283 int hi_reg;
3284 int i;
3285 unsigned mask = 0;
3286 valueT op;
3287
3288 if (*input_line_pointer == '{')
3289 input_line_pointer++;
3290
3291 do
3292 {
3293 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3294
3295 if (reg == FAIL)
3296 {
3297 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3298 goto error;
3299 }
3300
3301 if (mask >> reg)
3302 as_tsktsk (_("register list not in ascending order"));
3303 mask |= 1 << reg;
3304
3305 if (*input_line_pointer == '-')
3306 {
3307 input_line_pointer++;
3308 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3309 if (hi_reg == FAIL)
3310 {
3311 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3312 goto error;
3313 }
3314 else if (reg >= hi_reg)
3315 {
3316 as_bad (_("bad register range"));
3317 goto error;
3318 }
3319 for (; reg < hi_reg; reg++)
3320 mask |= 1 << reg;
3321 }
3322 }
3323 while (skip_past_comma (&input_line_pointer) != FAIL);
3324
3325 if (*input_line_pointer == '}')
3326 input_line_pointer++;
3327
3328 demand_empty_rest_of_line ();
3329
3330 /* Generate any deferred opcodes because we're going to be looking at
3331 the list. */
3332 flush_pending_unwind ();
3333
3334 for (i = 0; i < 16; i++)
3335 {
3336 if (mask & (1 << i))
3337 unwind.frame_size += 8;
3338 }
3339
3340 /* Attempt to combine with a previous opcode. We do this because gcc
3341 likes to output separate unwind directives for a single block of
3342 registers. */
3343 if (unwind.opcode_count > 0)
3344 {
3345 i = unwind.opcodes[unwind.opcode_count - 1];
3346 if ((i & 0xf8) == 0xc0)
3347 {
3348 i &= 7;
3349 /* Only merge if the blocks are contiguous. */
3350 if (i < 6)
3351 {
3352 if ((mask & 0xfe00) == (1 << 9))
3353 {
3354 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3355 unwind.opcode_count--;
3356 }
3357 }
3358 else if (i == 6 && unwind.opcode_count >= 2)
3359 {
3360 i = unwind.opcodes[unwind.opcode_count - 2];
3361 reg = i >> 4;
3362 i &= 0xf;
3363
3364 op = 0xffff << (reg - 1);
3365 if (reg > 0
3366 && ((mask & op) == (1u << (reg - 1))))
3367 {
3368 op = (1 << (reg + i + 1)) - 1;
3369 op &= ~((1 << reg) - 1);
3370 mask |= op;
3371 unwind.opcode_count -= 2;
3372 }
3373 }
3374 }
3375 }
3376
3377 hi_reg = 15;
3378 /* We want to generate opcodes in the order the registers have been
3379 saved, i.e. descending order. */
3380 for (reg = 15; reg >= -1; reg--)
3381 {
3382 /* Save registers in blocks. */
3383 if (reg < 0
3384 || !(mask & (1 << reg)))
3385 {
3386 /* We found an unsaved reg. Generate opcodes to save the
3387 preceding block. */
3388 if (reg != hi_reg)
3389 {
3390 if (reg == 9)
3391 {
3392 /* Short form. */
3393 op = 0xc0 | (hi_reg - 10);
3394 add_unwind_opcode (op, 1);
3395 }
3396 else
3397 {
3398 /* Long form. */
3399 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3400 add_unwind_opcode (op, 2);
3401 }
3402 }
3403 hi_reg = reg - 1;
3404 }
3405 }
3406
3407 return;
3408 error:
3409 ignore_rest_of_line ();
3410 }
3411
3412 static void
3413 s_arm_unwind_save_mmxwcg (void)
3414 {
3415 int reg;
3416 int hi_reg;
3417 unsigned mask = 0;
3418 valueT op;
3419
3420 if (*input_line_pointer == '{')
3421 input_line_pointer++;
3422
3423 do
3424 {
3425 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3426
3427 if (reg == FAIL)
3428 {
3429 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3430 goto error;
3431 }
3432
3433 reg -= 8;
3434 if (mask >> reg)
3435 as_tsktsk (_("register list not in ascending order"));
3436 mask |= 1 << reg;
3437
3438 if (*input_line_pointer == '-')
3439 {
3440 input_line_pointer++;
3441 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3442 if (hi_reg == FAIL)
3443 {
3444 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3445 goto error;
3446 }
3447 else if (reg >= hi_reg)
3448 {
3449 as_bad (_("bad register range"));
3450 goto error;
3451 }
3452 for (; reg < hi_reg; reg++)
3453 mask |= 1 << reg;
3454 }
3455 }
3456 while (skip_past_comma (&input_line_pointer) != FAIL);
3457
3458 if (*input_line_pointer == '}')
3459 input_line_pointer++;
3460
3461 demand_empty_rest_of_line ();
3462
3463 /* Generate any deferred opcodes because we're going to be looking at
3464 the list. */
3465 flush_pending_unwind ();
3466
3467 for (reg = 0; reg < 16; reg++)
3468 {
3469 if (mask & (1 << reg))
3470 unwind.frame_size += 4;
3471 }
3472 op = 0xc700 | mask;
3473 add_unwind_opcode (op, 2);
3474 return;
3475 error:
3476 ignore_rest_of_line ();
3477 }
3478
3479
3480 /* Parse an unwind_save directive. */
3481
3482 static void
3483 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED)
3484 {
3485 char *peek;
3486 struct reg_entry *reg;
3487 bfd_boolean had_brace = FALSE;
3488
3489 /* Figure out what sort of save we have. */
3490 peek = input_line_pointer;
3491
3492 if (*peek == '{')
3493 {
3494 had_brace = TRUE;
3495 peek++;
3496 }
3497
3498 reg = arm_reg_parse_multi (&peek);
3499
3500 if (!reg)
3501 {
3502 as_bad (_("register expected"));
3503 ignore_rest_of_line ();
3504 return;
3505 }
3506
3507 switch (reg->type)
3508 {
3509 case REG_TYPE_FN:
3510 if (had_brace)
3511 {
3512 as_bad (_("FPA .unwind_save does not take a register list"));
3513 ignore_rest_of_line ();
3514 return;
3515 }
3516 s_arm_unwind_save_fpa (reg->number);
3517 return;
3518
3519 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3520 case REG_TYPE_VFD: s_arm_unwind_save_vfp (); return;
3521 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3522 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3523
3524 default:
3525 as_bad (_(".unwind_save does not support this kind of register"));
3526 ignore_rest_of_line ();
3527 }
3528 }
3529
3530
3531 /* Parse an unwind_movsp directive. */
3532
3533 static void
3534 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3535 {
3536 int reg;
3537 valueT op;
3538
3539 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3540 if (reg == FAIL)
3541 {
3542 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3543 ignore_rest_of_line ();
3544 return;
3545 }
3546 demand_empty_rest_of_line ();
3547
3548 if (reg == REG_SP || reg == REG_PC)
3549 {
3550 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3551 return;
3552 }
3553
3554 if (unwind.fp_reg != REG_SP)
3555 as_bad (_("unexpected .unwind_movsp directive"));
3556
3557 /* Generate opcode to restore the value. */
3558 op = 0x90 | reg;
3559 add_unwind_opcode (op, 1);
3560
3561 /* Record the information for later. */
3562 unwind.fp_reg = reg;
3563 unwind.fp_offset = unwind.frame_size;
3564 unwind.sp_restored = 1;
3565 }
3566
3567 /* Parse an unwind_pad directive. */
3568
3569 static void
3570 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3571 {
3572 int offset;
3573
3574 if (immediate_for_directive (&offset) == FAIL)
3575 return;
3576
3577 if (offset & 3)
3578 {
3579 as_bad (_("stack increment must be multiple of 4"));
3580 ignore_rest_of_line ();
3581 return;
3582 }
3583
3584 /* Don't generate any opcodes, just record the details for later. */
3585 unwind.frame_size += offset;
3586 unwind.pending_offset += offset;
3587
3588 demand_empty_rest_of_line ();
3589 }
3590
3591 /* Parse an unwind_setfp directive. */
3592
3593 static void
3594 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3595 {
3596 int sp_reg;
3597 int fp_reg;
3598 int offset;
3599
3600 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3601 if (skip_past_comma (&input_line_pointer) == FAIL)
3602 sp_reg = FAIL;
3603 else
3604 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3605
3606 if (fp_reg == FAIL || sp_reg == FAIL)
3607 {
3608 as_bad (_("expected <reg>, <reg>"));
3609 ignore_rest_of_line ();
3610 return;
3611 }
3612
3613 /* Optional constant. */
3614 if (skip_past_comma (&input_line_pointer) != FAIL)
3615 {
3616 if (immediate_for_directive (&offset) == FAIL)
3617 return;
3618 }
3619 else
3620 offset = 0;
3621
3622 demand_empty_rest_of_line ();
3623
3624 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3625 {
3626 as_bad (_("register must be either sp or set by a previous"
3627 "unwind_movsp directive"));
3628 return;
3629 }
3630
3631 /* Don't generate any opcodes, just record the information for later. */
3632 unwind.fp_reg = fp_reg;
3633 unwind.fp_used = 1;
3634 if (sp_reg == 13)
3635 unwind.fp_offset = unwind.frame_size - offset;
3636 else
3637 unwind.fp_offset -= offset;
3638 }
3639
3640 /* Parse an unwind_raw directive. */
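/* Illustrative use only:

        .unwind_raw 16, 0xb1, 0x0f

   records a 16-byte adjustment to the frame size, followed by the raw
   unwind opcode bytes 0xb1 and 0x0f which are copied into the unwind
   table as given.  */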
3641
3642 static void
3643 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3644 {
3645 expressionS exp;
3646 /* This is an arbitrary limit. */
3647 unsigned char op[16];
3648 int count;
3649
3650 expression (&exp);
3651 if (exp.X_op == O_constant
3652 && skip_past_comma (&input_line_pointer) != FAIL)
3653 {
3654 unwind.frame_size += exp.X_add_number;
3655 expression (&exp);
3656 }
3657 else
3658 exp.X_op = O_illegal;
3659
3660 if (exp.X_op != O_constant)
3661 {
3662 as_bad (_("expected <offset>, <opcode>"));
3663 ignore_rest_of_line ();
3664 return;
3665 }
3666
3667 count = 0;
3668
3669 /* Parse the opcode. */
3670 for (;;)
3671 {
3672 if (count >= 16)
3673 {
3674 as_bad (_("unwind opcode too long"));
3675 ignore_rest_of_line ();
return;
3676 }
3677 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3678 {
3679 as_bad (_("invalid unwind opcode"));
3680 ignore_rest_of_line ();
3681 return;
3682 }
3683 op[count++] = exp.X_add_number;
3684
3685 /* Parse the next byte. */
3686 if (skip_past_comma (&input_line_pointer) == FAIL)
3687 break;
3688
3689 expression (&exp);
3690 }
3691
3692 /* Add the opcode bytes in reverse order. */
3693 while (count--)
3694 add_unwind_opcode (op[count], 1);
3695
3696 demand_empty_rest_of_line ();
3697 }
3698
3699
3700 /* Parse a .eabi_attribute directive. */
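/* Illustrative use only (the tag numbers and values are arbitrary
   examples, not recommendations):

        .eabi_attribute 6, 10           @ integer-valued tag
        .eabi_attribute 5, "example"    @ tags 4, 5 and 32 take a string

   Tag_compatibility (32) takes an integer followed by a string.  */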
3701
3702 static void
3703 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3704 {
3705 expressionS exp;
3706 bfd_boolean is_string;
3707 int tag;
3708 unsigned int i = 0;
3709 char *s = NULL;
3710 char saved_char;
3711
3712 expression (& exp);
3713 if (exp.X_op != O_constant)
3714 goto bad;
3715
3716 tag = exp.X_add_number;
3717 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3718 is_string = 1;
3719 else
3720 is_string = 0;
3721
3722 if (skip_past_comma (&input_line_pointer) == FAIL)
3723 goto bad;
3724 if (tag == 32 || !is_string)
3725 {
3726 expression (& exp);
3727 if (exp.X_op != O_constant)
3728 {
3729 as_bad (_("expected numeric constant"));
3730 ignore_rest_of_line ();
3731 return;
3732 }
3733 i = exp.X_add_number;
3734 }
3735 if (tag == Tag_compatibility
3736 && skip_past_comma (&input_line_pointer) == FAIL)
3737 {
3738 as_bad (_("expected comma"));
3739 ignore_rest_of_line ();
3740 return;
3741 }
3742 if (is_string)
3743 {
3744 skip_whitespace(input_line_pointer);
3745 if (*input_line_pointer != '"')
3746 goto bad_string;
3747 input_line_pointer++;
3748 s = input_line_pointer;
3749 while (*input_line_pointer && *input_line_pointer != '"')
3750 input_line_pointer++;
3751 if (*input_line_pointer != '"')
3752 goto bad_string;
3753 saved_char = *input_line_pointer;
3754 *input_line_pointer = 0;
3755 }
3756 else
3757 {
3758 s = NULL;
3759 saved_char = 0;
3760 }
3761
3762 if (tag == Tag_compatibility)
3763 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3764 else if (is_string)
3765 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3766 else
3767 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3768
3769 if (s)
3770 {
3771 *input_line_pointer = saved_char;
3772 input_line_pointer++;
3773 }
3774 demand_empty_rest_of_line ();
3775 return;
3776 bad_string:
3777 as_bad (_("bad string constant"));
3778 ignore_rest_of_line ();
3779 return;
3780 bad:
3781 as_bad (_("expected <tag> , <value>"));
3782 ignore_rest_of_line ();
3783 }
3784 #endif /* OBJ_ELF */
3785
3786 static void s_arm_arch (int);
3787 static void s_arm_cpu (int);
3788 static void s_arm_fpu (int);
3789
3790 /* This table describes all the machine specific pseudo-ops the assembler
3791 has to support. The fields are:
3792 pseudo-op name without dot
3793 function to call to execute this pseudo-op
3794 Integer arg to pass to the function. */
3795
3796 const pseudo_typeS md_pseudo_table[] =
3797 {
3798 /* Never called because '.req' does not start a line. */
3799 { "req", s_req, 0 },
3800 /* Following two are likewise never called. */
3801 { "dn", s_dn, 0 },
3802 { "qn", s_qn, 0 },
3803 { "unreq", s_unreq, 0 },
3804 { "bss", s_bss, 0 },
3805 { "align", s_align, 0 },
3806 { "arm", s_arm, 0 },
3807 { "thumb", s_thumb, 0 },
3808 { "code", s_code, 0 },
3809 { "force_thumb", s_force_thumb, 0 },
3810 { "thumb_func", s_thumb_func, 0 },
3811 { "thumb_set", s_thumb_set, 0 },
3812 { "even", s_even, 0 },
3813 { "ltorg", s_ltorg, 0 },
3814 { "pool", s_ltorg, 0 },
3815 { "syntax", s_syntax, 0 },
3816 { "cpu", s_arm_cpu, 0 },
3817 { "arch", s_arm_arch, 0 },
3818 { "fpu", s_arm_fpu, 0 },
3819 #ifdef OBJ_ELF
3820 { "word", s_arm_elf_cons, 4 },
3821 { "long", s_arm_elf_cons, 4 },
3822 { "rel31", s_arm_rel31, 0 },
3823 { "fnstart", s_arm_unwind_fnstart, 0 },
3824 { "fnend", s_arm_unwind_fnend, 0 },
3825 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3826 { "personality", s_arm_unwind_personality, 0 },
3827 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3828 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3829 { "save", s_arm_unwind_save, 0 },
3830 { "movsp", s_arm_unwind_movsp, 0 },
3831 { "pad", s_arm_unwind_pad, 0 },
3832 { "setfp", s_arm_unwind_setfp, 0 },
3833 { "unwind_raw", s_arm_unwind_raw, 0 },
3834 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3835 #else
3836 { "word", cons, 4},
3837 #endif
3838 { "extend", float_cons, 'x' },
3839 { "ldouble", float_cons, 'x' },
3840 { "packed", float_cons, 'p' },
3841 { 0, 0, 0 }
3842 };
3843 \f
3844 /* Parser functions used exclusively in instruction operands. */
3845
3846 /* Generic immediate-value read function for use in insn parsing.
3847 STR points to the beginning of the immediate (the leading #);
3848 VAL receives the value; if the value is outside [MIN, MAX]
3849 issue an error. PREFIX_OPT is true if the immediate prefix is
3850 optional. */
3851
3852 static int
3853 parse_immediate (char **str, int *val, int min, int max,
3854 bfd_boolean prefix_opt)
3855 {
3856 expressionS exp;
3857 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3858 if (exp.X_op != O_constant)
3859 {
3860 inst.error = _("constant expression required");
3861 return FAIL;
3862 }
3863
3864 if (exp.X_add_number < min || exp.X_add_number > max)
3865 {
3866 inst.error = _("immediate value out of range");
3867 return FAIL;
3868 }
3869
3870 *val = exp.X_add_number;
3871 return SUCCESS;
3872 }
3873
3874 /* Less-generic immediate-value read function with the possibility of loading a
3875 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3876 instructions. Puts the result directly in inst.operands[i]. */
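/* For example (an illustrative operand), "vmov.i64 d0, #0xff0000ff0000ffff"
   needs the full 64-bit value: the low 32 bits end up in
   inst.operands[i].imm, the high 32 bits in inst.operands[i].reg, and
   regisimm is set so later code knows both halves are in use.  */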
3877
3878 static int
3879 parse_big_immediate (char **str, int i)
3880 {
3881 expressionS exp;
3882 char *ptr = *str;
3883
3884 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
3885
3886 if (exp.X_op == O_constant)
3887 inst.operands[i].imm = exp.X_add_number;
3888 else if (exp.X_op == O_big
3889 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
3890 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
3891 {
3892 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
3893 /* Bignums have their least significant bits in
3894 generic_bignum[0]. Make sure we put 32 bits in imm and
3895 32 bits in reg, in a (hopefully) portable way. */
3896 assert (parts != 0);
3897 inst.operands[i].imm = 0;
3898 for (j = 0; j < parts; j++, idx++)
3899 inst.operands[i].imm |= generic_bignum[idx]
3900 << (LITTLENUM_NUMBER_OF_BITS * j);
3901 inst.operands[i].reg = 0;
3902 for (j = 0; j < parts; j++, idx++)
3903 inst.operands[i].reg |= generic_bignum[idx]
3904 << (LITTLENUM_NUMBER_OF_BITS * j);
3905 inst.operands[i].regisimm = 1;
3906 }
3907 else
3908 return FAIL;
3909
3910 *str = ptr;
3911
3912 return SUCCESS;
3913 }
3914
3915 /* Returns the pseudo-register number of an FPA immediate constant,
3916 or FAIL if there isn't a valid constant here. */
3917
3918 static int
3919 parse_fpa_immediate (char ** str)
3920 {
3921 LITTLENUM_TYPE words[MAX_LITTLENUMS];
3922 char * save_in;
3923 expressionS exp;
3924 int i;
3925 int j;
3926
3927 /* First try to match exact strings; this guarantees that some
3928 formats will work even for cross assembly. */
3929
3930 for (i = 0; fp_const[i]; i++)
3931 {
3932 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
3933 {
3934 char *start = *str;
3935
3936 *str += strlen (fp_const[i]);
3937 if (is_end_of_line[(unsigned char) **str])
3938 return i + 8;
3939 *str = start;
3940 }
3941 }
3942
3943 /* Just because we didn't get a match doesn't mean that the constant
3944 isn't valid, just that it is in a format that we don't
3945 automatically recognize. Try parsing it with the standard
3946 expression routines. */
3947
3948 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
3949
3950 /* Look for a raw floating point number. */
3951 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
3952 && is_end_of_line[(unsigned char) *save_in])
3953 {
3954 for (i = 0; i < NUM_FLOAT_VALS; i++)
3955 {
3956 for (j = 0; j < MAX_LITTLENUMS; j++)
3957 {
3958 if (words[j] != fp_values[i][j])
3959 break;
3960 }
3961
3962 if (j == MAX_LITTLENUMS)
3963 {
3964 *str = save_in;
3965 return i + 8;
3966 }
3967 }
3968 }
3969
3970 /* Try to parse a more complex expression; this will probably fail
3971 unless the code uses a floating point prefix (e.g. "0f"). */
3972 save_in = input_line_pointer;
3973 input_line_pointer = *str;
3974 if (expression (&exp) == absolute_section
3975 && exp.X_op == O_big
3976 && exp.X_add_number < 0)
3977 {
3978 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
3979 Ditto for 15. */
3980 if (gen_to_words (words, 5, (long) 15) == 0)
3981 {
3982 for (i = 0; i < NUM_FLOAT_VALS; i++)
3983 {
3984 for (j = 0; j < MAX_LITTLENUMS; j++)
3985 {
3986 if (words[j] != fp_values[i][j])
3987 break;
3988 }
3989
3990 if (j == MAX_LITTLENUMS)
3991 {
3992 *str = input_line_pointer;
3993 input_line_pointer = save_in;
3994 return i + 8;
3995 }
3996 }
3997 }
3998 }
3999
4000 *str = input_line_pointer;
4001 input_line_pointer = save_in;
4002 inst.error = _("invalid FPA immediate expression");
4003 return FAIL;
4004 }
4005
4006 /* Returns 1 if a number has "quarter-precision" float format
4007 0baBbbbbbc defgh000 00000000 00000000. */
4008
4009 static int
4010 is_quarter_float (unsigned imm)
4011 {
4012 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4013 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4014 }
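/* Worked example: 1.0f is 0x3f800000 and 2.0f is 0x40000000; both have
   zero low mantissa bits and an exponent field matching the pattern
   above, so they qualify.  0.1f (0x3dcccccd) does not, because its low
   mantissa bits are nonzero.  */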
4015
4016 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4017 0baBbbbbbc defgh000 00000000 00000000.
4018 The minus-zero case needs special handling, since it can't be encoded in the
4019 "quarter-precision" float format, but can nonetheless be loaded as an integer
4020 constant. */
4021
4022 static unsigned
4023 parse_qfloat_immediate (char **ccp, int *immed)
4024 {
4025 char *str = *ccp;
4026 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4027
4028 skip_past_char (&str, '#');
4029
4030 if ((str = atof_ieee (str, 's', words)) != NULL)
4031 {
4032 unsigned fpword = 0;
4033 int i;
4034
4035 /* Our FP word must be 32 bits (single-precision FP). */
4036 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4037 {
4038 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4039 fpword |= words[i];
4040 }
4041
4042 if (is_quarter_float (fpword) || fpword == 0x80000000)
4043 *immed = fpword;
4044 else
4045 return FAIL;
4046
4047 *ccp = str;
4048
4049 return SUCCESS;
4050 }
4051
4052 return FAIL;
4053 }
4054
4055 /* Shift operands. */
4056 enum shift_kind
4057 {
4058 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4059 };
4060
4061 struct asm_shift_name
4062 {
4063 const char *name;
4064 enum shift_kind kind;
4065 };
4066
4067 /* Third argument to parse_shift. */
4068 enum parse_shift_mode
4069 {
4070 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4071 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4072 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4073 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4074 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4075 };
4076
4077 /* Parse a <shift> specifier on an ARM data processing instruction.
4078 This has three forms:
4079
4080 (LSL|LSR|ASL|ASR|ROR) Rs
4081 (LSL|LSR|ASL|ASR|ROR) #imm
4082 RRX
4083
4084 Note that ASL is assimilated to LSL in the instruction encoding, and
4085 RRX to ROR #0 (which cannot be written as such). */
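/* For example (registers and shift counts are illustrative):

        add     r0, r1, r2, LSL #3
        mov     r0, r1, ASR r3
        mov     r0, r1, RRX
*/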
4086
4087 static int
4088 parse_shift (char **str, int i, enum parse_shift_mode mode)
4089 {
4090 const struct asm_shift_name *shift_name;
4091 enum shift_kind shift;
4092 char *s = *str;
4093 char *p = s;
4094 int reg;
4095
4096 for (p = *str; ISALPHA (*p); p++)
4097 ;
4098
4099 if (p == *str)
4100 {
4101 inst.error = _("shift expression expected");
4102 return FAIL;
4103 }
4104
4105 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4106
4107 if (shift_name == NULL)
4108 {
4109 inst.error = _("shift expression expected");
4110 return FAIL;
4111 }
4112
4113 shift = shift_name->kind;
4114
4115 switch (mode)
4116 {
4117 case NO_SHIFT_RESTRICT:
4118 case SHIFT_IMMEDIATE: break;
4119
4120 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4121 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4122 {
4123 inst.error = _("'LSL' or 'ASR' required");
4124 return FAIL;
4125 }
4126 break;
4127
4128 case SHIFT_LSL_IMMEDIATE:
4129 if (shift != SHIFT_LSL)
4130 {
4131 inst.error = _("'LSL' required");
4132 return FAIL;
4133 }
4134 break;
4135
4136 case SHIFT_ASR_IMMEDIATE:
4137 if (shift != SHIFT_ASR)
4138 {
4139 inst.error = _("'ASR' required");
4140 return FAIL;
4141 }
4142 break;
4143
4144 default: abort ();
4145 }
4146
4147 if (shift != SHIFT_RRX)
4148 {
4149 /* Whitespace can appear here if the next thing is a bare digit. */
4150 skip_whitespace (p);
4151
4152 if (mode == NO_SHIFT_RESTRICT
4153 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4154 {
4155 inst.operands[i].imm = reg;
4156 inst.operands[i].immisreg = 1;
4157 }
4158 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4159 return FAIL;
4160 }
4161 inst.operands[i].shift_kind = shift;
4162 inst.operands[i].shifted = 1;
4163 *str = p;
4164 return SUCCESS;
4165 }
4166
4167 /* Parse a <shifter_operand> for an ARM data processing instruction:
4168
4169 #<immediate>
4170 #<immediate>, <rotate>
4171 <Rm>
4172 <Rm>, <shift>
4173
4174 where <shift> is defined by parse_shift above, and <rotate> is a
4175 multiple of 2 between 0 and 30. Validation of immediate operands
4176 is deferred to md_apply_fix. */
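/* For example, "#0xff, 8" denotes the value 0xff rotated right by 8,
   i.e. 0xff000000.  The code below pre-rotates the constant into that
   decoded form; md_apply_fix later re-encodes it as an 8-bit immediate
   plus a 4-bit rotation field (holding the rotation divided by two).  */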
4177
4178 static int
4179 parse_shifter_operand (char **str, int i)
4180 {
4181 int value;
4182 expressionS expr;
4183
4184 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4185 {
4186 inst.operands[i].reg = value;
4187 inst.operands[i].isreg = 1;
4188
4189 /* parse_shift will override this if appropriate */
4190 inst.reloc.exp.X_op = O_constant;
4191 inst.reloc.exp.X_add_number = 0;
4192
4193 if (skip_past_comma (str) == FAIL)
4194 return SUCCESS;
4195
4196 /* Shift operation on register. */
4197 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4198 }
4199
4200 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4201 return FAIL;
4202
4203 if (skip_past_comma (str) == SUCCESS)
4204 {
4205 /* #x, y -- i.e. explicit rotation by Y. */
4206 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4207 return FAIL;
4208
4209 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4210 {
4211 inst.error = _("constant expression expected");
4212 return FAIL;
4213 }
4214
4215 value = expr.X_add_number;
4216 if (value < 0 || value > 30 || value % 2 != 0)
4217 {
4218 inst.error = _("invalid rotation");
4219 return FAIL;
4220 }
4221 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4222 {
4223 inst.error = _("invalid constant");
4224 return FAIL;
4225 }
4226
4227 /* Convert to decoded value. md_apply_fix will put it back. */
4228 inst.reloc.exp.X_add_number
4229 = (((inst.reloc.exp.X_add_number << (32 - value))
4230 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4231 }
4232
4233 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4234 inst.reloc.pc_rel = 0;
4235 return SUCCESS;
4236 }
4237
4238 /* Parse all forms of an ARM address expression. Information is written
4239 to inst.operands[i] and/or inst.reloc.
4240
4241 Preindexed addressing (.preind=1):
4242
4243 [Rn, #offset] .reg=Rn .reloc.exp=offset
4244 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4245 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4246 .shift_kind=shift .reloc.exp=shift_imm
4247
4248 These three may have a trailing ! which causes .writeback to be set also.
4249
4250 Postindexed addressing (.postind=1, .writeback=1):
4251
4252 [Rn], #offset .reg=Rn .reloc.exp=offset
4253 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4254 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4255 .shift_kind=shift .reloc.exp=shift_imm
4256
4257 Unindexed addressing (.preind=0, .postind=0):
4258
4259 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4260
4261 Other:
4262
4263 [Rn]{!} shorthand for [Rn,#0]{!}
4264 =immediate .isreg=0 .reloc.exp=immediate
4265 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4266
4267 It is the caller's responsibility to check for addressing modes not
4268 supported by the instruction, and to set inst.reloc.type. */
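/* For example, "[r1, #4]!" yields .reg=r1 .preind=1 .writeback=1 with the
   offset 4 in .reloc.exp, while "[r1], r2, lsl #2" yields .reg=r1 .imm=r2
   .immisreg=1 .postind=1 .writeback=1 .shift_kind=SHIFT_LSL. */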
4269
4270 static int
4271 parse_address (char **str, int i)
4272 {
4273 char *p = *str;
4274 int reg;
4275
4276 if (skip_past_char (&p, '[') == FAIL)
4277 {
4278 if (skip_past_char (&p, '=') == FAIL)
4279 {
4280 /* bare address - translate to PC-relative offset */
4281 inst.reloc.pc_rel = 1;
4282 inst.operands[i].reg = REG_PC;
4283 inst.operands[i].isreg = 1;
4284 inst.operands[i].preind = 1;
4285 }
4286 /* else a load-constant pseudo op, no special treatment needed here */
4287
4288 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4289 return FAIL;
4290
4291 *str = p;
4292 return SUCCESS;
4293 }
4294
4295 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4296 {
4297 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4298 return FAIL;
4299 }
4300 inst.operands[i].reg = reg;
4301 inst.operands[i].isreg = 1;
4302
4303 if (skip_past_comma (&p) == SUCCESS)
4304 {
4305 inst.operands[i].preind = 1;
4306
4307 if (*p == '+') p++;
4308 else if (*p == '-') p++, inst.operands[i].negative = 1;
4309
4310 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4311 {
4312 inst.operands[i].imm = reg;
4313 inst.operands[i].immisreg = 1;
4314
4315 if (skip_past_comma (&p) == SUCCESS)
4316 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4317 return FAIL;
4318 }
4319 else if (skip_past_char (&p, ':') == SUCCESS)
4320 {
4321 /* FIXME: '@' should be used here, but it's filtered out by generic
4322 code before we get to see it here. This may be subject to
4323 change. */
4324 expressionS exp;
4325 my_get_expression (&exp, &p, GE_NO_PREFIX);
4326 if (exp.X_op != O_constant)
4327 {
4328 inst.error = _("alignment must be constant");
4329 return FAIL;
4330 }
4331 inst.operands[i].imm = exp.X_add_number << 8;
4332 inst.operands[i].immisalign = 1;
4333 /* Alignments are not pre-indexes. */
4334 inst.operands[i].preind = 0;
4335 }
4336 else
4337 {
4338 if (inst.operands[i].negative)
4339 {
4340 inst.operands[i].negative = 0;
4341 p--;
4342 }
4343 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4344 return FAIL;
4345 }
4346 }
4347
4348 if (skip_past_char (&p, ']') == FAIL)
4349 {
4350 inst.error = _("']' expected");
4351 return FAIL;
4352 }
4353
4354 if (skip_past_char (&p, '!') == SUCCESS)
4355 inst.operands[i].writeback = 1;
4356
4357 else if (skip_past_comma (&p) == SUCCESS)
4358 {
4359 if (skip_past_char (&p, '{') == SUCCESS)
4360 {
4361 /* [Rn], {expr} - unindexed, with option */
4362 if (parse_immediate (&p, &inst.operands[i].imm,
4363 0, 255, TRUE) == FAIL)
4364 return FAIL;
4365
4366 if (skip_past_char (&p, '}') == FAIL)
4367 {
4368 inst.error = _("'}' expected at end of 'option' field");
4369 return FAIL;
4370 }
4371 if (inst.operands[i].preind)
4372 {
4373 inst.error = _("cannot combine index with option");
4374 return FAIL;
4375 }
4376 *str = p;
4377 return SUCCESS;
4378 }
4379 else
4380 {
4381 inst.operands[i].postind = 1;
4382 inst.operands[i].writeback = 1;
4383
4384 if (inst.operands[i].preind)
4385 {
4386 inst.error = _("cannot combine pre- and post-indexing");
4387 return FAIL;
4388 }
4389
4390 if (*p == '+') p++;
4391 else if (*p == '-') p++, inst.operands[i].negative = 1;
4392
4393 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4394 {
4395 /* We might be using the immediate for alignment already. If we
4396 are, OR the register number into the low-order bits. */
4397 if (inst.operands[i].immisalign)
4398 inst.operands[i].imm |= reg;
4399 else
4400 inst.operands[i].imm = reg;
4401 inst.operands[i].immisreg = 1;
4402
4403 if (skip_past_comma (&p) == SUCCESS)
4404 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4405 return FAIL;
4406 }
4407 else
4408 {
4409 if (inst.operands[i].negative)
4410 {
4411 inst.operands[i].negative = 0;
4412 p--;
4413 }
4414 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4415 return FAIL;
4416 }
4417 }
4418 }
4419
4420 /* If at this point neither .preind nor .postind is set, we have a
4421 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4422 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4423 {
4424 inst.operands[i].preind = 1;
4425 inst.reloc.exp.X_op = O_constant;
4426 inst.reloc.exp.X_add_number = 0;
4427 }
4428 *str = p;
4429 return SUCCESS;
4430 }
4431
4432 /* Parse an operand for a MOVW or MOVT instruction. */
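/* For example, "movw r0, #:lower16:foo" is recognized by its ":lower16:"
   prefix and tagged BFD_RELOC_ARM_MOVW, whereas a plain "movw r0, #1234"
   must be a constant in the range 0..0xffff. */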
4433 static int
4434 parse_half (char **str)
4435 {
4436 char * p;
4437
4438 p = *str;
4439 skip_past_char (&p, '#');
4440 if (strncasecmp (p, ":lower16:", 9) == 0)
4441 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4442 else if (strncasecmp (p, ":upper16:", 9) == 0)
4443 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4444
4445 if (inst.reloc.type != BFD_RELOC_UNUSED)
4446 {
4447 p += 9;
4448 skip_whitespace (p);
4449 }
4450
4451 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4452 return FAIL;
4453
4454 if (inst.reloc.type == BFD_RELOC_UNUSED)
4455 {
4456 if (inst.reloc.exp.X_op != O_constant)
4457 {
4458 inst.error = _("constant expression expected");
4459 return FAIL;
4460 }
4461 if (inst.reloc.exp.X_add_number < 0
4462 || inst.reloc.exp.X_add_number > 0xffff)
4463 {
4464 inst.error = _("immediate value out of range");
4465 return FAIL;
4466 }
4467 }
4468 *str = p;
4469 return SUCCESS;
4470 }
4471
4472 /* Miscellaneous. */
4473
4474 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4475 or a bitmask suitable to be or-ed into the ARM msr instruction. */
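/* For example, "msr cpsr_fc, r0" arrives here as "cpsr_fc, r0"; the "fc"
   suffix is looked up in arm_psr_hsh, while a bare "cpsr" or "spsr" with no
   suffix defaults to the control and flag fields (PSR_c | PSR_f). */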
4476 static int
4477 parse_psr (char **str)
4478 {
4479 char *p;
4480 unsigned long psr_field;
4481 const struct asm_psr *psr;
4482 char *start;
4483
4484 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4485 feature for ease of use and backwards compatibility. */
4486 p = *str;
4487 if (strncasecmp (p, "SPSR", 4) == 0)
4488 psr_field = SPSR_BIT;
4489 else if (strncasecmp (p, "CPSR", 4) == 0)
4490 psr_field = 0;
4491 else
4492 {
4493 start = p;
4494 do
4495 p++;
4496 while (ISALNUM (*p) || *p == '_');
4497
4498 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4499 if (!psr)
4500 return FAIL;
4501
4502 *str = p;
4503 return psr->field;
4504 }
4505
4506 p += 4;
4507 if (*p == '_')
4508 {
4509 /* A suffix follows. */
4510 p++;
4511 start = p;
4512
4513 do
4514 p++;
4515 while (ISALNUM (*p) || *p == '_');
4516
4517 psr = hash_find_n (arm_psr_hsh, start, p - start);
4518 if (!psr)
4519 goto error;
4520
4521 psr_field |= psr->field;
4522 }
4523 else
4524 {
4525 if (ISALNUM (*p))
4526 goto error; /* Garbage after "[CS]PSR". */
4527
4528 psr_field |= (PSR_c | PSR_f);
4529 }
4530 *str = p;
4531 return psr_field;
4532
4533 error:
4534 inst.error = _("flag for {c}psr instruction expected");
4535 return FAIL;
4536 }
4537
4538 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4539 value suitable for splatting into the AIF field of the instruction. */
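/* For example, the "if" in "cpsid if" yields 0x2 | 0x1 == 0x3 (I and F set,
   A clear); an empty flag string is rejected with "missing CPS flags". */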
4540
4541 static int
4542 parse_cps_flags (char **str)
4543 {
4544 int val = 0;
4545 int saw_a_flag = 0;
4546 char *s = *str;
4547
4548 for (;;)
4549 switch (*s++)
4550 {
4551 case '\0': case ',':
4552 goto done;
4553
4554 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4555 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4556 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4557
4558 default:
4559 inst.error = _("unrecognized CPS flag");
4560 return FAIL;
4561 }
4562
4563 done:
4564 if (saw_a_flag == 0)
4565 {
4566 inst.error = _("missing CPS flags");
4567 return FAIL;
4568 }
4569
4570 *str = s - 1;
4571 return val;
4572 }
4573
4574 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4575 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4576
4577 static int
4578 parse_endian_specifier (char **str)
4579 {
4580 int little_endian;
4581 char *s = *str;
4582
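/* Note that strncasecmp returns zero on a match, so as written a "BE"
   prefix falls through to the else-if and yields 1, any other prefix yields
   0, and the final else branch is unreachable. */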
4583 if (strncasecmp (s, "BE", 2))
4584 little_endian = 0;
4585 else if (strncasecmp (s, "LE", 2))
4586 little_endian = 1;
4587 else
4588 {
4589 inst.error = _("valid endian specifiers are be or le");
4590 return FAIL;
4591 }
4592
4593 if (ISALNUM (s[2]) || s[2] == '_')
4594 {
4595 inst.error = _("valid endian specifiers are be or le");
4596 return FAIL;
4597 }
4598
4599 *str = s + 2;
4600 return little_endian;
4601 }
4602
4603 /* Parse a rotation specifier: ROR #0, #8, #16, or #24. The value returned
4604 is suitable for poking into the rotate field of an sxt or sxta
4605 instruction, or FAIL on error. */
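/* For example, "sxtah r0, r1, r2, ror #16" reaches here with "ror #16",
   which maps to 0x2 in the two-bit rotate field. */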
4606
4607 static int
4608 parse_ror (char **str)
4609 {
4610 int rot;
4611 char *s = *str;
4612
4613 if (strncasecmp (s, "ROR", 3) == 0)
4614 s += 3;
4615 else
4616 {
4617 inst.error = _("missing rotation field after comma");
4618 return FAIL;
4619 }
4620
4621 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
4622 return FAIL;
4623
4624 switch (rot)
4625 {
4626 case 0: *str = s; return 0x0;
4627 case 8: *str = s; return 0x1;
4628 case 16: *str = s; return 0x2;
4629 case 24: *str = s; return 0x3;
4630
4631 default:
4632 inst.error = _("rotation can only be 0, 8, 16, or 24");
4633 return FAIL;
4634 }
4635 }
4636
4637 /* Parse a conditional code (from conds[] below). The value returned is in the
4638 range 0 .. 14, or FAIL. */
4639 static int
4640 parse_cond (char **str)
4641 {
4642 char *p, *q;
4643 const struct asm_cond *c;
4644
4645 p = q = *str;
4646 while (ISALPHA (*q))
4647 q++;
4648
4649 c = hash_find_n (arm_cond_hsh, p, q - p);
4650 if (!c)
4651 {
4652 inst.error = _("condition required");
4653 return FAIL;
4654 }
4655
4656 *str = q;
4657 return c->value;
4658 }
4659
4660 /* Parse an option for a barrier instruction. Returns the encoding for the
4661 option, or FAIL. */
4662 static int
4663 parse_barrier (char **str)
4664 {
4665 char *p, *q;
4666 const struct asm_barrier_opt *o;
4667
4668 p = q = *str;
4669 while (ISALPHA (*q))
4670 q++;
4671
4672 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
4673 if (!o)
4674 return FAIL;
4675
4676 *str = q;
4677 return o->value;
4678 }
4679
4680 /* Parse the operands of a table branch instruction. Similar to a memory
4681 operand. */
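/* For example, "tbb [r0, r1]" and "tbh [r0, r1, lsl #1]" both come through
   here; if a shift is present it must be LSL #1, which is checked below. */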
4682 static int
4683 parse_tb (char **str)
4684 {
4685 char * p = *str;
4686 int reg;
4687
4688 if (skip_past_char (&p, '[') == FAIL)
4689 {
4690 inst.error = _("'[' expected");
4691 return FAIL;
4692 }
4693
4694 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4695 {
4696 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4697 return FAIL;
4698 }
4699 inst.operands[0].reg = reg;
4700
4701 if (skip_past_comma (&p) == FAIL)
4702 {
4703 inst.error = _("',' expected");
4704 return FAIL;
4705 }
4706
4707 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4708 {
4709 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4710 return FAIL;
4711 }
4712 inst.operands[0].imm = reg;
4713
4714 if (skip_past_comma (&p) == SUCCESS)
4715 {
4716 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
4717 return FAIL;
4718 if (inst.reloc.exp.X_add_number != 1)
4719 {
4720 inst.error = _("invalid shift");
4721 return FAIL;
4722 }
4723 inst.operands[0].shifted = 1;
4724 }
4725
4726 if (skip_past_char (&p, ']') == FAIL)
4727 {
4728 inst.error = _("']' expected");
4729 return FAIL;
4730 }
4731 *str = p;
4732 return SUCCESS;
4733 }
4734
4735 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4736 information on the types the operands can take and how they are encoded.
4737 Note particularly the abuse of ".regisimm" to signify a Neon register.
4738 Up to three operands may be read; this function handles setting the
4739 ".present" field for each operand itself.
4740 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4741 else returns FAIL. */
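/* For example, "vmov d0, r1, r2" matches case 5 below (a D register
   followed by two ARM registers), while "vmov.i32 q0, #0" falls through to
   the immediate handling of cases 2 and 3. */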
4742
4743 static int
4744 parse_neon_mov (char **str, int *which_operand)
4745 {
4746 int i = *which_operand, val;
4747 enum arm_reg_type rtype;
4748 char *ptr = *str;
4749 struct neon_type_el optype;
4750
4751 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4752 {
4753 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4754 inst.operands[i].reg = val;
4755 inst.operands[i].isscalar = 1;
4756 inst.operands[i].vectype = optype;
4757 inst.operands[i++].present = 1;
4758
4759 if (skip_past_comma (&ptr) == FAIL)
4760 goto wanted_comma;
4761
4762 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4763 goto wanted_arm;
4764
4765 inst.operands[i].reg = val;
4766 inst.operands[i].isreg = 1;
4767 inst.operands[i].present = 1;
4768 }
4769 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4770 != FAIL)
4771 {
4772 /* Cases 0, 1, 2, 3, 5 (D only). */
4773 if (skip_past_comma (&ptr) == FAIL)
4774 goto wanted_comma;
4775
4776 inst.operands[i].reg = val;
4777 inst.operands[i].isreg = 1;
4778 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4779 inst.operands[i].vectype = optype;
4780 inst.operands[i++].present = 1;
4781
4782 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4783 {
4784 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>. */
4785 inst.operands[i-1].regisimm = 1;
4786 inst.operands[i].reg = val;
4787 inst.operands[i].isreg = 1;
4788 inst.operands[i++].present = 1;
4789
4790 if (rtype == REG_TYPE_NQ)
4791 {
4792 first_error (_("can't use Neon quad register here"));
4793 return FAIL;
4794 }
4795 if (skip_past_comma (&ptr) == FAIL)
4796 goto wanted_comma;
4797 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4798 goto wanted_arm;
4799 inst.operands[i].reg = val;
4800 inst.operands[i].isreg = 1;
4801 inst.operands[i].present = 1;
4802 }
4803 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
4804 {
4805 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4806 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm> */
4807 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4808 goto bad_cond;
4809 }
4810 else if (parse_big_immediate (&ptr, i) == SUCCESS)
4811 {
4812 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4813 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4814 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4815 goto bad_cond;
4816 }
4817 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4818 != FAIL)
4819 {
4820 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4821 Case 1: VMOV<c><q> <Dd>, <Dm> */
4822 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4823 goto bad_cond;
4824
4825 inst.operands[i].reg = val;
4826 inst.operands[i].isreg = 1;
4827 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4828 inst.operands[i].vectype = optype;
4829 inst.operands[i].present = 1;
4830 }
4831 else
4832 {
4833 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4834 return FAIL;
4835 }
4836 }
4837 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4838 {
4839 /* Cases 6, 7. */
4840 inst.operands[i].reg = val;
4841 inst.operands[i].isreg = 1;
4842 inst.operands[i++].present = 1;
4843
4844 if (skip_past_comma (&ptr) == FAIL)
4845 goto wanted_comma;
4846
4847 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4848 {
4849 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4850 inst.operands[i].reg = val;
4851 inst.operands[i].isscalar = 1;
4852 inst.operands[i].present = 1;
4853 inst.operands[i].vectype = optype;
4854 }
4855 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4856 {
4857 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4858 inst.operands[i].reg = val;
4859 inst.operands[i].isreg = 1;
4860 inst.operands[i++].present = 1;
4861
4862 if (skip_past_comma (&ptr) == FAIL)
4863 goto wanted_comma;
4864
4865 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFD, NULL, &optype))
4866 == FAIL)
4867 {
4868 first_error (_(reg_expected_msgs[REG_TYPE_VFD]));
4869 return FAIL;
4870 }
4871
4872 inst.operands[i].reg = val;
4873 inst.operands[i].isreg = 1;
4874 inst.operands[i].regisimm = 1;
4875 inst.operands[i].vectype = optype;
4876 inst.operands[i].present = 1;
4877 }
4878 }
4879 else
4880 {
4881 first_error (_("parse error"));
4882 return FAIL;
4883 }
4884
4885 /* Successfully parsed the operands. Update args. */
4886 *which_operand = i;
4887 *str = ptr;
4888 return SUCCESS;
4889
4890 wanted_comma:
4891 first_error (_("expected comma"));
4892 return FAIL;
4893
4894 wanted_arm:
4895 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
4896 return FAIL;
4897
4898 bad_cond:
4899 first_error (_("instruction cannot be conditionalized"));
4900 return FAIL;
4901 }
4902
4903 /* Matcher codes for parse_operands. */
4904 enum operand_parse_code
4905 {
4906 OP_stop, /* end of line */
4907
4908 OP_RR, /* ARM register */
4909 OP_RRnpc, /* ARM register, not r15 */
4910 OP_RRnpcb, /* ARM register, not r15, in square brackets */
4911 OP_RRw, /* ARM register, not r15, optional trailing ! */
4912 OP_RCP, /* Coprocessor number */
4913 OP_RCN, /* Coprocessor register */
4914 OP_RF, /* FPA register */
4915 OP_RVS, /* VFP single precision register */
4916 OP_RVD, /* VFP double precision register (0..15) */
4917 OP_RND, /* Neon double precision register (0..31) */
4918 OP_RNQ, /* Neon quad precision register */
4919 OP_RNDQ, /* Neon double or quad precision register */
4920 OP_RNSC, /* Neon scalar D[X] */
4921 OP_RVC, /* VFP control register */
4922 OP_RMF, /* Maverick F register */
4923 OP_RMD, /* Maverick D register */
4924 OP_RMFX, /* Maverick FX register */
4925 OP_RMDX, /* Maverick DX register */
4926 OP_RMAX, /* Maverick AX register */
4927 OP_RMDS, /* Maverick DSPSC register */
4928 OP_RIWR, /* iWMMXt wR register */
4929 OP_RIWC, /* iWMMXt wC register */
4930 OP_RIWG, /* iWMMXt wCG register */
4931 OP_RXA, /* XScale accumulator register */
4932
4933 OP_REGLST, /* ARM register list */
4934 OP_VRSLST, /* VFP single-precision register list */
4935 OP_VRDLST, /* VFP double-precision register list */
4936 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
4937 OP_NSTRLST, /* Neon element/structure list */
4938
4939 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
4940 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
4941 OP_RR_RNSC, /* ARM reg or Neon scalar. */
4942 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
4943 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
4944 OP_VMOV, /* Neon VMOV operands. */
4945 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
4946 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
4947
4948 OP_I0, /* immediate zero */
4949 OP_I7, /* immediate value 0 .. 7 */
4950 OP_I15, /* 0 .. 15 */
4951 OP_I16, /* 1 .. 16 */
4952 OP_I16z, /* 0 .. 16 */
4953 OP_I31, /* 0 .. 31 */
4954 OP_I31w, /* 0 .. 31, optional trailing ! */
4955 OP_I32, /* 1 .. 32 */
4956 OP_I32z, /* 0 .. 32 */
4957 OP_I63, /* 0 .. 63 */
4958 OP_I63s, /* -64 .. 63 */
4959 OP_I64, /* 1 .. 64 */
4960 OP_I64z, /* 0 .. 64 */
4961 OP_I255, /* 0 .. 255 */
4962
4963 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
4964 OP_I7b, /* 0 .. 7 */
4965 OP_I15b, /* 0 .. 15 */
4966 OP_I31b, /* 0 .. 31 */
4967
4968 OP_SH, /* shifter operand */
4969 OP_ADDR, /* Memory address expression (any mode) */
4970 OP_EXP, /* arbitrary expression */
4971 OP_EXPi, /* same, with optional immediate prefix */
4972 OP_EXPr, /* same, with optional relocation suffix */
4973 OP_HALF, /* 0 .. 65535 or low/high reloc. */
4974
4975 OP_CPSF, /* CPS flags */
4976 OP_ENDI, /* Endianness specifier */
4977 OP_PSR, /* CPSR/SPSR mask for msr */
4978 OP_COND, /* conditional code */
4979 OP_TB, /* Table branch. */
4980
4981 OP_RRnpc_I0, /* ARM register or literal 0 */
4982 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
4983 OP_RR_EXi, /* ARM register or expression with imm prefix */
4984 OP_RF_IF, /* FPA register or immediate */
4985 OP_RIWR_RIWC, /* iWMMXt R or C reg */
4986
4987 /* Optional operands. */
4988 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
4989 OP_oI31b, /* 0 .. 31 */
4990 OP_oI32b, /* 1 .. 32 */
4991 OP_oIffffb, /* 0 .. 65535 */
4992 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
4993
4994 OP_oRR, /* ARM register */
4995 OP_oRRnpc, /* ARM register, not the PC */
4996 OP_oRND, /* Optional Neon double precision register */
4997 OP_oRNQ, /* Optional Neon quad precision register */
4998 OP_oRNDQ, /* Optional Neon double or quad precision register */
4999 OP_oSHll, /* LSL immediate */
5000 OP_oSHar, /* ASR immediate */
5001 OP_oSHllar, /* LSL or ASR immediate */
5002 OP_oROR, /* ROR 0/8/16/24 */
5003 OP_oBARRIER, /* Option argument for a barrier instruction. */
5004
5005 OP_FIRST_OPTIONAL = OP_oI7b
5006 };
5007
5008 /* Generic instruction operand parser. This does no encoding and no
5009 semantic validation; it merely squirrels values away in the inst
5010 structure. Returns SUCCESS or FAIL depending on whether the
5011 specified grammar matched. */
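/* As a purely illustrative example, a pattern such as
   { OP_RRnpc, OP_oRRnpc, OP_SH, OP_stop } would accept both
   "r0, r1, r2, lsl #3" and "r0, #42"; in the second form the optional
   middle register is marked absent by the backtracking logic below. */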
5012 static int
5013 parse_operands (char *str, const unsigned char *pattern)
5014 {
5015 unsigned const char *upat = pattern;
5016 char *backtrack_pos = 0;
5017 const char *backtrack_error = 0;
5018 int i, val, backtrack_index = 0;
5019 enum arm_reg_type rtype;
5020
5021 #define po_char_or_fail(chr) do { \
5022 if (skip_past_char (&str, chr) == FAIL) \
5023 goto bad_args; \
5024 } while (0)
5025
5026 #define po_reg_or_fail(regtype) do { \
5027 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5028 &inst.operands[i].vectype); \
5029 if (val == FAIL) \
5030 { \
5031 first_error (_(reg_expected_msgs[regtype])); \
5032 goto failure; \
5033 } \
5034 inst.operands[i].reg = val; \
5035 inst.operands[i].isreg = 1; \
5036 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5037 } while (0)
5038
5039 #define po_reg_or_goto(regtype, label) do { \
5040 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5041 &inst.operands[i].vectype); \
5042 if (val == FAIL) \
5043 goto label; \
5044 \
5045 inst.operands[i].reg = val; \
5046 inst.operands[i].isreg = 1; \
5047 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5048 } while (0)
5049
5050 #define po_imm_or_fail(min, max, popt) do { \
5051 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5052 goto failure; \
5053 inst.operands[i].imm = val; \
5054 } while (0)
5055
5056 #define po_scalar_or_goto(elsz, label) do { \
5057 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5058 if (val == FAIL) \
5059 goto label; \
5060 inst.operands[i].reg = val; \
5061 inst.operands[i].isscalar = 1; \
5062 } while (0)
5063
5064 #define po_misc_or_fail(expr) do { \
5065 if (expr) \
5066 goto failure; \
5067 } while (0)
5068
5069 skip_whitespace (str);
5070
5071 for (i = 0; upat[i] != OP_stop; i++)
5072 {
5073 if (upat[i] >= OP_FIRST_OPTIONAL)
5074 {
5075 /* Remember where we are in case we need to backtrack. */
5076 assert (!backtrack_pos);
5077 backtrack_pos = str;
5078 backtrack_error = inst.error;
5079 backtrack_index = i;
5080 }
5081
5082 if (i > 0)
5083 po_char_or_fail (',');
5084
5085 switch (upat[i])
5086 {
5087 /* Registers */
5088 case OP_oRRnpc:
5089 case OP_RRnpc:
5090 case OP_oRR:
5091 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5092 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5093 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5094 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5095 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5096 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5097 case OP_oRND:
5098 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5099 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5100 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5101 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5102 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5103 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5104 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5105 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5106 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5107 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5108 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5109 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5110 case OP_oRNQ:
5111 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5112 case OP_oRNDQ:
5113 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5114
5115 /* Neon scalar. Using an element size of 8 means that some invalid
5116 scalars are accepted here, so deal with those in later code. */
5117 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5118
5119 /* WARNING: We can expand to two operands here. This has the potential
5120 to totally confuse the backtracking mechanism! It will be OK at
5121 least as long as we don't try to use optional args as well,
5122 though. */
5123 case OP_NILO:
5124 {
5125 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5126 i++;
5127 skip_past_comma (&str);
5128 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5129 break;
5130 one_reg_only:
5131 /* Optional register operand was omitted. Unfortunately, it's in
5132 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5133 here (this is a bit grotty). */
5134 inst.operands[i] = inst.operands[i-1];
5135 inst.operands[i-1].present = 0;
5136 break;
5137 try_imm:
5138 /* Immediate gets verified properly later, so accept any now. */
5139 po_imm_or_fail (INT_MIN, INT_MAX, TRUE);
5140 }
5141 break;
5142
5143 case OP_RNDQ_I0:
5144 {
5145 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5146 break;
5147 try_imm0:
5148 po_imm_or_fail (0, 0, TRUE);
5149 }
5150 break;
5151
5152 case OP_RR_RNSC:
5153 {
5154 po_scalar_or_goto (8, try_rr);
5155 break;
5156 try_rr:
5157 po_reg_or_fail (REG_TYPE_RN);
5158 }
5159 break;
5160
5161 case OP_RNDQ_RNSC:
5162 {
5163 po_scalar_or_goto (8, try_ndq);
5164 break;
5165 try_ndq:
5166 po_reg_or_fail (REG_TYPE_NDQ);
5167 }
5168 break;
5169
5170 case OP_RND_RNSC:
5171 {
5172 po_scalar_or_goto (8, try_vfd);
5173 break;
5174 try_vfd:
5175 po_reg_or_fail (REG_TYPE_VFD);
5176 }
5177 break;
5178
5179 case OP_VMOV:
5180 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5181 not careful then bad things might happen. */
5182 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5183 break;
5184
5185 case OP_RNDQ_IMVNb:
5186 {
5187 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5188 break;
5189 try_mvnimm:
5190 /* There's a possibility of getting a 64-bit immediate here, so
5191 we need special handling. */
5192 if (parse_big_immediate (&str, i) == FAIL)
5193 {
5194 inst.error = _("immediate value is out of range");
5195 goto failure;
5196 }
5197 }
5198 break;
5199
5200 case OP_RNDQ_I63b:
5201 {
5202 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5203 break;
5204 try_shimm:
5205 po_imm_or_fail (0, 63, TRUE);
5206 }
5207 break;
5208
5209 case OP_RRnpcb:
5210 po_char_or_fail ('[');
5211 po_reg_or_fail (REG_TYPE_RN);
5212 po_char_or_fail (']');
5213 break;
5214
5215 case OP_RRw:
5216 po_reg_or_fail (REG_TYPE_RN);
5217 if (skip_past_char (&str, '!') == SUCCESS)
5218 inst.operands[i].writeback = 1;
5219 break;
5220
5221 /* Immediates */
5222 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5223 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5224 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5225 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5226 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5227 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5228 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5229 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5230 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5231 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5232 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5233 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5234
5235 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5236 case OP_oI7b:
5237 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5238 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5239 case OP_oI31b:
5240 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5241 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5242 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5243
5244 /* Immediate variants */
5245 case OP_oI255c:
5246 po_char_or_fail ('{');
5247 po_imm_or_fail (0, 255, TRUE);
5248 po_char_or_fail ('}');
5249 break;
5250
5251 case OP_I31w:
5252 /* The expression parser chokes on a trailing !, so we have
5253 to find it first and zap it. */
5254 {
5255 char *s = str;
5256 while (*s && *s != ',')
5257 s++;
5258 if (s[-1] == '!')
5259 {
5260 s[-1] = '\0';
5261 inst.operands[i].writeback = 1;
5262 }
5263 po_imm_or_fail (0, 31, TRUE);
5264 if (str == s - 1)
5265 str = s;
5266 }
5267 break;
5268
5269 /* Expressions */
5270 case OP_EXPi: EXPi:
5271 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5272 GE_OPT_PREFIX));
5273 break;
5274
5275 case OP_EXP:
5276 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5277 GE_NO_PREFIX));
5278 break;
5279
5280 case OP_EXPr: EXPr:
5281 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5282 GE_NO_PREFIX));
5283 if (inst.reloc.exp.X_op == O_symbol)
5284 {
5285 val = parse_reloc (&str);
5286 if (val == -1)
5287 {
5288 inst.error = _("unrecognized relocation suffix");
5289 goto failure;
5290 }
5291 else if (val != BFD_RELOC_UNUSED)
5292 {
5293 inst.operands[i].imm = val;
5294 inst.operands[i].hasreloc = 1;
5295 }
5296 }
5297 break;
5298
5299 /* Operand for MOVW or MOVT. */
5300 case OP_HALF:
5301 po_misc_or_fail (parse_half (&str));
5302 break;
5303
5304 /* Register or expression */
5305 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5306 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5307
5308 /* Register or immediate */
5309 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5310 I0: po_imm_or_fail (0, 0, FALSE); break;
5311
5312 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5313 IF:
5314 if (!is_immediate_prefix (*str))
5315 goto bad_args;
5316 str++;
5317 val = parse_fpa_immediate (&str);
5318 if (val == FAIL)
5319 goto failure;
5320 /* FPA immediates are encoded as registers 8-15.
5321 parse_fpa_immediate has already applied the offset. */
5322 inst.operands[i].reg = val;
5323 inst.operands[i].isreg = 1;
5324 break;
5325
5326 /* Two kinds of register */
5327 case OP_RIWR_RIWC:
5328 {
5329 struct reg_entry *rege = arm_reg_parse_multi (&str);
5330 if (rege->type != REG_TYPE_MMXWR
5331 && rege->type != REG_TYPE_MMXWC
5332 && rege->type != REG_TYPE_MMXWCG)
5333 {
5334 inst.error = _("iWMMXt data or control register expected");
5335 goto failure;
5336 }
5337 inst.operands[i].reg = rege->number;
5338 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5339 }
5340 break;
5341
5342 /* Misc */
5343 case OP_CPSF: val = parse_cps_flags (&str); break;
5344 case OP_ENDI: val = parse_endian_specifier (&str); break;
5345 case OP_oROR: val = parse_ror (&str); break;
5346 case OP_PSR: val = parse_psr (&str); break;
5347 case OP_COND: val = parse_cond (&str); break;
5348 case OP_oBARRIER:val = parse_barrier (&str); break;
5349
5350 case OP_TB:
5351 po_misc_or_fail (parse_tb (&str));
5352 break;
5353
5354 /* Register lists */
5355 case OP_REGLST:
5356 val = parse_reg_list (&str);
5357 if (*str == '^')
5358 {
5359 inst.operands[1].writeback = 1;
5360 str++;
5361 }
5362 break;
5363
5364 case OP_VRSLST:
5365 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5366 break;
5367
5368 case OP_VRDLST:
5369 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5370 break;
5371
5372 case OP_NRDLST:
5373 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5374 REGLIST_NEON_D);
5375 break;
5376
5377 case OP_NSTRLST:
5378 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5379 &inst.operands[i].vectype);
5380 break;
5381
5382 /* Addressing modes */
5383 case OP_ADDR:
5384 po_misc_or_fail (parse_address (&str, i));
5385 break;
5386
5387 case OP_SH:
5388 po_misc_or_fail (parse_shifter_operand (&str, i));
5389 break;
5390
5391 case OP_oSHll:
5392 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
5393 break;
5394
5395 case OP_oSHar:
5396 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
5397 break;
5398
5399 case OP_oSHllar:
5400 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
5401 break;
5402
5403 default:
5404 as_fatal ("unhandled operand code %d", upat[i]);
5405 }
5406
5407 /* Various value-based sanity checks and shared operations. We
5408 do not signal immediate failures for the register constraints;
5409 this allows a syntax error to take precedence. */
5410 switch (upat[i])
5411 {
5412 case OP_oRRnpc:
5413 case OP_RRnpc:
5414 case OP_RRnpcb:
5415 case OP_RRw:
5416 case OP_RRnpc_I0:
5417 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
5418 inst.error = BAD_PC;
5419 break;
5420
5421 case OP_CPSF:
5422 case OP_ENDI:
5423 case OP_oROR:
5424 case OP_PSR:
5425 case OP_COND:
5426 case OP_oBARRIER:
5427 case OP_REGLST:
5428 case OP_VRSLST:
5429 case OP_VRDLST:
5430 case OP_NRDLST:
5431 case OP_NSTRLST:
5432 if (val == FAIL)
5433 goto failure;
5434 inst.operands[i].imm = val;
5435 break;
5436
5437 default:
5438 break;
5439 }
5440
5441 /* If we get here, this operand was successfully parsed. */
5442 inst.operands[i].present = 1;
5443 continue;
5444
5445 bad_args:
5446 inst.error = BAD_ARGS;
5447
5448 failure:
5449 if (!backtrack_pos)
5450 {
5451 /* The parse routine should already have set inst.error, but set a
5452 default here just in case. */
5453 if (!inst.error)
5454 inst.error = _("syntax error");
5455 return FAIL;
5456 }
5457
5458 /* Do not backtrack over a trailing optional argument that
5459 absorbed some text. We will only fail again, with the
5460 'garbage following instruction' error message, which is
5461 probably less helpful than the current one. */
5462 if (backtrack_index == i && backtrack_pos != str
5463 && upat[i+1] == OP_stop)
5464 {
5465 if (!inst.error)
5466 inst.error = _("syntax error");
5467 return FAIL;
5468 }
5469
5470 /* Try again, skipping the optional argument at backtrack_pos. */
5471 str = backtrack_pos;
5472 inst.error = backtrack_error;
5473 inst.operands[backtrack_index].present = 0;
5474 i = backtrack_index;
5475 backtrack_pos = 0;
5476 }
5477
5478 /* Check that we have parsed all the arguments. */
5479 if (*str != '\0' && !inst.error)
5480 inst.error = _("garbage following instruction");
5481
5482 return inst.error ? FAIL : SUCCESS;
5483 }
5484
5485 #undef po_char_or_fail
5486 #undef po_reg_or_fail
5487 #undef po_reg_or_goto
5488 #undef po_imm_or_fail
5489 #undef po_scalar_or_goto
5490 \f
5491 /* Shorthand macro for instruction encoding functions issuing errors. */
5492 #define constraint(expr, err) do { \
5493 if (expr) \
5494 { \
5495 inst.error = err; \
5496 return; \
5497 } \
5498 } while (0)
5499
5500 /* Functions for operand encoding. ARM, then Thumb. */
5501
5502 #define rotate_left(v, n) (v << n | v >> (32 - n))
5503
5504 /* If VAL can be encoded in the immediate field of an ARM instruction,
5505 return the encoded form. Otherwise, return FAIL. */
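/* For example, 0xFF000000 is 0xFF rotated right by 8, so the loop below
   finds it at i == 8 and returns 0xFF | (8 << 7) == 0x4FF, i.e. constant
   0xFF with rotate field 4. */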
5506
5507 static unsigned int
5508 encode_arm_immediate (unsigned int val)
5509 {
5510 unsigned int a, i;
5511
5512 for (i = 0; i < 32; i += 2)
5513 if ((a = rotate_left (val, i)) <= 0xff)
5514 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
5515
5516 return FAIL;
5517 }
5518
5519 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5520 return the encoded form. Otherwise, return FAIL. */
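/* For example, 0x00AB00AB matches the byte-replicated-in-both-halfwords
   pattern and encodes as 0x100 | 0xAB, while 0xABABABAB encodes as
   0x300 | 0xAB. */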
5521 static unsigned int
5522 encode_thumb32_immediate (unsigned int val)
5523 {
5524 unsigned int a, i;
5525
5526 if (val <= 0xff)
5527 return val;
5528
5529 for (i = 1; i <= 24; i++)
5530 {
5531 a = val >> i;
5532 if ((val & ~(0xff << i)) == 0)
5533 return ((val >> i) & 0x7f) | ((32 - i) << 7);
5534 }
5535
5536 a = val & 0xff;
5537 if (val == ((a << 16) | a))
5538 return 0x100 | a;
5539 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
5540 return 0x300 | a;
5541
5542 a = val & 0xff00;
5543 if (val == ((a << 16) | a))
5544 return 0x200 | (a >> 8);
5545
5546 return FAIL;
5547 }
5548 /* Encode a VFP SP or DP register number into inst.instruction. */
5549
5550 static void
5551 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
5552 {
5553 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
5554 && reg > 15)
5555 {
5556 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
5557 {
5558 if (thumb_mode)
5559 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
5560 fpu_vfp_ext_v3);
5561 else
5562 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
5563 fpu_vfp_ext_v3);
5564 }
5565 else
5566 {
5567 first_error (_("D register out of range for selected VFP version"));
5568 return;
5569 }
5570 }
5571
5572 switch (pos)
5573 {
5574 case VFP_REG_Sd:
5575 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
5576 break;
5577
5578 case VFP_REG_Sn:
5579 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
5580 break;
5581
5582 case VFP_REG_Sm:
5583 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
5584 break;
5585
5586 case VFP_REG_Dd:
5587 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
5588 break;
5589
5590 case VFP_REG_Dn:
5591 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
5592 break;
5593
5594 case VFP_REG_Dm:
5595 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
5596 break;
5597
5598 default:
5599 abort ();
5600 }
5601 }
5602
5603 /* Encode a <shift> in an ARM-format instruction. The immediate,
5604 if any, is handled by md_apply_fix. */
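/* For example, a shift of "lsl r2" sets SHIFT_BY_REG and places r2 in
   bits 11:8, whereas "lsl #3" defers the shift amount to md_apply_fix via
   BFD_RELOC_ARM_SHIFT_IMM. */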
5605 static void
5606 encode_arm_shift (int i)
5607 {
5608 if (inst.operands[i].shift_kind == SHIFT_RRX)
5609 inst.instruction |= SHIFT_ROR << 5;
5610 else
5611 {
5612 inst.instruction |= inst.operands[i].shift_kind << 5;
5613 if (inst.operands[i].immisreg)
5614 {
5615 inst.instruction |= SHIFT_BY_REG;
5616 inst.instruction |= inst.operands[i].imm << 8;
5617 }
5618 else
5619 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5620 }
5621 }
5622
5623 static void
5624 encode_arm_shifter_operand (int i)
5625 {
5626 if (inst.operands[i].isreg)
5627 {
5628 inst.instruction |= inst.operands[i].reg;
5629 encode_arm_shift (i);
5630 }
5631 else
5632 inst.instruction |= INST_IMMEDIATE;
5633 }
5634
5635 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5636 static void
5637 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
5638 {
5639 assert (inst.operands[i].isreg);
5640 inst.instruction |= inst.operands[i].reg << 16;
5641
5642 if (inst.operands[i].preind)
5643 {
5644 if (is_t)
5645 {
5646 inst.error = _("instruction does not accept preindexed addressing");
5647 return;
5648 }
5649 inst.instruction |= PRE_INDEX;
5650 if (inst.operands[i].writeback)
5651 inst.instruction |= WRITE_BACK;
5652
5653 }
5654 else if (inst.operands[i].postind)
5655 {
5656 assert (inst.operands[i].writeback);
5657 if (is_t)
5658 inst.instruction |= WRITE_BACK;
5659 }
5660 else /* unindexed - only for coprocessor */
5661 {
5662 inst.error = _("instruction does not accept unindexed addressing");
5663 return;
5664 }
5665
5666 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
5667 && (((inst.instruction & 0x000f0000) >> 16)
5668 == ((inst.instruction & 0x0000f000) >> 12)))
5669 as_warn ((inst.instruction & LOAD_BIT)
5670 ? _("destination register same as write-back base")
5671 : _("source register same as write-back base"));
5672 }
5673
5674 /* inst.operands[i] was set up by parse_address. Encode it into an
5675 ARM-format mode 2 load or store instruction. If is_t is true,
5676 reject forms that cannot be used with a T instruction (i.e. not
5677 post-indexed). */
5678 static void
5679 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
5680 {
5681 encode_arm_addr_mode_common (i, is_t);
5682
5683 if (inst.operands[i].immisreg)
5684 {
5685 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
5686 inst.instruction |= inst.operands[i].imm;
5687 if (!inst.operands[i].negative)
5688 inst.instruction |= INDEX_UP;
5689 if (inst.operands[i].shifted)
5690 {
5691 if (inst.operands[i].shift_kind == SHIFT_RRX)
5692 inst.instruction |= SHIFT_ROR << 5;
5693 else
5694 {
5695 inst.instruction |= inst.operands[i].shift_kind << 5;
5696 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5697 }
5698 }
5699 }
5700 else /* immediate offset in inst.reloc */
5701 {
5702 if (inst.reloc.type == BFD_RELOC_UNUSED)
5703 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
5704 }
5705 }
5706
5707 /* inst.operands[i] was set up by parse_address. Encode it into an
5708 ARM-format mode 3 load or store instruction. Reject forms that
5709 cannot be used with such instructions. If is_t is true, reject
5710 forms that cannot be used with a T instruction (i.e. not
5711 post-indexed). */
5712 static void
5713 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
5714 {
5715 if (inst.operands[i].immisreg && inst.operands[i].shifted)
5716 {
5717 inst.error = _("instruction does not accept scaled register index");
5718 return;
5719 }
5720
5721 encode_arm_addr_mode_common (i, is_t);
5722
5723 if (inst.operands[i].immisreg)
5724 {
5725 inst.instruction |= inst.operands[i].imm;
5726 if (!inst.operands[i].negative)
5727 inst.instruction |= INDEX_UP;
5728 }
5729 else /* immediate offset in inst.reloc */
5730 {
5731 inst.instruction |= HWOFFSET_IMM;
5732 if (inst.reloc.type == BFD_RELOC_UNUSED)
5733 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
5734 }
5735 }
5736
5737 /* inst.operands[i] was set up by parse_address. Encode it into an
5738 ARM-format instruction. Reject all forms which cannot be encoded
5739 into a coprocessor load/store instruction. If wb_ok is false,
5740 reject use of writeback; if unind_ok is false, reject use of
5741 unindexed addressing. If reloc_override is not 0, use it instead
5742 of BFD_RELOC_ARM_CP_OFF_IMM. */
5743
5744 static int
5745 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
5746 {
5747 inst.instruction |= inst.operands[i].reg << 16;
5748
5749 assert (!(inst.operands[i].preind && inst.operands[i].postind));
5750
5751 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
5752 {
5753 assert (!inst.operands[i].writeback);
5754 if (!unind_ok)
5755 {
5756 inst.error = _("instruction does not support unindexed addressing");
5757 return FAIL;
5758 }
5759 inst.instruction |= inst.operands[i].imm;
5760 inst.instruction |= INDEX_UP;
5761 return SUCCESS;
5762 }
5763
5764 if (inst.operands[i].preind)
5765 inst.instruction |= PRE_INDEX;
5766
5767 if (inst.operands[i].writeback)
5768 {
5769 if (inst.operands[i].reg == REG_PC)
5770 {
5771 inst.error = _("pc may not be used with write-back");
5772 return FAIL;
5773 }
5774 if (!wb_ok)
5775 {
5776 inst.error = _("instruction does not support writeback");
5777 return FAIL;
5778 }
5779 inst.instruction |= WRITE_BACK;
5780 }
5781
5782 if (reloc_override)
5783 inst.reloc.type = reloc_override;
5784 else if (thumb_mode)
5785 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
5786 else
5787 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
5788 return SUCCESS;
5789 }
5790
5791 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5792 Determine whether it can be performed with a move instruction; if
5793 it can, convert inst.instruction to that move instruction and
5794 return 1; if it can't, convert inst.instruction to a literal-pool
5795 load and return 0. If this is not a valid thing to do in the
5796 current context, set inst.error and return 1.
5797
5798 inst.operands[i] describes the destination register. */
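/* For example, "ldr r0, =0x1000" can be rewritten as "mov r0, #0x1000"
   because 0x1000 is a valid ARM immediate, whereas "ldr r0, =0x12345678"
   gets a literal-pool entry and a PC-relative load. */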
5799
5800 static int
5801 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
5802 {
5803 unsigned long tbit;
5804
5805 if (thumb_p)
5806 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
5807 else
5808 tbit = LOAD_BIT;
5809
5810 if ((inst.instruction & tbit) == 0)
5811 {
5812 inst.error = _("invalid pseudo operation");
5813 return 1;
5814 }
5815 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
5816 {
5817 inst.error = _("constant expression expected");
5818 return 1;
5819 }
5820 if (inst.reloc.exp.X_op == O_constant)
5821 {
5822 if (thumb_p)
5823 {
5824 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
5825 {
5826 /* This can be done with a mov(1) instruction. */
5827 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
5828 inst.instruction |= inst.reloc.exp.X_add_number;
5829 return 1;
5830 }
5831 }
5832 else
5833 {
5834 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
5835 if (value != FAIL)
5836 {
5837 /* This can be done with a mov instruction. */
5838 inst.instruction &= LITERAL_MASK;
5839 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
5840 inst.instruction |= value & 0xfff;
5841 return 1;
5842 }
5843
5844 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
5845 if (value != FAIL)
5846 {
5847 /* This can be done with a mvn instruction. */
5848 inst.instruction &= LITERAL_MASK;
5849 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
5850 inst.instruction |= value & 0xfff;
5851 return 1;
5852 }
5853 }
5854 }
5855
5856 if (add_to_lit_pool () == FAIL)
5857 {
5858 inst.error = _("literal pool insertion failed");
5859 return 1;
5860 }
5861 inst.operands[1].reg = REG_PC;
5862 inst.operands[1].isreg = 1;
5863 inst.operands[1].preind = 1;
5864 inst.reloc.pc_rel = 1;
5865 inst.reloc.type = (thumb_p
5866 ? BFD_RELOC_ARM_THUMB_OFFSET
5867 : (mode_3
5868 ? BFD_RELOC_ARM_HWLITERAL
5869 : BFD_RELOC_ARM_LITERAL));
5870 return 0;
5871 }
5872
5873 /* Functions for instruction encoding, sorted by subarchitecture.
5874 First some generics; their names are taken from the conventional
5875 bit positions for register arguments in ARM format instructions. */
5876
5877 static void
5878 do_noargs (void)
5879 {
5880 }
5881
5882 static void
5883 do_rd (void)
5884 {
5885 inst.instruction |= inst.operands[0].reg << 12;
5886 }
5887
5888 static void
5889 do_rd_rm (void)
5890 {
5891 inst.instruction |= inst.operands[0].reg << 12;
5892 inst.instruction |= inst.operands[1].reg;
5893 }
5894
5895 static void
5896 do_rd_rn (void)
5897 {
5898 inst.instruction |= inst.operands[0].reg << 12;
5899 inst.instruction |= inst.operands[1].reg << 16;
5900 }
5901
5902 static void
5903 do_rn_rd (void)
5904 {
5905 inst.instruction |= inst.operands[0].reg << 16;
5906 inst.instruction |= inst.operands[1].reg << 12;
5907 }
5908
5909 static void
5910 do_rd_rm_rn (void)
5911 {
5912 unsigned Rn = inst.operands[2].reg;
5913 /* Enforce restrictions on SWP instruction. */
5914 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
5915 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
5916 _("Rn must not overlap other operands"));
5917 inst.instruction |= inst.operands[0].reg << 12;
5918 inst.instruction |= inst.operands[1].reg;
5919 inst.instruction |= Rn << 16;
5920 }
5921
5922 static void
5923 do_rd_rn_rm (void)
5924 {
5925 inst.instruction |= inst.operands[0].reg << 12;
5926 inst.instruction |= inst.operands[1].reg << 16;
5927 inst.instruction |= inst.operands[2].reg;
5928 }
5929
5930 static void
5931 do_rm_rd_rn (void)
5932 {
5933 inst.instruction |= inst.operands[0].reg;
5934 inst.instruction |= inst.operands[1].reg << 12;
5935 inst.instruction |= inst.operands[2].reg << 16;
5936 }
5937
5938 static void
5939 do_imm0 (void)
5940 {
5941 inst.instruction |= inst.operands[0].imm;
5942 }
5943
5944 static void
5945 do_rd_cpaddr (void)
5946 {
5947 inst.instruction |= inst.operands[0].reg << 12;
5948 encode_arm_cp_address (1, TRUE, TRUE, 0);
5949 }
5950
5951 /* ARM instructions, in alphabetical order by function name (except
5952 that wrapper functions appear immediately after the function they
5953 wrap). */
5954
5955 /* This is a pseudo-op of the form "adr rd, label" to be converted
5956 into a relative address of the form "add rd, pc, #label-.-8". */
5957
5958 static void
5959 do_adr (void)
5960 {
5961 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5962
5963 /* Frag hacking will turn this into a sub instruction if the offset turns
5964 out to be negative. */
5965 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5966 inst.reloc.pc_rel = 1;
5967 inst.reloc.exp.X_add_number -= 8;
5968 }
5969
5970 /* This is a pseudo-op of the form "adrl rd, label" to be converted
5971 into a relative address of the form:
5972 add rd, pc, #low(label-.-8)"
5973 add rd, rd, #high(label-.-8)" */
5974
5975 static void
5976 do_adrl (void)
5977 {
5978 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5979
5980 /* Frag hacking will turn this into a sub instruction if the offset turns
5981 out to be negative. */
5982 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
5983 inst.reloc.pc_rel = 1;
5984 inst.size = INSN_SIZE * 2;
5985 inst.reloc.exp.X_add_number -= 8;
5986 }
5987
5988 static void
5989 do_arit (void)
5990 {
5991 if (!inst.operands[1].present)
5992 inst.operands[1].reg = inst.operands[0].reg;
5993 inst.instruction |= inst.operands[0].reg << 12;
5994 inst.instruction |= inst.operands[1].reg << 16;
5995 encode_arm_shifter_operand (2);
5996 }
5997
5998 static void
5999 do_barrier (void)
6000 {
6001 if (inst.operands[0].present)
6002 {
6003 constraint ((inst.instruction & 0xf0) != 0x40
6004 && inst.operands[0].imm != 0xf,
6005 _("bad barrier type"));
6006 inst.instruction |= inst.operands[0].imm;
6007 }
6008 else
6009 inst.instruction |= 0xf;
6010 }
6011
6012 static void
6013 do_bfc (void)
6014 {
6015 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6016 constraint (msb > 32, _("bit-field extends past end of register"));
6017 /* The instruction encoding stores the LSB and MSB,
6018 not the LSB and width. */
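/* For example, "bfc r0, #4, #8" clears bits 4..11: lsb 4 goes into
   bits 11:7 and msb 11 (4 + 8 - 1) into bits 20:16. */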
6019 inst.instruction |= inst.operands[0].reg << 12;
6020 inst.instruction |= inst.operands[1].imm << 7;
6021 inst.instruction |= (msb - 1) << 16;
6022 }
6023
6024 static void
6025 do_bfi (void)
6026 {
6027 unsigned int msb;
6028
6029 /* #0 in second position is alternative syntax for bfc, which is
6030 the same instruction but with REG_PC in the Rm field. */
6031 if (!inst.operands[1].isreg)
6032 inst.operands[1].reg = REG_PC;
6033
6034 msb = inst.operands[2].imm + inst.operands[3].imm;
6035 constraint (msb > 32, _("bit-field extends past end of register"));
6036 /* The instruction encoding stores the LSB and MSB,
6037 not the LSB and width. */
6038 inst.instruction |= inst.operands[0].reg << 12;
6039 inst.instruction |= inst.operands[1].reg;
6040 inst.instruction |= inst.operands[2].imm << 7;
6041 inst.instruction |= (msb - 1) << 16;
6042 }
6043
6044 static void
6045 do_bfx (void)
6046 {
6047 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6048 _("bit-field extends past end of register"));
6049 inst.instruction |= inst.operands[0].reg << 12;
6050 inst.instruction |= inst.operands[1].reg;
6051 inst.instruction |= inst.operands[2].imm << 7;
6052 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6053 }
6054
6055 /* ARM V5 breakpoint instruction (argument parse)
6056 BKPT <16 bit unsigned immediate>
6057 Instruction is not conditional.
6058 The bit pattern given in insns[] has the COND_ALWAYS condition,
6059 and it is an error if the caller tried to override that. */
6060
6061 static void
6062 do_bkpt (void)
6063 {
6064 /* Top 12 of 16 bits to bits 19:8. */
6065 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6066
6067 /* Bottom 4 of 16 bits to bits 3:0. */
6068 inst.instruction |= inst.operands[0].imm & 0xf;
6069 }
6070
6071 static void
6072 encode_branch (int default_reloc)
6073 {
6074 if (inst.operands[0].hasreloc)
6075 {
6076 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6077 _("the only suffix valid here is '(plt)'"));
6078 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6079 }
6080 else
6081 {
6082 inst.reloc.type = default_reloc;
6083 }
6084 inst.reloc.pc_rel = 1;
6085 }
6086
6087 static void
6088 do_branch (void)
6089 {
6090 #ifdef OBJ_ELF
6091 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6092 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6093 else
6094 #endif
6095 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6096 }
6097
6098 static void
6099 do_bl (void)
6100 {
6101 #ifdef OBJ_ELF
6102 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6103 {
6104 if (inst.cond == COND_ALWAYS)
6105 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6106 else
6107 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6108 }
6109 else
6110 #endif
6111 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6112 }
6113
6114 /* ARM V5 branch-link-exchange instruction (argument parse)
6115 BLX <target_addr> i.e. BLX(1)
6116 BLX{<condition>} <Rm> i.e. BLX(2)
6117 Unfortunately, there are two different opcodes for this mnemonic.
6118 So, the insns[].value is not used, and the code here zaps values
6119 into inst.instruction.
6120 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6121
6122 static void
6123 do_blx (void)
6124 {
6125 if (inst.operands[0].isreg)
6126 {
6127 /* Arg is a register; the opcode provided by insns[] is correct.
6128 It is not illegal to do "blx pc", just useless. */
6129 if (inst.operands[0].reg == REG_PC)
6130 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6131
6132 inst.instruction |= inst.operands[0].reg;
6133 }
6134 else
6135 {
6136 /* Arg is an address; this instruction cannot be executed
6137 conditionally, and the opcode must be adjusted. */
6138 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6139 inst.instruction = 0xfa000000;
6140 #ifdef OBJ_ELF
6141 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6142 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6143 else
6144 #endif
6145 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6146 }
6147 }
6148
6149 static void
6150 do_bx (void)
6151 {
6152 if (inst.operands[0].reg == REG_PC)
6153 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6154
6155 inst.instruction |= inst.operands[0].reg;
6156 }
6157
6158
6159 /* ARM v5TEJ. Jump to Jazelle code. */
6160
6161 static void
6162 do_bxj (void)
6163 {
6164 if (inst.operands[0].reg == REG_PC)
6165 as_tsktsk (_("use of r15 in bxj is not really useful"));
6166
6167 inst.instruction |= inst.operands[0].reg;
6168 }
6169
6170 /* Co-processor data operation:
6171 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6172 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6173 static void
6174 do_cdp (void)
6175 {
6176 inst.instruction |= inst.operands[0].reg << 8;
6177 inst.instruction |= inst.operands[1].imm << 20;
6178 inst.instruction |= inst.operands[2].reg << 12;
6179 inst.instruction |= inst.operands[3].reg << 16;
6180 inst.instruction |= inst.operands[4].reg;
6181 inst.instruction |= inst.operands[5].imm << 5;
6182 }
6183
6184 static void
6185 do_cmp (void)
6186 {
6187 inst.instruction |= inst.operands[0].reg << 16;
6188 encode_arm_shifter_operand (1);
6189 }
6190
6191 /* Transfer between coprocessor and ARM registers.
6192 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6193 MRC2
6194 MCR{cond}
6195 MCR2
6196
6197 No special properties. */
6198
6199 static void
6200 do_co_reg (void)
6201 {
6202 inst.instruction |= inst.operands[0].reg << 8;
6203 inst.instruction |= inst.operands[1].imm << 21;
6204 inst.instruction |= inst.operands[2].reg << 12;
6205 inst.instruction |= inst.operands[3].reg << 16;
6206 inst.instruction |= inst.operands[4].reg;
6207 inst.instruction |= inst.operands[5].imm << 5;
6208 }
6209
6210 /* Transfer between coprocessor register and pair of ARM registers.
6211 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6212 MCRR2
6213 MRRC{cond}
6214 MRRC2
6215
6216 Two XScale instructions are special cases of these:
6217
6218 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6219 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6220
6221 Result unpredictable if Rd or Rn is R15. */
6222
6223 static void
6224 do_co_reg2c (void)
6225 {
6226 inst.instruction |= inst.operands[0].reg << 8;
6227 inst.instruction |= inst.operands[1].imm << 4;
6228 inst.instruction |= inst.operands[2].reg << 12;
6229 inst.instruction |= inst.operands[3].reg << 16;
6230 inst.instruction |= inst.operands[4].reg;
6231 }
6232
6233 static void
6234 do_cpsi (void)
6235 {
6236 inst.instruction |= inst.operands[0].imm << 6;
6237 inst.instruction |= inst.operands[1].imm;
6238 }
6239
6240 static void
6241 do_dbg (void)
6242 {
6243 inst.instruction |= inst.operands[0].imm;
6244 }
6245
6246 static void
6247 do_it (void)
6248 {
6249 /* There is no IT instruction in ARM mode. We
6250 process it but do not generate code for it. */
6251 inst.size = 0;
6252 }
6253
6254 static void
6255 do_ldmstm (void)
6256 {
6257 int base_reg = inst.operands[0].reg;
6258 int range = inst.operands[1].imm;
6259
6260 inst.instruction |= base_reg << 16;
6261 inst.instruction |= range;
6262
6263 if (inst.operands[1].writeback)
6264 inst.instruction |= LDM_TYPE_2_OR_3;
6265
6266 if (inst.operands[0].writeback)
6267 {
6268 inst.instruction |= WRITE_BACK;
6269 /* Check for unpredictable uses of writeback. */
6270 if (inst.instruction & LOAD_BIT)
6271 {
6272 /* Not allowed in LDM type 2. */
6273 if ((inst.instruction & LDM_TYPE_2_OR_3)
6274 && ((range & (1 << REG_PC)) == 0))
6275 as_warn (_("writeback of base register is UNPREDICTABLE"));
6276 /* Only allowed if base reg not in list for other types. */
6277 else if (range & (1 << base_reg))
6278 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6279 }
6280 else /* STM. */
6281 {
6282 /* Not allowed for type 2. */
6283 if (inst.instruction & LDM_TYPE_2_OR_3)
6284 as_warn (_("writeback of base register is UNPREDICTABLE"));
6285 /* Only allowed if base reg not in list, or first in list. */
6286 else if ((range & (1 << base_reg))
6287 && (range & ((1 << base_reg) - 1)))
6288 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6289 }
6290 }
6291 }
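/* A minimal sketch, not used by the assembler, of the STM writeback test
   above: writeback with the base register in the list only escapes the
   warning when no lower-numbered register is also listed.  The function
   name and the example register lists are hypothetical.  */
#if 0
static int
example_stm_writeback_warns (int base_reg, unsigned int range)
{
  /* Base register present in the list...  */
  return (range & (1 << base_reg))
	 /* ...and a lower-numbered register present as well.  */
	 && (range & ((1 << base_reg) - 1));
}
/* stmia r1!, {r1, r2}  ->  example_stm_writeback_warns (1, 0x0006) == 0
   stmia r1!, {r0, r1}  ->  example_stm_writeback_warns (1, 0x0003) != 0  */
#endif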
6292
6293 /* ARMv5TE load-consecutive (argument parse)
6294 Mode is like LDRH.
6295
6296 LDRccD R, mode
6297 STRccD R, mode. */
6298
6299 static void
6300 do_ldrd (void)
6301 {
6302 constraint (inst.operands[0].reg % 2 != 0,
6303 _("first destination register must be even"));
6304 constraint (inst.operands[1].present
6305 && inst.operands[1].reg != inst.operands[0].reg + 1,
6306 _("can only load two consecutive registers"));
6307 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6308 constraint (!inst.operands[2].isreg, _("'[' expected"));
6309
6310 if (!inst.operands[1].present)
6311 inst.operands[1].reg = inst.operands[0].reg + 1;
6312
6313 if (inst.instruction & LOAD_BIT)
6314 {
6315 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6316 register and the first register written; we have to diagnose
6317 overlap between the base and the second register written here. */
6318
6319 if (inst.operands[2].reg == inst.operands[1].reg
6320 && (inst.operands[2].writeback || inst.operands[2].postind))
6321 as_warn (_("base register written back, and overlaps "
6322 "second destination register"));
6323
6324 /* For an index-register load, the index register must not overlap the
6325 destination (even if not write-back). */
6326 else if (inst.operands[2].immisreg
6327 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6328 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6329 as_warn (_("index register overlaps destination register"));
6330 }
6331
6332 inst.instruction |= inst.operands[0].reg << 12;
6333 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6334 }
6335
6336 static void
6337 do_ldrex (void)
6338 {
6339 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6340 || inst.operands[1].postind || inst.operands[1].writeback
6341 || inst.operands[1].immisreg || inst.operands[1].shifted
6342 || inst.operands[1].negative
6343 /* This can arise if the programmer has written
6344 strex rN, rM, foo
6345 or if they have mistakenly used a register name as the last
6346         operand, e.g.:
6347 strex rN, rM, rX
6348 It is very difficult to distinguish between these two cases
6349         because "rX" might actually be a label, i.e. the register
6350 name has been occluded by a symbol of the same name. So we
6351 just generate a general 'bad addressing mode' type error
6352 message and leave it up to the programmer to discover the
6353 true cause and fix their mistake. */
6354 || (inst.operands[1].reg == REG_PC),
6355 BAD_ADDR_MODE);
6356
6357 constraint (inst.reloc.exp.X_op != O_constant
6358 || inst.reloc.exp.X_add_number != 0,
6359 _("offset must be zero in ARM encoding"));
6360
6361 inst.instruction |= inst.operands[0].reg << 12;
6362 inst.instruction |= inst.operands[1].reg << 16;
6363 inst.reloc.type = BFD_RELOC_UNUSED;
6364 }
6365
6366 static void
6367 do_ldrexd (void)
6368 {
6369 constraint (inst.operands[0].reg % 2 != 0,
6370 _("even register required"));
6371 constraint (inst.operands[1].present
6372 && inst.operands[1].reg != inst.operands[0].reg + 1,
6373 _("can only load two consecutive registers"));
6374 /* If op 1 were present and equal to PC, this function wouldn't
6375 have been called in the first place. */
6376 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6377
6378 inst.instruction |= inst.operands[0].reg << 12;
6379 inst.instruction |= inst.operands[2].reg << 16;
6380 }
6381
6382 static void
6383 do_ldst (void)
6384 {
6385 inst.instruction |= inst.operands[0].reg << 12;
6386 if (!inst.operands[1].isreg)
6387 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
6388 return;
6389 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
6390 }
6391
6392 static void
6393 do_ldstt (void)
6394 {
6395 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6396 reject [Rn,...]. */
6397 if (inst.operands[1].preind)
6398 {
6399 constraint (inst.reloc.exp.X_op != O_constant ||
6400 inst.reloc.exp.X_add_number != 0,
6401 _("this instruction requires a post-indexed address"));
6402
6403 inst.operands[1].preind = 0;
6404 inst.operands[1].postind = 1;
6405 inst.operands[1].writeback = 1;
6406 }
6407 inst.instruction |= inst.operands[0].reg << 12;
6408 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
6409 }
6410
6411 /* Halfword and signed-byte load/store operations. */
6412
6413 static void
6414 do_ldstv4 (void)
6415 {
6416 inst.instruction |= inst.operands[0].reg << 12;
6417 if (!inst.operands[1].isreg)
6418 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
6419 return;
6420 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
6421 }
6422
6423 static void
6424 do_ldsttv4 (void)
6425 {
6426 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6427 reject [Rn,...]. */
6428 if (inst.operands[1].preind)
6429 {
6430 constraint (inst.reloc.exp.X_op != O_constant ||
6431 inst.reloc.exp.X_add_number != 0,
6432 _("this instruction requires a post-indexed address"));
6433
6434 inst.operands[1].preind = 0;
6435 inst.operands[1].postind = 1;
6436 inst.operands[1].writeback = 1;
6437 }
6438 inst.instruction |= inst.operands[0].reg << 12;
6439 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
6440 }
6441
6442 /* Co-processor register load/store.
6443 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
6444 static void
6445 do_lstc (void)
6446 {
6447 inst.instruction |= inst.operands[0].reg << 8;
6448 inst.instruction |= inst.operands[1].reg << 12;
6449 encode_arm_cp_address (2, TRUE, TRUE, 0);
6450 }
6451
6452 static void
6453 do_mlas (void)
6454 {
6455 /* This restriction does not apply to mls (nor to mla in v6, but
6456 that's hard to detect at present). */
6457 if (inst.operands[0].reg == inst.operands[1].reg
6458 && !(inst.instruction & 0x00400000))
6459 as_tsktsk (_("rd and rm should be different in mla"));
6460
6461 inst.instruction |= inst.operands[0].reg << 16;
6462 inst.instruction |= inst.operands[1].reg;
6463 inst.instruction |= inst.operands[2].reg << 8;
6464 inst.instruction |= inst.operands[3].reg << 12;
6465
6466 }
6467
6468 static void
6469 do_mov (void)
6470 {
6471 inst.instruction |= inst.operands[0].reg << 12;
6472 encode_arm_shifter_operand (1);
6473 }
6474
6475 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
6476 static void
6477 do_mov16 (void)
6478 {
6479 bfd_vma imm;
6480 bfd_boolean top;
6481
6482 top = (inst.instruction & 0x00400000) != 0;
6483 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
6484               _(":lower16: not allowed in this instruction"));
6485 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
6486               _(":upper16: not allowed in this instruction"));
6487 inst.instruction |= inst.operands[0].reg << 12;
6488 if (inst.reloc.type == BFD_RELOC_UNUSED)
6489 {
6490 imm = inst.reloc.exp.X_add_number;
6491 /* The value is in two pieces: 0:11, 16:19. */
6492 inst.instruction |= (imm & 0x00000fff);
6493 inst.instruction |= (imm & 0x0000f000) << 4;
6494 }
6495 }
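/* Illustrative sketch, not used by the assembler: the imm16 split
   performed by do_mov16 above.  The function name and the sample
   immediate are hypothetical.  */
#if 0
static unsigned int
example_mov16_imm_fields (unsigned int imm16)
{
  /* Low twelve bits go to insn bits 0-11, the top nibble to bits 16-19.  */
  return (imm16 & 0x00000fff) | ((imm16 & 0x0000f000) << 4);
}
/* example_mov16_imm_fields (0xabcd) == 0x000a0bcd.  */
#endif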
6496
6497 static void
6498 do_mrs (void)
6499 {
6500 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6501 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
6502 != (PSR_c|PSR_f),
6503 _("'CPSR' or 'SPSR' expected"));
6504 inst.instruction |= inst.operands[0].reg << 12;
6505 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
6506 }
6507
6508 /* Two possible forms:
6509 "{C|S}PSR_<field>, Rm",
6510 "{C|S}PSR_f, #expression". */
6511
6512 static void
6513 do_msr (void)
6514 {
6515 inst.instruction |= inst.operands[0].imm;
6516 if (inst.operands[1].isreg)
6517 inst.instruction |= inst.operands[1].reg;
6518 else
6519 {
6520 inst.instruction |= INST_IMMEDIATE;
6521 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6522 inst.reloc.pc_rel = 0;
6523 }
6524 }
6525
6526 static void
6527 do_mul (void)
6528 {
6529 if (!inst.operands[2].present)
6530 inst.operands[2].reg = inst.operands[0].reg;
6531 inst.instruction |= inst.operands[0].reg << 16;
6532 inst.instruction |= inst.operands[1].reg;
6533 inst.instruction |= inst.operands[2].reg << 8;
6534
6535 if (inst.operands[0].reg == inst.operands[1].reg)
6536 as_tsktsk (_("rd and rm should be different in mul"));
6537 }
6538
6539 /* Long Multiply Parser
6540 UMULL RdLo, RdHi, Rm, Rs
6541 SMULL RdLo, RdHi, Rm, Rs
6542 UMLAL RdLo, RdHi, Rm, Rs
6543 SMLAL RdLo, RdHi, Rm, Rs. */
6544
6545 static void
6546 do_mull (void)
6547 {
6548 inst.instruction |= inst.operands[0].reg << 12;
6549 inst.instruction |= inst.operands[1].reg << 16;
6550 inst.instruction |= inst.operands[2].reg;
6551 inst.instruction |= inst.operands[3].reg << 8;
6552
6553 /* rdhi, rdlo and rm must all be different. */
6554 if (inst.operands[0].reg == inst.operands[1].reg
6555 || inst.operands[0].reg == inst.operands[2].reg
6556 || inst.operands[1].reg == inst.operands[2].reg)
6557 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6558 }
6559
6560 static void
6561 do_nop (void)
6562 {
6563 if (inst.operands[0].present)
6564 {
6565 /* Architectural NOP hints are CPSR sets with no bits selected. */
6566 inst.instruction &= 0xf0000000;
6567 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
6568 }
6569 }
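/* Illustrative sketch, not used by the assembler: the ARM-mode hint
   encoding produced by do_nop above is 0x0320f000 plus the hint number,
   under the original condition bits.  The function name is hypothetical.  */
#if 0
static unsigned int
example_arm_hint (unsigned int cond_bits, unsigned int hint)
{
  return (cond_bits & 0xf0000000) | (0x0320f000 + hint);
}
/* example_arm_hint (0xe0000000, 0) == 0xe320f000  (nop)
   example_arm_hint (0xe0000000, 1) == 0xe320f001  (yield)  */
#endif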
6570
6571 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6572 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6573 Condition defaults to COND_ALWAYS.
6574 Error if Rd, Rn or Rm are R15. */
6575
6576 static void
6577 do_pkhbt (void)
6578 {
6579 inst.instruction |= inst.operands[0].reg << 12;
6580 inst.instruction |= inst.operands[1].reg << 16;
6581 inst.instruction |= inst.operands[2].reg;
6582 if (inst.operands[3].present)
6583 encode_arm_shift (3);
6584 }
6585
6586 /* ARM V6 PKHTB (Argument Parse). */
6587
6588 static void
6589 do_pkhtb (void)
6590 {
6591 if (!inst.operands[3].present)
6592 {
6593 /* If the shift specifier is omitted, turn the instruction
6594 into pkhbt rd, rm, rn. */
6595 inst.instruction &= 0xfff00010;
6596 inst.instruction |= inst.operands[0].reg << 12;
6597 inst.instruction |= inst.operands[1].reg;
6598 inst.instruction |= inst.operands[2].reg << 16;
6599 }
6600 else
6601 {
6602 inst.instruction |= inst.operands[0].reg << 12;
6603 inst.instruction |= inst.operands[1].reg << 16;
6604 inst.instruction |= inst.operands[2].reg;
6605 encode_arm_shift (3);
6606 }
6607 }
6608
6609 /* ARMv5TE: Preload-Cache
6610
6611 PLD <addr_mode>
6612
6613 Syntactically, like LDR with B=1, W=0, L=1. */
6614
6615 static void
6616 do_pld (void)
6617 {
6618 constraint (!inst.operands[0].isreg,
6619 _("'[' expected after PLD mnemonic"));
6620 constraint (inst.operands[0].postind,
6621 _("post-indexed expression used in preload instruction"));
6622 constraint (inst.operands[0].writeback,
6623 _("writeback used in preload instruction"));
6624 constraint (!inst.operands[0].preind,
6625 _("unindexed addressing used in preload instruction"));
6626 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6627 }
6628
6629 /* ARMv7: PLI <addr_mode> */
6630 static void
6631 do_pli (void)
6632 {
6633 constraint (!inst.operands[0].isreg,
6634 _("'[' expected after PLI mnemonic"));
6635 constraint (inst.operands[0].postind,
6636 _("post-indexed expression used in preload instruction"));
6637 constraint (inst.operands[0].writeback,
6638 _("writeback used in preload instruction"));
6639 constraint (!inst.operands[0].preind,
6640 _("unindexed addressing used in preload instruction"));
6641 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6642 inst.instruction &= ~PRE_INDEX;
6643 }
6644
6645 static void
6646 do_push_pop (void)
6647 {
6648 inst.operands[1] = inst.operands[0];
6649 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
6650 inst.operands[0].isreg = 1;
6651 inst.operands[0].writeback = 1;
6652 inst.operands[0].reg = REG_SP;
6653 do_ldmstm ();
6654 }
6655
6656 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6657 word at the specified address and the following word
6658 respectively.
6659 Unconditionally executed.
6660 Error if Rn is R15. */
6661
6662 static void
6663 do_rfe (void)
6664 {
6665 inst.instruction |= inst.operands[0].reg << 16;
6666 if (inst.operands[0].writeback)
6667 inst.instruction |= WRITE_BACK;
6668 }
6669
6670 /* ARM V6 ssat (argument parse). */
6671
6672 static void
6673 do_ssat (void)
6674 {
6675 inst.instruction |= inst.operands[0].reg << 12;
6676 inst.instruction |= (inst.operands[1].imm - 1) << 16;
6677 inst.instruction |= inst.operands[2].reg;
6678
6679 if (inst.operands[3].present)
6680 encode_arm_shift (3);
6681 }
6682
6683 /* ARM V6 usat (argument parse). */
6684
6685 static void
6686 do_usat (void)
6687 {
6688 inst.instruction |= inst.operands[0].reg << 12;
6689 inst.instruction |= inst.operands[1].imm << 16;
6690 inst.instruction |= inst.operands[2].reg;
6691
6692 if (inst.operands[3].present)
6693 encode_arm_shift (3);
6694 }
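/* A minimal sketch, not used by the assembler, of the saturate-to field
   placed in bits 16-20 by do_ssat and do_usat above: the signed form
   stores the position minus one, the unsigned form stores it directly.
   The function name is hypothetical.  */
#if 0
static unsigned int
example_sat_field (int is_signed, unsigned int sat_to)
{
  return (is_signed ? sat_to - 1 : sat_to) << 16;
}
/* ssat rd, #16, rm  stores 15;  usat rd, #15, rm  also stores 15.  */
#endif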
6695
6696 /* ARM V6 ssat16 (argument parse). */
6697
6698 static void
6699 do_ssat16 (void)
6700 {
6701 inst.instruction |= inst.operands[0].reg << 12;
6702 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
6703 inst.instruction |= inst.operands[2].reg;
6704 }
6705
6706 static void
6707 do_usat16 (void)
6708 {
6709 inst.instruction |= inst.operands[0].reg << 12;
6710 inst.instruction |= inst.operands[1].imm << 16;
6711 inst.instruction |= inst.operands[2].reg;
6712 }
6713
6714 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6715 preserving the other bits.
6716
6717 setend <endian_specifier>, where <endian_specifier> is either
6718 BE or LE. */
6719
6720 static void
6721 do_setend (void)
6722 {
6723 if (inst.operands[0].imm)
6724 inst.instruction |= 0x200;
6725 }
6726
6727 static void
6728 do_shift (void)
6729 {
6730 unsigned int Rm = (inst.operands[1].present
6731 ? inst.operands[1].reg
6732 : inst.operands[0].reg);
6733
6734 inst.instruction |= inst.operands[0].reg << 12;
6735 inst.instruction |= Rm;
6736 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
6737 {
6738 inst.instruction |= inst.operands[2].reg << 8;
6739 inst.instruction |= SHIFT_BY_REG;
6740 }
6741 else
6742 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6743 }
6744
6745 static void
6746 do_smc (void)
6747 {
6748 inst.reloc.type = BFD_RELOC_ARM_SMC;
6749 inst.reloc.pc_rel = 0;
6750 }
6751
6752 static void
6753 do_swi (void)
6754 {
6755 inst.reloc.type = BFD_RELOC_ARM_SWI;
6756 inst.reloc.pc_rel = 0;
6757 }
6758
6759 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6760 SMLAxy{cond} Rd,Rm,Rs,Rn
6761 SMLAWy{cond} Rd,Rm,Rs,Rn
6762 Error if any register is R15. */
6763
6764 static void
6765 do_smla (void)
6766 {
6767 inst.instruction |= inst.operands[0].reg << 16;
6768 inst.instruction |= inst.operands[1].reg;
6769 inst.instruction |= inst.operands[2].reg << 8;
6770 inst.instruction |= inst.operands[3].reg << 12;
6771 }
6772
6773 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6774 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6775 Error if any register is R15.
6776 Warning if Rdlo == Rdhi. */
6777
6778 static void
6779 do_smlal (void)
6780 {
6781 inst.instruction |= inst.operands[0].reg << 12;
6782 inst.instruction |= inst.operands[1].reg << 16;
6783 inst.instruction |= inst.operands[2].reg;
6784 inst.instruction |= inst.operands[3].reg << 8;
6785
6786 if (inst.operands[0].reg == inst.operands[1].reg)
6787 as_tsktsk (_("rdhi and rdlo must be different"));
6788 }
6789
6790 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6791 SMULxy{cond} Rd,Rm,Rs
6792 Error if any register is R15. */
6793
6794 static void
6795 do_smul (void)
6796 {
6797 inst.instruction |= inst.operands[0].reg << 16;
6798 inst.instruction |= inst.operands[1].reg;
6799 inst.instruction |= inst.operands[2].reg << 8;
6800 }
6801
6802 /* ARM V6 srs (argument parse). */
6803
6804 static void
6805 do_srs (void)
6806 {
6807 inst.instruction |= inst.operands[0].imm;
6808 if (inst.operands[0].writeback)
6809 inst.instruction |= WRITE_BACK;
6810 }
6811
6812 /* ARM V6 strex (argument parse). */
6813
6814 static void
6815 do_strex (void)
6816 {
6817 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
6818 || inst.operands[2].postind || inst.operands[2].writeback
6819 || inst.operands[2].immisreg || inst.operands[2].shifted
6820 || inst.operands[2].negative
6821 /* See comment in do_ldrex(). */
6822 || (inst.operands[2].reg == REG_PC),
6823 BAD_ADDR_MODE);
6824
6825 constraint (inst.operands[0].reg == inst.operands[1].reg
6826 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
6827
6828 constraint (inst.reloc.exp.X_op != O_constant
6829 || inst.reloc.exp.X_add_number != 0,
6830 _("offset must be zero in ARM encoding"));
6831
6832 inst.instruction |= inst.operands[0].reg << 12;
6833 inst.instruction |= inst.operands[1].reg;
6834 inst.instruction |= inst.operands[2].reg << 16;
6835 inst.reloc.type = BFD_RELOC_UNUSED;
6836 }
6837
6838 static void
6839 do_strexd (void)
6840 {
6841 constraint (inst.operands[1].reg % 2 != 0,
6842 _("even register required"));
6843 constraint (inst.operands[2].present
6844 && inst.operands[2].reg != inst.operands[1].reg + 1,
6845 _("can only store two consecutive registers"));
6846 /* If op 2 were present and equal to PC, this function wouldn't
6847 have been called in the first place. */
6848 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
6849
6850 constraint (inst.operands[0].reg == inst.operands[1].reg
6851 || inst.operands[0].reg == inst.operands[1].reg + 1
6852 || inst.operands[0].reg == inst.operands[3].reg,
6853 BAD_OVERLAP);
6854
6855 inst.instruction |= inst.operands[0].reg << 12;
6856 inst.instruction |= inst.operands[1].reg;
6857 inst.instruction |= inst.operands[3].reg << 16;
6858 }
6859
6860 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6861    extends it to 32 bits, and adds the result to a value in another
6862 register. You can specify a rotation by 0, 8, 16, or 24 bits
6863 before extracting the 16-bit value.
6864 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6865 Condition defaults to COND_ALWAYS.
6866 Error if any register uses R15. */
6867
6868 static void
6869 do_sxtah (void)
6870 {
6871 inst.instruction |= inst.operands[0].reg << 12;
6872 inst.instruction |= inst.operands[1].reg << 16;
6873 inst.instruction |= inst.operands[2].reg;
6874 inst.instruction |= inst.operands[3].imm << 10;
6875 }
6876
6877 /* ARM V6 SXTH.
6878
6879 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6880 Condition defaults to COND_ALWAYS.
6881 Error if any register uses R15. */
6882
6883 static void
6884 do_sxth (void)
6885 {
6886 inst.instruction |= inst.operands[0].reg << 12;
6887 inst.instruction |= inst.operands[1].reg;
6888 inst.instruction |= inst.operands[2].imm << 10;
6889 }
6890 \f
6891 /* VFP instructions. In a logical order: SP variant first, monad
6892 before dyad, arithmetic then move then load/store. */
6893
6894 static void
6895 do_vfp_sp_monadic (void)
6896 {
6897 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6898 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6899 }
6900
6901 static void
6902 do_vfp_sp_dyadic (void)
6903 {
6904 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6905 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6906 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6907 }
6908
6909 static void
6910 do_vfp_sp_compare_z (void)
6911 {
6912 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6913 }
6914
6915 static void
6916 do_vfp_dp_sp_cvt (void)
6917 {
6918 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6919 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6920 }
6921
6922 static void
6923 do_vfp_sp_dp_cvt (void)
6924 {
6925 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6926 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
6927 }
6928
6929 static void
6930 do_vfp_reg_from_sp (void)
6931 {
6932 inst.instruction |= inst.operands[0].reg << 12;
6933 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6934 }
6935
6936 static void
6937 do_vfp_reg2_from_sp2 (void)
6938 {
6939 constraint (inst.operands[2].imm != 2,
6940 _("only two consecutive VFP SP registers allowed here"));
6941 inst.instruction |= inst.operands[0].reg << 12;
6942 inst.instruction |= inst.operands[1].reg << 16;
6943 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6944 }
6945
6946 static void
6947 do_vfp_sp_from_reg (void)
6948 {
6949 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
6950 inst.instruction |= inst.operands[1].reg << 12;
6951 }
6952
6953 static void
6954 do_vfp_sp2_from_reg2 (void)
6955 {
6956 constraint (inst.operands[0].imm != 2,
6957 _("only two consecutive VFP SP registers allowed here"));
6958 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
6959 inst.instruction |= inst.operands[1].reg << 12;
6960 inst.instruction |= inst.operands[2].reg << 16;
6961 }
6962
6963 static void
6964 do_vfp_sp_ldst (void)
6965 {
6966 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6967 encode_arm_cp_address (1, FALSE, TRUE, 0);
6968 }
6969
6970 static void
6971 do_vfp_dp_ldst (void)
6972 {
6973 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6974 encode_arm_cp_address (1, FALSE, TRUE, 0);
6975 }
6976
6977
6978 static void
6979 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
6980 {
6981 if (inst.operands[0].writeback)
6982 inst.instruction |= WRITE_BACK;
6983 else
6984 constraint (ldstm_type != VFP_LDSTMIA,
6985 _("this addressing mode requires base-register writeback"));
6986 inst.instruction |= inst.operands[0].reg << 16;
6987 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
6988 inst.instruction |= inst.operands[1].imm;
6989 }
6990
6991 static void
6992 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
6993 {
6994 int count;
6995
6996 if (inst.operands[0].writeback)
6997 inst.instruction |= WRITE_BACK;
6998 else
6999 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7000 _("this addressing mode requires base-register writeback"));
7001
7002 inst.instruction |= inst.operands[0].reg << 16;
7003 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7004
7005 count = inst.operands[1].imm << 1;
7006 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7007 count += 1;
7008
7009 inst.instruction |= count;
7010 }
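/* Illustrative sketch, not used by the assembler: the transfer-length
   field computed by vfp_dp_ldstm above.  Each double register occupies
   two words, and the FLDMX/FSTMX forms add one.  The function name is
   hypothetical.  */
#if 0
static int
example_vfp_dp_count (int nregs, int is_x_form)
{
  return (nregs << 1) + (is_x_form ? 1 : 0);
}
/* fldmiad r0!, {d1-d3}  ->  count = 6
   fldmiax r0!, {d1-d3}  ->  count = 7  */
#endif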
7011
7012 static void
7013 do_vfp_sp_ldstmia (void)
7014 {
7015 vfp_sp_ldstm (VFP_LDSTMIA);
7016 }
7017
7018 static void
7019 do_vfp_sp_ldstmdb (void)
7020 {
7021 vfp_sp_ldstm (VFP_LDSTMDB);
7022 }
7023
7024 static void
7025 do_vfp_dp_ldstmia (void)
7026 {
7027 vfp_dp_ldstm (VFP_LDSTMIA);
7028 }
7029
7030 static void
7031 do_vfp_dp_ldstmdb (void)
7032 {
7033 vfp_dp_ldstm (VFP_LDSTMDB);
7034 }
7035
7036 static void
7037 do_vfp_xp_ldstmia (void)
7038 {
7039 vfp_dp_ldstm (VFP_LDSTMIAX);
7040 }
7041
7042 static void
7043 do_vfp_xp_ldstmdb (void)
7044 {
7045 vfp_dp_ldstm (VFP_LDSTMDBX);
7046 }
7047
7048 static void
7049 do_vfp_dp_rd_rm (void)
7050 {
7051 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7052 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7053 }
7054
7055 static void
7056 do_vfp_dp_rn_rd (void)
7057 {
7058 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7059 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7060 }
7061
7062 static void
7063 do_vfp_dp_rd_rn (void)
7064 {
7065 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7066 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7067 }
7068
7069 static void
7070 do_vfp_dp_rd_rn_rm (void)
7071 {
7072 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7073 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7074 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7075 }
7076
7077 static void
7078 do_vfp_dp_rd (void)
7079 {
7080 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7081 }
7082
7083 static void
7084 do_vfp_dp_rm_rd_rn (void)
7085 {
7086 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7087 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7088 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7089 }
7090
7091 /* VFPv3 instructions. */
7092 static void
7093 do_vfp_sp_const (void)
7094 {
7095 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7096 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7097 inst.instruction |= (inst.operands[1].imm >> 4);
7098 }
7099
7100 static void
7101 do_vfp_dp_const (void)
7102 {
7103 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7104 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7105 inst.instruction |= (inst.operands[1].imm >> 4);
7106 }
7107
7108 static void
7109 vfp_conv (int srcsize)
7110 {
7111 unsigned immbits = srcsize - inst.operands[1].imm;
7112 inst.instruction |= (immbits & 1) << 5;
7113 inst.instruction |= (immbits >> 1);
7114 }
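/* A minimal sketch, not used by the assembler, of the fixed-point
   fraction-size packing done by vfp_conv above: immbits is the source
   size minus the fraction bits, its low bit lands in instruction bit 5
   and the remaining bits in bits 0-3.  The function name is
   hypothetical.  */
#if 0
static unsigned int
example_vfp_conv_fields (int srcsize, int frac_bits)
{
  unsigned int immbits = srcsize - frac_bits;
  return ((immbits & 1) << 5) | (immbits >> 1);
}
/* example_vfp_conv_fields (32, 16) == 0x8  (immbits = 16).  */
#endif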
7115
7116 static void
7117 do_vfp_sp_conv_16 (void)
7118 {
7119 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7120 vfp_conv (16);
7121 }
7122
7123 static void
7124 do_vfp_dp_conv_16 (void)
7125 {
7126 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7127 vfp_conv (16);
7128 }
7129
7130 static void
7131 do_vfp_sp_conv_32 (void)
7132 {
7133 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7134 vfp_conv (32);
7135 }
7136
7137 static void
7138 do_vfp_dp_conv_32 (void)
7139 {
7140 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7141 vfp_conv (32);
7142 }
7143
7144 \f
7145 /* FPA instructions. Also in a logical order. */
7146
7147 static void
7148 do_fpa_cmp (void)
7149 {
7150 inst.instruction |= inst.operands[0].reg << 16;
7151 inst.instruction |= inst.operands[1].reg;
7152 }
7153
7154 static void
7155 do_fpa_ldmstm (void)
7156 {
7157 inst.instruction |= inst.operands[0].reg << 12;
7158 switch (inst.operands[1].imm)
7159 {
7160 case 1: inst.instruction |= CP_T_X; break;
7161 case 2: inst.instruction |= CP_T_Y; break;
7162 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7163 case 4: break;
7164 default: abort ();
7165 }
7166
7167 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7168 {
7169 /* The instruction specified "ea" or "fd", so we can only accept
7170 [Rn]{!}. The instruction does not really support stacking or
7171 unstacking, so we have to emulate these by setting appropriate
7172 bits and offsets. */
7173 constraint (inst.reloc.exp.X_op != O_constant
7174 || inst.reloc.exp.X_add_number != 0,
7175 _("this instruction does not support indexing"));
7176
7177 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7178 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7179
7180 if (!(inst.instruction & INDEX_UP))
7181 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7182
7183 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7184 {
7185 inst.operands[2].preind = 0;
7186 inst.operands[2].postind = 1;
7187 }
7188 }
7189
7190 encode_arm_cp_address (2, TRUE, TRUE, 0);
7191 }
7192 \f
7193 /* iWMMXt instructions: strictly in alphabetical order. */
7194
7195 static void
7196 do_iwmmxt_tandorc (void)
7197 {
7198 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7199 }
7200
7201 static void
7202 do_iwmmxt_textrc (void)
7203 {
7204 inst.instruction |= inst.operands[0].reg << 12;
7205 inst.instruction |= inst.operands[1].imm;
7206 }
7207
7208 static void
7209 do_iwmmxt_textrm (void)
7210 {
7211 inst.instruction |= inst.operands[0].reg << 12;
7212 inst.instruction |= inst.operands[1].reg << 16;
7213 inst.instruction |= inst.operands[2].imm;
7214 }
7215
7216 static void
7217 do_iwmmxt_tinsr (void)
7218 {
7219 inst.instruction |= inst.operands[0].reg << 16;
7220 inst.instruction |= inst.operands[1].reg << 12;
7221 inst.instruction |= inst.operands[2].imm;
7222 }
7223
7224 static void
7225 do_iwmmxt_tmia (void)
7226 {
7227 inst.instruction |= inst.operands[0].reg << 5;
7228 inst.instruction |= inst.operands[1].reg;
7229 inst.instruction |= inst.operands[2].reg << 12;
7230 }
7231
7232 static void
7233 do_iwmmxt_waligni (void)
7234 {
7235 inst.instruction |= inst.operands[0].reg << 12;
7236 inst.instruction |= inst.operands[1].reg << 16;
7237 inst.instruction |= inst.operands[2].reg;
7238 inst.instruction |= inst.operands[3].imm << 20;
7239 }
7240
7241 static void
7242 do_iwmmxt_wmov (void)
7243 {
7244 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7245 inst.instruction |= inst.operands[0].reg << 12;
7246 inst.instruction |= inst.operands[1].reg << 16;
7247 inst.instruction |= inst.operands[1].reg;
7248 }
7249
7250 static void
7251 do_iwmmxt_wldstbh (void)
7252 {
7253 int reloc;
7254 inst.instruction |= inst.operands[0].reg << 12;
7255 if (thumb_mode)
7256 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7257 else
7258 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7259 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7260 }
7261
7262 static void
7263 do_iwmmxt_wldstw (void)
7264 {
7265 /* RIWR_RIWC clears .isreg for a control register. */
7266 if (!inst.operands[0].isreg)
7267 {
7268 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7269 inst.instruction |= 0xf0000000;
7270 }
7271
7272 inst.instruction |= inst.operands[0].reg << 12;
7273 encode_arm_cp_address (1, TRUE, TRUE, 0);
7274 }
7275
7276 static void
7277 do_iwmmxt_wldstd (void)
7278 {
7279 inst.instruction |= inst.operands[0].reg << 12;
7280 encode_arm_cp_address (1, TRUE, FALSE, 0);
7281 }
7282
7283 static void
7284 do_iwmmxt_wshufh (void)
7285 {
7286 inst.instruction |= inst.operands[0].reg << 12;
7287 inst.instruction |= inst.operands[1].reg << 16;
7288 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7289 inst.instruction |= (inst.operands[2].imm & 0x0f);
7290 }
7291
7292 static void
7293 do_iwmmxt_wzero (void)
7294 {
7295 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7296 inst.instruction |= inst.operands[0].reg;
7297 inst.instruction |= inst.operands[0].reg << 12;
7298 inst.instruction |= inst.operands[0].reg << 16;
7299 }
7300 \f
7301 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7302 operations first, then control, shift, and load/store. */
7303
7304 /* Insns like "foo X,Y,Z". */
7305
7306 static void
7307 do_mav_triple (void)
7308 {
7309 inst.instruction |= inst.operands[0].reg << 16;
7310 inst.instruction |= inst.operands[1].reg;
7311 inst.instruction |= inst.operands[2].reg << 12;
7312 }
7313
7314 /* Insns like "foo W,X,Y,Z",
7315    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7316
7317 static void
7318 do_mav_quad (void)
7319 {
7320 inst.instruction |= inst.operands[0].reg << 5;
7321 inst.instruction |= inst.operands[1].reg << 12;
7322 inst.instruction |= inst.operands[2].reg << 16;
7323 inst.instruction |= inst.operands[3].reg;
7324 }
7325
7326 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7327 static void
7328 do_mav_dspsc (void)
7329 {
7330 inst.instruction |= inst.operands[1].reg << 12;
7331 }
7332
7333 /* Maverick shift immediate instructions.
7334 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7335 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7336
7337 static void
7338 do_mav_shift (void)
7339 {
7340 int imm = inst.operands[2].imm;
7341
7342 inst.instruction |= inst.operands[0].reg << 12;
7343 inst.instruction |= inst.operands[1].reg << 16;
7344
7345 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7346 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7347 Bit 4 should be 0. */
7348 imm = (imm & 0xf) | ((imm & 0x70) << 1);
7349
7350 inst.instruction |= imm;
7351 }
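/* Illustrative sketch, not used by the assembler, of the Maverick
   shift-immediate scatter performed above; the function name is
   hypothetical.  */
#if 0
static int
example_mav_shift_imm (int imm)
{
  /* Immediate bits 0-3 stay put, bits 4-6 move up by one place, and
     bit 4 of the result is left clear.  */
  return (imm & 0xf) | ((imm & 0x70) << 1);
}
/* example_mav_shift_imm (0x25) == 0x45.  */
#endif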
7352 \f
7353 /* XScale instructions. Also sorted arithmetic before move. */
7354
7355 /* Xscale multiply-accumulate (argument parse)
7356 MIAcc acc0,Rm,Rs
7357 MIAPHcc acc0,Rm,Rs
7358 MIAxycc acc0,Rm,Rs. */
7359
7360 static void
7361 do_xsc_mia (void)
7362 {
7363 inst.instruction |= inst.operands[1].reg;
7364 inst.instruction |= inst.operands[2].reg << 12;
7365 }
7366
7367 /* Xscale move-accumulator-register (argument parse)
7368
7369 MARcc acc0,RdLo,RdHi. */
7370
7371 static void
7372 do_xsc_mar (void)
7373 {
7374 inst.instruction |= inst.operands[1].reg << 12;
7375 inst.instruction |= inst.operands[2].reg << 16;
7376 }
7377
7378 /* Xscale move-register-accumulator (argument parse)
7379
7380 MRAcc RdLo,RdHi,acc0. */
7381
7382 static void
7383 do_xsc_mra (void)
7384 {
7385 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
7386 inst.instruction |= inst.operands[0].reg << 12;
7387 inst.instruction |= inst.operands[1].reg << 16;
7388 }
7389 \f
7390 /* Encoding functions relevant only to Thumb. */
7391
7392 /* inst.operands[i] is a shifted-register operand; encode
7393 it into inst.instruction in the format used by Thumb32. */
7394
7395 static void
7396 encode_thumb32_shifted_operand (int i)
7397 {
7398 unsigned int value = inst.reloc.exp.X_add_number;
7399 unsigned int shift = inst.operands[i].shift_kind;
7400
7401 constraint (inst.operands[i].immisreg,
7402 _("shift by register not allowed in thumb mode"));
7403 inst.instruction |= inst.operands[i].reg;
7404 if (shift == SHIFT_RRX)
7405 inst.instruction |= SHIFT_ROR << 4;
7406 else
7407 {
7408 constraint (inst.reloc.exp.X_op != O_constant,
7409 _("expression too complex"));
7410
7411 constraint (value > 32
7412 || (value == 32 && (shift == SHIFT_LSL
7413 || shift == SHIFT_ROR)),
7414 _("shift expression is too large"));
7415
7416 if (value == 0)
7417 shift = SHIFT_LSL;
7418 else if (value == 32)
7419 value = 0;
7420
7421 inst.instruction |= shift << 4;
7422 inst.instruction |= (value & 0x1c) << 10;
7423 inst.instruction |= (value & 0x03) << 6;
7424 }
7425 }
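/* A minimal sketch, not used by the assembler, of the Thumb32 shift-amount
   split performed above: the five-bit amount becomes imm3 (insn bits
   12-14) and imm2 (insn bits 6-7).  The function name is hypothetical.  */
#if 0
static unsigned int
example_t32_shift_amount (unsigned int value)
{
  return ((value & 0x1c) << 10) | ((value & 0x03) << 6);
}
/* example_t32_shift_amount (5) == 0x1040.  */
#endif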
7426
7427
7428 /* inst.operands[i] was set up by parse_address. Encode it into a
7429 Thumb32 format load or store instruction. Reject forms that cannot
7430 be used with such instructions. If is_t is true, reject forms that
7431 cannot be used with a T instruction; if is_d is true, reject forms
7432 that cannot be used with a D instruction. */
7433
7434 static void
7435 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
7436 {
7437 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7438
7439 constraint (!inst.operands[i].isreg,
7440 _("Instruction does not support =N addresses"));
7441
7442 inst.instruction |= inst.operands[i].reg << 16;
7443 if (inst.operands[i].immisreg)
7444 {
7445 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
7446 constraint (is_t || is_d, _("cannot use register index with this instruction"));
7447 constraint (inst.operands[i].negative,
7448 _("Thumb does not support negative register indexing"));
7449 constraint (inst.operands[i].postind,
7450 _("Thumb does not support register post-indexing"));
7451 constraint (inst.operands[i].writeback,
7452 _("Thumb does not support register indexing with writeback"));
7453 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
7454 _("Thumb supports only LSL in shifted register indexing"));
7455
7456 inst.instruction |= inst.operands[i].imm;
7457 if (inst.operands[i].shifted)
7458 {
7459 constraint (inst.reloc.exp.X_op != O_constant,
7460 _("expression too complex"));
7461 constraint (inst.reloc.exp.X_add_number < 0
7462 || inst.reloc.exp.X_add_number > 3,
7463 _("shift out of range"));
7464 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7465 }
7466 inst.reloc.type = BFD_RELOC_UNUSED;
7467 }
7468 else if (inst.operands[i].preind)
7469 {
7470 constraint (is_pc && inst.operands[i].writeback,
7471 _("cannot use writeback with PC-relative addressing"));
7472 constraint (is_t && inst.operands[i].writeback,
7473 _("cannot use writeback with this instruction"));
7474
7475 if (is_d)
7476 {
7477 inst.instruction |= 0x01000000;
7478 if (inst.operands[i].writeback)
7479 inst.instruction |= 0x00200000;
7480 }
7481 else
7482 {
7483 inst.instruction |= 0x00000c00;
7484 if (inst.operands[i].writeback)
7485 inst.instruction |= 0x00000100;
7486 }
7487 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7488 }
7489 else if (inst.operands[i].postind)
7490 {
7491 assert (inst.operands[i].writeback);
7492 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
7493 constraint (is_t, _("cannot use post-indexing with this instruction"));
7494
7495 if (is_d)
7496 inst.instruction |= 0x00200000;
7497 else
7498 inst.instruction |= 0x00000900;
7499 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7500 }
7501 else /* unindexed - only for coprocessor */
7502 inst.error = _("instruction does not accept unindexed addressing");
7503 }
7504
7505 /* Table of Thumb instructions which exist in both 16- and 32-bit
7506 encodings (the latter only in post-V6T2 cores). The index is the
7507 value used in the insns table below. When there is more than one
7508 possible 16-bit encoding for the instruction, this table always
7509 holds variant (1).
7510 Also contains several pseudo-instructions used during relaxation. */
7511 #define T16_32_TAB \
7512 X(adc, 4140, eb400000), \
7513 X(adcs, 4140, eb500000), \
7514 X(add, 1c00, eb000000), \
7515 X(adds, 1c00, eb100000), \
7516 X(addi, 0000, f1000000), \
7517 X(addis, 0000, f1100000), \
7518 X(add_pc,000f, f20f0000), \
7519 X(add_sp,000d, f10d0000), \
7520 X(adr, 000f, f20f0000), \
7521 X(and, 4000, ea000000), \
7522 X(ands, 4000, ea100000), \
7523 X(asr, 1000, fa40f000), \
7524 X(asrs, 1000, fa50f000), \
7525 X(b, e000, f000b000), \
7526 X(bcond, d000, f0008000), \
7527 X(bic, 4380, ea200000), \
7528 X(bics, 4380, ea300000), \
7529 X(cmn, 42c0, eb100f00), \
7530 X(cmp, 2800, ebb00f00), \
7531 X(cpsie, b660, f3af8400), \
7532 X(cpsid, b670, f3af8600), \
7533 X(cpy, 4600, ea4f0000), \
7534 X(dec_sp,80dd, f1bd0d00), \
7535 X(eor, 4040, ea800000), \
7536 X(eors, 4040, ea900000), \
7537 X(inc_sp,00dd, f10d0d00), \
7538 X(ldmia, c800, e8900000), \
7539 X(ldr, 6800, f8500000), \
7540 X(ldrb, 7800, f8100000), \
7541 X(ldrh, 8800, f8300000), \
7542 X(ldrsb, 5600, f9100000), \
7543 X(ldrsh, 5e00, f9300000), \
7544 X(ldr_pc,4800, f85f0000), \
7545 X(ldr_pc2,4800, f85f0000), \
7546 X(ldr_sp,9800, f85d0000), \
7547 X(lsl, 0000, fa00f000), \
7548 X(lsls, 0000, fa10f000), \
7549 X(lsr, 0800, fa20f000), \
7550 X(lsrs, 0800, fa30f000), \
7551 X(mov, 2000, ea4f0000), \
7552 X(movs, 2000, ea5f0000), \
7553 X(mul, 4340, fb00f000), \
7554 X(muls, 4340, ffffffff), /* no 32b muls */ \
7555 X(mvn, 43c0, ea6f0000), \
7556 X(mvns, 43c0, ea7f0000), \
7557 X(neg, 4240, f1c00000), /* rsb #0 */ \
7558 X(negs, 4240, f1d00000), /* rsbs #0 */ \
7559 X(orr, 4300, ea400000), \
7560 X(orrs, 4300, ea500000), \
7561 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
7562 X(push, b400, e92d0000), /* stmdb sp!,... */ \
7563 X(rev, ba00, fa90f080), \
7564 X(rev16, ba40, fa90f090), \
7565 X(revsh, bac0, fa90f0b0), \
7566 X(ror, 41c0, fa60f000), \
7567 X(rors, 41c0, fa70f000), \
7568 X(sbc, 4180, eb600000), \
7569 X(sbcs, 4180, eb700000), \
7570 X(stmia, c000, e8800000), \
7571 X(str, 6000, f8400000), \
7572 X(strb, 7000, f8000000), \
7573 X(strh, 8000, f8200000), \
7574 X(str_sp,9000, f84d0000), \
7575 X(sub, 1e00, eba00000), \
7576 X(subs, 1e00, ebb00000), \
7577 X(subi, 8000, f1a00000), \
7578 X(subis, 8000, f1b00000), \
7579 X(sxtb, b240, fa4ff080), \
7580 X(sxth, b200, fa0ff080), \
7581 X(tst, 4200, ea100f00), \
7582 X(uxtb, b2c0, fa5ff080), \
7583 X(uxth, b280, fa1ff080), \
7584 X(nop, bf00, f3af8000), \
7585 X(yield, bf10, f3af8001), \
7586 X(wfe, bf20, f3af8002), \
7587 X(wfi, bf30, f3af8003), \
7588        X(sev,   bf40, f3af8004), /* SEV.W hint.  */
7589
7590 /* To catch errors in encoding functions, the codes are all offset by
7591 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
7592 as 16-bit instructions. */
7593 #define X(a,b,c) T_MNEM_##a
7594 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
7595 #undef X
7596
7597 #define X(a,b,c) 0x##b
7598 static const unsigned short thumb_op16[] = { T16_32_TAB };
7599 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
7600 #undef X
7601
7602 #define X(a,b,c) 0x##c
7603 static const unsigned int thumb_op32[] = { T16_32_TAB };
7604 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
7605 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
7606 #undef X
7607 #undef T16_32_TAB
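/* Illustrative sketch, not compiled in: how the lookup macros above behave
   for "add", which is the third code after T16_32_OFFSET.  The function
   name is hypothetical.  */
#if 0
static void
example_thumb_op_lookup (void)
{
  unsigned short op16 = THUMB_OP16 (T_MNEM_add);	/* 0x1c00 */
  unsigned int op32 = THUMB_OP32 (T_MNEM_add);		/* 0xeb000000 */
  unsigned int sets = THUMB_SETS_FLAGS (T_MNEM_adds);	/* nonzero */
  (void) op16; (void) op32; (void) sets;
}
#endif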
7608
7609 /* Thumb instruction encoders, in alphabetical order. */
7610
7611 /* ADDW or SUBW. */
7612 static void
7613 do_t_add_sub_w (void)
7614 {
7615 int Rd, Rn;
7616
7617 Rd = inst.operands[0].reg;
7618 Rn = inst.operands[1].reg;
7619
7620 constraint (Rd == 15, _("PC not allowed as destination"));
7621 inst.instruction |= (Rn << 16) | (Rd << 8);
7622 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
7623 }
7624
7625 /* Parse an add or subtract instruction. We get here with inst.instruction
7626 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
7627
7628 static void
7629 do_t_add_sub (void)
7630 {
7631 int Rd, Rs, Rn;
7632
7633 Rd = inst.operands[0].reg;
7634 Rs = (inst.operands[1].present
7635 ? inst.operands[1].reg /* Rd, Rs, foo */
7636 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7637
7638 if (unified_syntax)
7639 {
7640 bfd_boolean flags;
7641 bfd_boolean narrow;
7642 int opcode;
7643
7644 flags = (inst.instruction == T_MNEM_adds
7645 || inst.instruction == T_MNEM_subs);
7646 if (flags)
7647 narrow = (current_it_mask == 0);
7648 else
7649 narrow = (current_it_mask != 0);
7650 if (!inst.operands[2].isreg)
7651 {
7652 opcode = 0;
7653 if (inst.size_req != 4)
7654 {
7655 int add;
7656
7657 add = (inst.instruction == T_MNEM_add
7658 || inst.instruction == T_MNEM_adds);
7659 /* Attempt to use a narrow opcode, with relaxation if
7660 appropriate. */
7661 if (Rd == REG_SP && Rs == REG_SP && !flags)
7662 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
7663 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
7664 opcode = T_MNEM_add_sp;
7665 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
7666 opcode = T_MNEM_add_pc;
7667 else if (Rd <= 7 && Rs <= 7 && narrow)
7668 {
7669 if (flags)
7670 opcode = add ? T_MNEM_addis : T_MNEM_subis;
7671 else
7672 opcode = add ? T_MNEM_addi : T_MNEM_subi;
7673 }
7674 if (opcode)
7675 {
7676 inst.instruction = THUMB_OP16(opcode);
7677 inst.instruction |= (Rd << 4) | Rs;
7678 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7679 if (inst.size_req != 2)
7680 inst.relax = opcode;
7681 }
7682 else
7683 constraint (inst.size_req == 2, BAD_HIREG);
7684 }
7685 if (inst.size_req == 4
7686 || (inst.size_req != 2 && !opcode))
7687 {
7688 /* ??? Convert large immediates to addw/subw. */
7689 inst.instruction = THUMB_OP32 (inst.instruction);
7690 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7691 inst.instruction |= inst.operands[0].reg << 8;
7692 inst.instruction |= inst.operands[1].reg << 16;
7693 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7694 }
7695 }
7696 else
7697 {
7698 Rn = inst.operands[2].reg;
7699 /* See if we can do this with a 16-bit instruction. */
7700 if (!inst.operands[2].shifted && inst.size_req != 4)
7701 {
7702 if (Rd > 7 || Rs > 7 || Rn > 7)
7703 narrow = FALSE;
7704
7705 if (narrow)
7706 {
7707 inst.instruction = ((inst.instruction == T_MNEM_adds
7708 || inst.instruction == T_MNEM_add)
7709 ? T_OPCODE_ADD_R3
7710 : T_OPCODE_SUB_R3);
7711 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7712 return;
7713 }
7714
7715 if (inst.instruction == T_MNEM_add)
7716 {
7717 if (Rd == Rs)
7718 {
7719 inst.instruction = T_OPCODE_ADD_HI;
7720 inst.instruction |= (Rd & 8) << 4;
7721 inst.instruction |= (Rd & 7);
7722 inst.instruction |= Rn << 3;
7723 return;
7724 }
7725 /* ... because addition is commutative! */
7726 else if (Rd == Rn)
7727 {
7728 inst.instruction = T_OPCODE_ADD_HI;
7729 inst.instruction |= (Rd & 8) << 4;
7730 inst.instruction |= (Rd & 7);
7731 inst.instruction |= Rs << 3;
7732 return;
7733 }
7734 }
7735 }
7736 /* If we get here, it can't be done in 16 bits. */
7737 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
7738 _("shift must be constant"));
7739 inst.instruction = THUMB_OP32 (inst.instruction);
7740 inst.instruction |= Rd << 8;
7741 inst.instruction |= Rs << 16;
7742 encode_thumb32_shifted_operand (2);
7743 }
7744 }
7745 else
7746 {
7747 constraint (inst.instruction == T_MNEM_adds
7748 || inst.instruction == T_MNEM_subs,
7749 BAD_THUMB32);
7750
7751 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
7752 {
7753 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
7754 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
7755 BAD_HIREG);
7756
7757 inst.instruction = (inst.instruction == T_MNEM_add
7758 ? 0x0000 : 0x8000);
7759 inst.instruction |= (Rd << 4) | Rs;
7760 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7761 return;
7762 }
7763
7764 Rn = inst.operands[2].reg;
7765 constraint (inst.operands[2].shifted, _("unshifted register required"));
7766
7767 /* We now have Rd, Rs, and Rn set to registers. */
7768 if (Rd > 7 || Rs > 7 || Rn > 7)
7769 {
7770 /* Can't do this for SUB. */
7771 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
7772 inst.instruction = T_OPCODE_ADD_HI;
7773 inst.instruction |= (Rd & 8) << 4;
7774 inst.instruction |= (Rd & 7);
7775 if (Rs == Rd)
7776 inst.instruction |= Rn << 3;
7777 else if (Rn == Rd)
7778 inst.instruction |= Rs << 3;
7779 else
7780 constraint (1, _("dest must overlap one source register"));
7781 }
7782 else
7783 {
7784 inst.instruction = (inst.instruction == T_MNEM_add
7785 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
7786 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7787 }
7788 }
7789 }
7790
7791 static void
7792 do_t_adr (void)
7793 {
7794 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
7795 {
7796 /* Defer to section relaxation. */
7797 inst.relax = inst.instruction;
7798 inst.instruction = THUMB_OP16 (inst.instruction);
7799 inst.instruction |= inst.operands[0].reg << 4;
7800 }
7801 else if (unified_syntax && inst.size_req != 2)
7802 {
7803 /* Generate a 32-bit opcode. */
7804 inst.instruction = THUMB_OP32 (inst.instruction);
7805 inst.instruction |= inst.operands[0].reg << 8;
7806 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
7807 inst.reloc.pc_rel = 1;
7808 }
7809 else
7810 {
7811 /* Generate a 16-bit opcode. */
7812 inst.instruction = THUMB_OP16 (inst.instruction);
7813 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7814 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
7815 inst.reloc.pc_rel = 1;
7816
7817 inst.instruction |= inst.operands[0].reg << 4;
7818 }
7819 }
7820
7821 /* Arithmetic instructions for which there is just one 16-bit
7822 instruction encoding, and it allows only two low registers.
7823 For maximal compatibility with ARM syntax, we allow three register
7824 operands even when Thumb-32 instructions are not available, as long
7825 as the first two are identical. For instance, both "sbc r0,r1" and
7826 "sbc r0,r0,r1" are allowed. */
7827 static void
7828 do_t_arit3 (void)
7829 {
7830 int Rd, Rs, Rn;
7831
7832 Rd = inst.operands[0].reg;
7833 Rs = (inst.operands[1].present
7834 ? inst.operands[1].reg /* Rd, Rs, foo */
7835 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7836 Rn = inst.operands[2].reg;
7837
7838 if (unified_syntax)
7839 {
7840 if (!inst.operands[2].isreg)
7841 {
7842 /* For an immediate, we always generate a 32-bit opcode;
7843 section relaxation will shrink it later if possible. */
7844 inst.instruction = THUMB_OP32 (inst.instruction);
7845 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7846 inst.instruction |= Rd << 8;
7847 inst.instruction |= Rs << 16;
7848 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7849 }
7850 else
7851 {
7852 bfd_boolean narrow;
7853
7854 /* See if we can do this with a 16-bit instruction. */
7855 if (THUMB_SETS_FLAGS (inst.instruction))
7856 narrow = current_it_mask == 0;
7857 else
7858 narrow = current_it_mask != 0;
7859
7860 if (Rd > 7 || Rn > 7 || Rs > 7)
7861 narrow = FALSE;
7862 if (inst.operands[2].shifted)
7863 narrow = FALSE;
7864 if (inst.size_req == 4)
7865 narrow = FALSE;
7866
7867 if (narrow
7868 && Rd == Rs)
7869 {
7870 inst.instruction = THUMB_OP16 (inst.instruction);
7871 inst.instruction |= Rd;
7872 inst.instruction |= Rn << 3;
7873 return;
7874 }
7875
7876 /* If we get here, it can't be done in 16 bits. */
7877 constraint (inst.operands[2].shifted
7878 && inst.operands[2].immisreg,
7879 _("shift must be constant"));
7880 inst.instruction = THUMB_OP32 (inst.instruction);
7881 inst.instruction |= Rd << 8;
7882 inst.instruction |= Rs << 16;
7883 encode_thumb32_shifted_operand (2);
7884 }
7885 }
7886 else
7887 {
7888 /* On its face this is a lie - the instruction does set the
7889 flags. However, the only supported mnemonic in this mode
7890 says it doesn't. */
7891 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7892
7893 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7894 _("unshifted register required"));
7895 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7896 constraint (Rd != Rs,
7897 _("dest and source1 must be the same register"));
7898
7899 inst.instruction = THUMB_OP16 (inst.instruction);
7900 inst.instruction |= Rd;
7901 inst.instruction |= Rn << 3;
7902 }
7903 }
7904
7905 /* Similarly, but for instructions where the arithmetic operation is
7906 commutative, so we can allow either of them to be different from
7907 the destination operand in a 16-bit instruction. For instance, all
7908 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
7909 accepted. */
7910 static void
7911 do_t_arit3c (void)
7912 {
7913 int Rd, Rs, Rn;
7914
7915 Rd = inst.operands[0].reg;
7916 Rs = (inst.operands[1].present
7917 ? inst.operands[1].reg /* Rd, Rs, foo */
7918 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7919 Rn = inst.operands[2].reg;
7920
7921 if (unified_syntax)
7922 {
7923 if (!inst.operands[2].isreg)
7924 {
7925 /* For an immediate, we always generate a 32-bit opcode;
7926 section relaxation will shrink it later if possible. */
7927 inst.instruction = THUMB_OP32 (inst.instruction);
7928 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7929 inst.instruction |= Rd << 8;
7930 inst.instruction |= Rs << 16;
7931 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7932 }
7933 else
7934 {
7935 bfd_boolean narrow;
7936
7937 /* See if we can do this with a 16-bit instruction. */
7938 if (THUMB_SETS_FLAGS (inst.instruction))
7939 narrow = current_it_mask == 0;
7940 else
7941 narrow = current_it_mask != 0;
7942
7943 if (Rd > 7 || Rn > 7 || Rs > 7)
7944 narrow = FALSE;
7945 if (inst.operands[2].shifted)
7946 narrow = FALSE;
7947 if (inst.size_req == 4)
7948 narrow = FALSE;
7949
7950 if (narrow)
7951 {
7952 if (Rd == Rs)
7953 {
7954 inst.instruction = THUMB_OP16 (inst.instruction);
7955 inst.instruction |= Rd;
7956 inst.instruction |= Rn << 3;
7957 return;
7958 }
7959 if (Rd == Rn)
7960 {
7961 inst.instruction = THUMB_OP16 (inst.instruction);
7962 inst.instruction |= Rd;
7963 inst.instruction |= Rs << 3;
7964 return;
7965 }
7966 }
7967
7968 /* If we get here, it can't be done in 16 bits. */
7969 constraint (inst.operands[2].shifted
7970 && inst.operands[2].immisreg,
7971 _("shift must be constant"));
7972 inst.instruction = THUMB_OP32 (inst.instruction);
7973 inst.instruction |= Rd << 8;
7974 inst.instruction |= Rs << 16;
7975 encode_thumb32_shifted_operand (2);
7976 }
7977 }
7978 else
7979 {
7980 /* On its face this is a lie - the instruction does set the
7981 flags. However, the only supported mnemonic in this mode
7982 says it doesn't. */
7983 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7984
7985 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7986 _("unshifted register required"));
7987 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7988
7989 inst.instruction = THUMB_OP16 (inst.instruction);
7990 inst.instruction |= Rd;
7991
7992 if (Rd == Rs)
7993 inst.instruction |= Rn << 3;
7994 else if (Rd == Rn)
7995 inst.instruction |= Rs << 3;
7996 else
7997 constraint (1, _("dest must overlap one source register"));
7998 }
7999 }
8000
8001 static void
8002 do_t_barrier (void)
8003 {
8004 if (inst.operands[0].present)
8005 {
8006 constraint ((inst.instruction & 0xf0) != 0x40
8007 && inst.operands[0].imm != 0xf,
8008                   _("bad barrier type"));
8009 inst.instruction |= inst.operands[0].imm;
8010 }
8011 else
8012 inst.instruction |= 0xf;
8013 }
8014
8015 static void
8016 do_t_bfc (void)
8017 {
8018 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8019 constraint (msb > 32, _("bit-field extends past end of register"));
8020 /* The instruction encoding stores the LSB and MSB,
8021 not the LSB and width. */
8022 inst.instruction |= inst.operands[0].reg << 8;
8023 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8024 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8025 inst.instruction |= msb - 1;
8026 }
8027
8028 static void
8029 do_t_bfi (void)
8030 {
8031 unsigned int msb;
8032
8033 /* #0 in second position is alternative syntax for bfc, which is
8034 the same instruction but with REG_PC in the Rm field. */
8035 if (!inst.operands[1].isreg)
8036 inst.operands[1].reg = REG_PC;
8037
8038 msb = inst.operands[2].imm + inst.operands[3].imm;
8039 constraint (msb > 32, _("bit-field extends past end of register"));
8040 /* The instruction encoding stores the LSB and MSB,
8041 not the LSB and width. */
8042 inst.instruction |= inst.operands[0].reg << 8;
8043 inst.instruction |= inst.operands[1].reg << 16;
8044 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8045 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8046 inst.instruction |= msb - 1;
8047 }
8048
8049 static void
8050 do_t_bfx (void)
8051 {
8052 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8053 _("bit-field extends past end of register"));
8054 inst.instruction |= inst.operands[0].reg << 8;
8055 inst.instruction |= inst.operands[1].reg << 16;
8056 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8057 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8058 inst.instruction |= inst.operands[3].imm - 1;
8059 }
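/* A minimal sketch, not used by the assembler, of the Thumb32 bit-field
   operand packing used by do_t_bfc, do_t_bfi and do_t_bfx above: the LSB
   is split into imm3 (bits 12-14) and imm2 (bits 6-7); BFC/BFI then store
   lsb + width - 1 in bits 0-4, while SBFX/UBFX store width - 1 there.
   The function name is hypothetical.  */
#if 0
static unsigned int
example_t32_bitfield_fields (unsigned int lsb, unsigned int width,
			     int is_bfi)
{
  unsigned int fields = ((lsb & 0x1c) << 10) | ((lsb & 0x03) << 6);
  return fields | (is_bfi ? lsb + width - 1 : width - 1);
}
/* lsb = 4, width = 8:  BFI stores 11, UBFX stores 7 in the low bits.  */
#endif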
8060
8061 /* ARM V5 Thumb BLX (argument parse)
8062 BLX <target_addr> which is BLX(1)
8063 BLX <Rm> which is BLX(2)
8064 Unfortunately, there are two different opcodes for this mnemonic.
8065 So, the insns[].value is not used, and the code here zaps values
8066 into inst.instruction.
8067
8068 ??? How to take advantage of the additional two bits of displacement
8069 available in Thumb32 mode? Need new relocation? */
8070
8071 static void
8072 do_t_blx (void)
8073 {
8074 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8075 if (inst.operands[0].isreg)
8076 /* We have a register, so this is BLX(2). */
8077 inst.instruction |= inst.operands[0].reg << 3;
8078 else
8079 {
8080 /* No register. This must be BLX(1). */
8081 inst.instruction = 0xf000e800;
8082 #ifdef OBJ_ELF
8083 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8084 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8085 else
8086 #endif
8087 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8088 inst.reloc.pc_rel = 1;
8089 }
8090 }
8091
8092 static void
8093 do_t_branch (void)
8094 {
8095 int opcode;
8096 int cond;
8097
8098 if (current_it_mask)
8099 {
8100 /* Conditional branches inside IT blocks are encoded as unconditional
8101 branches. */
8102 cond = COND_ALWAYS;
8103 /* A branch must be the last instruction in an IT block. */
8104 constraint (current_it_mask != 0x10, BAD_BRANCH);
8105 }
8106 else
8107 cond = inst.cond;
8108
8109 if (cond != COND_ALWAYS)
8110 opcode = T_MNEM_bcond;
8111 else
8112 opcode = inst.instruction;
8113
8114 if (unified_syntax && inst.size_req == 4)
8115 {
8116 inst.instruction = THUMB_OP32(opcode);
8117 if (cond == COND_ALWAYS)
8118 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8119 else
8120 {
8121 assert (cond != 0xF);
8122 inst.instruction |= cond << 22;
8123 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8124 }
8125 }
8126 else
8127 {
8128 inst.instruction = THUMB_OP16(opcode);
8129 if (cond == COND_ALWAYS)
8130 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8131 else
8132 {
8133 inst.instruction |= cond << 8;
8134 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8135 }
8136 /* Allow section relaxation. */
8137 if (unified_syntax && inst.size_req != 2)
8138 inst.relax = opcode;
8139 }
8140
8141 inst.reloc.pc_rel = 1;
8142 }
8143
8144 static void
8145 do_t_bkpt (void)
8146 {
8147 constraint (inst.cond != COND_ALWAYS,
8148 _("instruction is always unconditional"));
8149 if (inst.operands[0].present)
8150 {
8151 constraint (inst.operands[0].imm > 255,
8152 _("immediate value out of range"));
8153 inst.instruction |= inst.operands[0].imm;
8154 }
8155 }
8156
8157 static void
8158 do_t_branch23 (void)
8159 {
8160 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8161 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8162 inst.reloc.pc_rel = 1;
8163
8164 /* If the destination of the branch is a defined symbol which does not have
8165 the THUMB_FUNC attribute, then we must be calling a function which has
8166 the (interfacearm) attribute. We look for the Thumb entry point to that
8167 function and change the branch to refer to that function instead. */
8168 if ( inst.reloc.exp.X_op == O_symbol
8169 && inst.reloc.exp.X_add_symbol != NULL
8170 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8171 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8172 inst.reloc.exp.X_add_symbol =
8173 find_real_start (inst.reloc.exp.X_add_symbol);
8174 }
8175
8176 static void
8177 do_t_bx (void)
8178 {
8179 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8180 inst.instruction |= inst.operands[0].reg << 3;
8181 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8182 should cause the alignment to be checked once it is known. This is
8183 because BX PC only works if the instruction is word aligned. */
8184 }
8185
8186 static void
8187 do_t_bxj (void)
8188 {
8189 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8190 if (inst.operands[0].reg == REG_PC)
8191 as_tsktsk (_("use of r15 in bxj is not really useful"));
8192
8193 inst.instruction |= inst.operands[0].reg << 16;
8194 }
8195
8196 static void
8197 do_t_clz (void)
8198 {
8199 inst.instruction |= inst.operands[0].reg << 8;
8200 inst.instruction |= inst.operands[1].reg << 16;
8201 inst.instruction |= inst.operands[1].reg;
8202 }
8203
8204 static void
8205 do_t_cps (void)
8206 {
8207 constraint (current_it_mask, BAD_NOT_IT);
8208 inst.instruction |= inst.operands[0].imm;
8209 }
8210
8211 static void
8212 do_t_cpsi (void)
8213 {
8214 constraint (current_it_mask, BAD_NOT_IT);
8215 if (unified_syntax
8216 && (inst.operands[1].present || inst.size_req == 4)
8217 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8218 {
8219 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8220 inst.instruction = 0xf3af8000;
8221 inst.instruction |= imod << 9;
8222 inst.instruction |= inst.operands[0].imm << 5;
8223 if (inst.operands[1].present)
8224 inst.instruction |= 0x100 | inst.operands[1].imm;
8225 }
8226 else
8227 {
8228 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8229 && (inst.operands[0].imm & 4),
8230 _("selected processor does not support 'A' form "
8231 "of this instruction"));
8232 constraint (inst.operands[1].present || inst.size_req == 4,
8233 _("Thumb does not support the 2-argument "
8234 "form of this instruction"));
8235 inst.instruction |= inst.operands[0].imm;
8236 }
8237 }
8238
8239 /* THUMB CPY instruction (argument parse). */
8240
8241 static void
8242 do_t_cpy (void)
8243 {
8244 if (inst.size_req == 4)
8245 {
8246 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8247 inst.instruction |= inst.operands[0].reg << 8;
8248 inst.instruction |= inst.operands[1].reg;
8249 }
8250 else
8251 {
8252 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8253 inst.instruction |= (inst.operands[0].reg & 0x7);
8254 inst.instruction |= inst.operands[1].reg << 3;
8255 }
8256 }
8257
8258 static void
8259 do_t_czb (void)
8260 {
8261 constraint (current_it_mask, BAD_NOT_IT);
8262 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8263 inst.instruction |= inst.operands[0].reg;
8264 inst.reloc.pc_rel = 1;
8265 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
8266 }
8267
8268 static void
8269 do_t_dbg (void)
8270 {
8271 inst.instruction |= inst.operands[0].imm;
8272 }
8273
8274 static void
8275 do_t_div (void)
8276 {
8277 if (!inst.operands[1].present)
8278 inst.operands[1].reg = inst.operands[0].reg;
8279 inst.instruction |= inst.operands[0].reg << 8;
8280 inst.instruction |= inst.operands[1].reg << 16;
8281 inst.instruction |= inst.operands[2].reg;
8282 }
8283
8284 static void
8285 do_t_hint (void)
8286 {
8287 if (unified_syntax && inst.size_req == 4)
8288 inst.instruction = THUMB_OP32 (inst.instruction);
8289 else
8290 inst.instruction = THUMB_OP16 (inst.instruction);
8291 }
8292
8293 static void
8294 do_t_it (void)
8295 {
8296 unsigned int cond = inst.operands[0].imm;
8297
8298 constraint (current_it_mask, BAD_NOT_IT);
8299 current_it_mask = (inst.instruction & 0xf) | 0x10;
8300 current_cc = cond;
8301
8302 /* If the condition is a negative condition, invert the mask. */
8303 if ((cond & 0x1) == 0x0)
8304 {
8305 unsigned int mask = inst.instruction & 0x000f;
8306
8307 if ((mask & 0x7) == 0)
8308 /* no conversion needed */;
8309 else if ((mask & 0x3) == 0)
8310 mask ^= 0x8;
8311 else if ((mask & 0x1) == 0)
8312 mask ^= 0xC;
8313 else
8314 mask ^= 0xE;
8315
8316 inst.instruction &= 0xfff0;
8317 inst.instruction |= mask;
8318 }
8319
8320 inst.instruction |= cond << 4;
8321 }
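
/* Hand-worked example of the mask inversion above: for "ite" the opcode
   table is assumed to supply mask 0b0100, the value that is correct when
   firstcond[0] == 1 (e.g. NE).  For "ite eq" firstcond is 0b0000, so the
   test fires; (mask & 0x3) == 0 selects "mask ^= 0x8" and the emitted
   mask becomes 0b1100, i.e. the 'else' slot now carries the complement
   of firstcond[0], as the architecture requires.  */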
8322
8323 static void
8324 do_t_ldmstm (void)
8325 {
8326 /* This really doesn't seem worth it. */
8327 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
8328 _("expression too complex"));
8329 constraint (inst.operands[1].writeback,
8330 _("Thumb load/store multiple does not support {reglist}^"));
8331
8332 if (unified_syntax)
8333 {
8334 /* See if we can use a 16-bit instruction. */
8335 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
8336 && inst.size_req != 4
8337 && inst.operands[0].reg <= 7
8338 && !(inst.operands[1].imm & ~0xff)
8339 && (inst.instruction == T_MNEM_stmia
8340 ? inst.operands[0].writeback
8341 : (inst.operands[0].writeback
8342 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
8343 {
8344 if (inst.instruction == T_MNEM_stmia
8345 && (inst.operands[1].imm & (1 << inst.operands[0].reg))
8346 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8347 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8348 inst.operands[0].reg);
8349
8350 inst.instruction = THUMB_OP16 (inst.instruction);
8351 inst.instruction |= inst.operands[0].reg << 8;
8352 inst.instruction |= inst.operands[1].imm;
8353 }
8354 else
8355 {
8356 if (inst.operands[1].imm & (1 << 13))
8357 as_warn (_("SP should not be in register list"));
8358 if (inst.instruction == T_MNEM_stmia)
8359 {
8360 if (inst.operands[1].imm & (1 << 15))
8361 as_warn (_("PC should not be in register list"));
8362 if (inst.operands[1].imm & (1 << inst.operands[0].reg))
8363 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8364 inst.operands[0].reg);
8365 }
8366 else
8367 {
8368 if (inst.operands[1].imm & (1 << 14)
8369 && inst.operands[1].imm & (1 << 15))
8370 as_warn (_("LR and PC should not both be in register list"));
8371 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8372 && inst.operands[0].writeback)
8373 as_warn (_("base register should not be in register list "
8374 "when written back"));
8375 }
8376 if (inst.instruction < 0xffff)
8377 inst.instruction = THUMB_OP32 (inst.instruction);
8378 inst.instruction |= inst.operands[0].reg << 16;
8379 inst.instruction |= inst.operands[1].imm;
8380 if (inst.operands[0].writeback)
8381 inst.instruction |= WRITE_BACK;
8382 }
8383 }
8384 else
8385 {
8386 constraint (inst.operands[0].reg > 7
8387 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
8388 if (inst.instruction == T_MNEM_stmia)
8389 {
8390 if (!inst.operands[0].writeback)
8391 as_warn (_("this instruction will write back the base register"));
8392 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8393 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8394 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8395 inst.operands[0].reg);
8396 }
8397 else
8398 {
8399 if (!inst.operands[0].writeback
8400 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
8401 as_warn (_("this instruction will write back the base register"));
8402 else if (inst.operands[0].writeback
8403 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
8404 as_warn (_("this instruction will not write back the base register"));
8405 }
8406
8407 inst.instruction = THUMB_OP16 (inst.instruction);
8408 inst.instruction |= inst.operands[0].reg << 8;
8409 inst.instruction |= inst.operands[1].imm;
8410 }
8411 }
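
/* Hedged examples of the 16-bit/32-bit selection above, in unified
   syntax: "ldmia r3!, {r0-r2}" satisfies the narrow test (low base, low
   registers, writeback with the base absent from the list) and keeps the
   16-bit encoding; "ldmia r3, {r0-r2}" fails the writeback test and is
   widened to the 32-bit form; "stmia r1!, {r0, r1}" stays narrow but
   triggers the UNPREDICTABLE warning, since the base is stored after a
   lower-numbered register.  */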
8412
8413 static void
8414 do_t_ldrex (void)
8415 {
8416 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8417 || inst.operands[1].postind || inst.operands[1].writeback
8418 || inst.operands[1].immisreg || inst.operands[1].shifted
8419 || inst.operands[1].negative,
8420 BAD_ADDR_MODE);
8421
8422 inst.instruction |= inst.operands[0].reg << 12;
8423 inst.instruction |= inst.operands[1].reg << 16;
8424 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
8425 }
8426
8427 static void
8428 do_t_ldrexd (void)
8429 {
8430 if (!inst.operands[1].present)
8431 {
8432 constraint (inst.operands[0].reg == REG_LR,
8433 _("r14 not allowed as first register "
8434 "when second register is omitted"));
8435 inst.operands[1].reg = inst.operands[0].reg + 1;
8436 }
8437 constraint (inst.operands[0].reg == inst.operands[1].reg,
8438 BAD_OVERLAP);
8439
8440 inst.instruction |= inst.operands[0].reg << 12;
8441 inst.instruction |= inst.operands[1].reg << 8;
8442 inst.instruction |= inst.operands[2].reg << 16;
8443 }
8444
8445 static void
8446 do_t_ldst (void)
8447 {
8448 unsigned long opcode;
8449 int Rn;
8450
8451 opcode = inst.instruction;
8452 if (unified_syntax)
8453 {
8454 if (!inst.operands[1].isreg)
8455 {
8456 if (opcode <= 0xffff)
8457 inst.instruction = THUMB_OP32 (opcode);
8458 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8459 return;
8460 }
8461 if (inst.operands[1].isreg
8462 && !inst.operands[1].writeback
8463 && !inst.operands[1].shifted && !inst.operands[1].postind
8464 && !inst.operands[1].negative && inst.operands[0].reg <= 7
8465 && opcode <= 0xffff
8466 && inst.size_req != 4)
8467 {
8468 /* Insn may have a 16-bit form. */
8469 Rn = inst.operands[1].reg;
8470 if (inst.operands[1].immisreg)
8471 {
8472 inst.instruction = THUMB_OP16 (opcode);
8473 /* [Rn, Ri] */
8474 if (Rn <= 7 && inst.operands[1].imm <= 7)
8475 goto op16;
8476 }
8477 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
8478 && opcode != T_MNEM_ldrsb)
8479 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
8480 || (Rn == REG_SP && opcode == T_MNEM_str))
8481 {
8482 /* [Rn, #const] */
8483 if (Rn > 7)
8484 {
8485 if (Rn == REG_PC)
8486 {
8487 if (inst.reloc.pc_rel)
8488 opcode = T_MNEM_ldr_pc2;
8489 else
8490 opcode = T_MNEM_ldr_pc;
8491 }
8492 else
8493 {
8494 if (opcode == T_MNEM_ldr)
8495 opcode = T_MNEM_ldr_sp;
8496 else
8497 opcode = T_MNEM_str_sp;
8498 }
8499 inst.instruction = inst.operands[0].reg << 8;
8500 }
8501 else
8502 {
8503 inst.instruction = inst.operands[0].reg;
8504 inst.instruction |= inst.operands[1].reg << 3;
8505 }
8506 inst.instruction |= THUMB_OP16 (opcode);
8507 if (inst.size_req == 2)
8508 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8509 else
8510 inst.relax = opcode;
8511 return;
8512 }
8513 }
8514 /* Definitely a 32-bit variant. */
8515 inst.instruction = THUMB_OP32 (opcode);
8516 inst.instruction |= inst.operands[0].reg << 12;
8517 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
8518 return;
8519 }
8520
8521 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8522
8523 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
8524 {
8525 /* Only [Rn,Rm] is acceptable. */
8526 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
8527 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
8528 || inst.operands[1].postind || inst.operands[1].shifted
8529 || inst.operands[1].negative,
8530 _("Thumb does not support this addressing mode"));
8531 inst.instruction = THUMB_OP16 (inst.instruction);
8532 goto op16;
8533 }
8534
8535 inst.instruction = THUMB_OP16 (inst.instruction);
8536 if (!inst.operands[1].isreg)
8537 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8538 return;
8539
8540 constraint (!inst.operands[1].preind
8541 || inst.operands[1].shifted
8542 || inst.operands[1].writeback,
8543 _("Thumb does not support this addressing mode"));
8544 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
8545 {
8546 constraint (inst.instruction & 0x0600,
8547 _("byte or halfword not valid for base register"));
8548 constraint (inst.operands[1].reg == REG_PC
8549 && !(inst.instruction & THUMB_LOAD_BIT),
8550 _("r15 based store not allowed"));
8551 constraint (inst.operands[1].immisreg,
8552 _("invalid base register for register offset"));
8553
8554 if (inst.operands[1].reg == REG_PC)
8555 inst.instruction = T_OPCODE_LDR_PC;
8556 else if (inst.instruction & THUMB_LOAD_BIT)
8557 inst.instruction = T_OPCODE_LDR_SP;
8558 else
8559 inst.instruction = T_OPCODE_STR_SP;
8560
8561 inst.instruction |= inst.operands[0].reg << 8;
8562 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8563 return;
8564 }
8565
8566 constraint (inst.operands[1].reg > 7, BAD_HIREG);
8567 if (!inst.operands[1].immisreg)
8568 {
8569 /* Immediate offset. */
8570 inst.instruction |= inst.operands[0].reg;
8571 inst.instruction |= inst.operands[1].reg << 3;
8572 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8573 return;
8574 }
8575
8576 /* Register offset. */
8577 constraint (inst.operands[1].imm > 7, BAD_HIREG);
8578 constraint (inst.operands[1].negative,
8579 _("Thumb does not support this addressing mode"));
8580
8581 op16:
8582 switch (inst.instruction)
8583 {
8584 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
8585 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
8586 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
8587 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
8588 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
8589 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
8590 case 0x5600 /* ldrsb */:
8591 case 0x5e00 /* ldrsh */: break;
8592 default: abort ();
8593 }
8594
8595 inst.instruction |= inst.operands[0].reg;
8596 inst.instruction |= inst.operands[1].reg << 3;
8597 inst.instruction |= inst.operands[1].imm << 6;
8598 }
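
/* A few hedged examples of the opcode substitution above: in unified
   syntax "ldr r0, [sp, #8]" is retargeted to T_MNEM_ldr_sp and
   "str r0, [sp, #4]" to T_MNEM_str_sp (the 16-bit SP-relative forms),
   while "ldr r0, [pc, #16]" becomes T_MNEM_ldr_pc.  A low-register base
   such as "ldr r0, [r1, #4]" keeps the ordinary 16-bit immediate form,
   taking BFD_RELOC_ARM_THUMB_OFFSET when a 16-bit size was requested and
   the relaxation path otherwise.  */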
8599
8600 static void
8601 do_t_ldstd (void)
8602 {
8603 if (!inst.operands[1].present)
8604 {
8605 inst.operands[1].reg = inst.operands[0].reg + 1;
8606 constraint (inst.operands[0].reg == REG_LR,
8607 _("r14 not allowed here"));
8608 }
8609 inst.instruction |= inst.operands[0].reg << 12;
8610 inst.instruction |= inst.operands[1].reg << 8;
8611 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
8612
8613 }
8614
8615 static void
8616 do_t_ldstt (void)
8617 {
8618 inst.instruction |= inst.operands[0].reg << 12;
8619 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
8620 }
8621
8622 static void
8623 do_t_mla (void)
8624 {
8625 inst.instruction |= inst.operands[0].reg << 8;
8626 inst.instruction |= inst.operands[1].reg << 16;
8627 inst.instruction |= inst.operands[2].reg;
8628 inst.instruction |= inst.operands[3].reg << 12;
8629 }
8630
8631 static void
8632 do_t_mlal (void)
8633 {
8634 inst.instruction |= inst.operands[0].reg << 12;
8635 inst.instruction |= inst.operands[1].reg << 8;
8636 inst.instruction |= inst.operands[2].reg << 16;
8637 inst.instruction |= inst.operands[3].reg;
8638 }
8639
8640 static void
8641 do_t_mov_cmp (void)
8642 {
8643 if (unified_syntax)
8644 {
8645 int r0off = (inst.instruction == T_MNEM_mov
8646 || inst.instruction == T_MNEM_movs) ? 8 : 16;
8647 unsigned long opcode;
8648 bfd_boolean narrow;
8649 bfd_boolean low_regs;
8650
8651 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
8652 opcode = inst.instruction;
8653 if (current_it_mask)
8654 narrow = opcode != T_MNEM_movs;
8655 else
8656 narrow = opcode != T_MNEM_movs || low_regs;
8657 if (inst.size_req == 4
8658 || inst.operands[1].shifted)
8659 narrow = FALSE;
8660
8661 if (!inst.operands[1].isreg)
8662 {
8663 /* Immediate operand. */
8664 if (current_it_mask == 0 && opcode == T_MNEM_mov)
8665 narrow = 0;
8666 if (low_regs && narrow)
8667 {
8668 inst.instruction = THUMB_OP16 (opcode);
8669 inst.instruction |= inst.operands[0].reg << 8;
8670 if (inst.size_req == 2)
8671 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8672 else
8673 inst.relax = opcode;
8674 }
8675 else
8676 {
8677 inst.instruction = THUMB_OP32 (inst.instruction);
8678 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8679 inst.instruction |= inst.operands[0].reg << r0off;
8680 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8681 }
8682 }
8683 else if (!narrow)
8684 {
8685 inst.instruction = THUMB_OP32 (inst.instruction);
8686 inst.instruction |= inst.operands[0].reg << r0off;
8687 encode_thumb32_shifted_operand (1);
8688 }
8689 else
8690 switch (inst.instruction)
8691 {
8692 case T_MNEM_mov:
8693 inst.instruction = T_OPCODE_MOV_HR;
8694 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8695 inst.instruction |= (inst.operands[0].reg & 0x7);
8696 inst.instruction |= inst.operands[1].reg << 3;
8697 break;
8698
8699 case T_MNEM_movs:
8700 /* We know we have low registers at this point.
8701 Generate ADD Rd, Rs, #0. */
8702 inst.instruction = T_OPCODE_ADD_I3;
8703 inst.instruction |= inst.operands[0].reg;
8704 inst.instruction |= inst.operands[1].reg << 3;
8705 break;
8706
8707 case T_MNEM_cmp:
8708 if (low_regs)
8709 {
8710 inst.instruction = T_OPCODE_CMP_LR;
8711 inst.instruction |= inst.operands[0].reg;
8712 inst.instruction |= inst.operands[1].reg << 3;
8713 }
8714 else
8715 {
8716 inst.instruction = T_OPCODE_CMP_HR;
8717 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8718 inst.instruction |= (inst.operands[0].reg & 0x7);
8719 inst.instruction |= inst.operands[1].reg << 3;
8720 }
8721 break;
8722 }
8723 return;
8724 }
8725
8726 inst.instruction = THUMB_OP16 (inst.instruction);
8727 if (inst.operands[1].isreg)
8728 {
8729 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
8730 {
8731 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8732 since a MOV instruction produces unpredictable results. */
8733 if (inst.instruction == T_OPCODE_MOV_I8)
8734 inst.instruction = T_OPCODE_ADD_I3;
8735 else
8736 inst.instruction = T_OPCODE_CMP_LR;
8737
8738 inst.instruction |= inst.operands[0].reg;
8739 inst.instruction |= inst.operands[1].reg << 3;
8740 }
8741 else
8742 {
8743 if (inst.instruction == T_OPCODE_MOV_I8)
8744 inst.instruction = T_OPCODE_MOV_HR;
8745 else
8746 inst.instruction = T_OPCODE_CMP_HR;
8747 do_t_cpy ();
8748 }
8749 }
8750 else
8751 {
8752 constraint (inst.operands[0].reg > 7,
8753 _("only lo regs allowed with immediate"));
8754 inst.instruction |= inst.operands[0].reg << 8;
8755 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8756 }
8757 }
8758
8759 static void
8760 do_t_mov16 (void)
8761 {
8762 bfd_vma imm;
8763 bfd_boolean top;
8764
8765 top = (inst.instruction & 0x00800000) != 0;
8766 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
8767 {
8768 constraint (top, _(":lower16: not allowed in this instruction"));
8769 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
8770 }
8771 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
8772 {
8773 constraint (!top, _(":upper16: not allowed in this instruction"));
8774 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
8775 }
8776
8777 inst.instruction |= inst.operands[0].reg << 8;
8778 if (inst.reloc.type == BFD_RELOC_UNUSED)
8779 {
8780 imm = inst.reloc.exp.X_add_number;
8781 inst.instruction |= (imm & 0xf000) << 4;
8782 inst.instruction |= (imm & 0x0800) << 15;
8783 inst.instruction |= (imm & 0x0700) << 4;
8784 inst.instruction |= (imm & 0x00ff);
8785 }
8786 }
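
/* Field placement implied by the shifts above: imm[15:12] (imm4) lands
   in bits [19:16], imm[11] (i) in bit 26, imm[10:8] (imm3) in bits
   [14:12] and imm[7:0] (imm8) in bits [7:0].  For instance, a constant
   of 0x1234 contributes imm4 = 0x1, i = 0, imm3 = 0x2 and imm8 = 0x34.  */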
8787
8788 static void
8789 do_t_mvn_tst (void)
8790 {
8791 if (unified_syntax)
8792 {
8793 int r0off = (inst.instruction == T_MNEM_mvn
8794 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
8795 bfd_boolean narrow;
8796
8797 if (inst.size_req == 4
8798 || inst.instruction > 0xffff
8799 || inst.operands[1].shifted
8800 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8801 narrow = FALSE;
8802 else if (inst.instruction == T_MNEM_cmn)
8803 narrow = TRUE;
8804 else if (THUMB_SETS_FLAGS (inst.instruction))
8805 narrow = (current_it_mask == 0);
8806 else
8807 narrow = (current_it_mask != 0);
8808
8809 if (!inst.operands[1].isreg)
8810 {
8811 /* For an immediate, we always generate a 32-bit opcode;
8812 section relaxation will shrink it later if possible. */
8813 if (inst.instruction < 0xffff)
8814 inst.instruction = THUMB_OP32 (inst.instruction);
8815 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8816 inst.instruction |= inst.operands[0].reg << r0off;
8817 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8818 }
8819 else
8820 {
8821 /* See if we can do this with a 16-bit instruction. */
8822 if (narrow)
8823 {
8824 inst.instruction = THUMB_OP16 (inst.instruction);
8825 inst.instruction |= inst.operands[0].reg;
8826 inst.instruction |= inst.operands[1].reg << 3;
8827 }
8828 else
8829 {
8830 constraint (inst.operands[1].shifted
8831 && inst.operands[1].immisreg,
8832 _("shift must be constant"));
8833 if (inst.instruction < 0xffff)
8834 inst.instruction = THUMB_OP32 (inst.instruction);
8835 inst.instruction |= inst.operands[0].reg << r0off;
8836 encode_thumb32_shifted_operand (1);
8837 }
8838 }
8839 }
8840 else
8841 {
8842 constraint (inst.instruction > 0xffff
8843 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
8844 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
8845 _("unshifted register required"));
8846 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8847 BAD_HIREG);
8848
8849 inst.instruction = THUMB_OP16 (inst.instruction);
8850 inst.instruction |= inst.operands[0].reg;
8851 inst.instruction |= inst.operands[1].reg << 3;
8852 }
8853 }
8854
8855 static void
8856 do_t_mrs (void)
8857 {
8858 int flags;
8859 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
8860 if (flags == 0)
8861 {
8862 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8863 _("selected processor does not support "
8864 "requested special purpose register"));
8865 }
8866 else
8867 {
8868 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8869 _("selected processor does not support "
8870 "requested special purpose register %x"));
8871 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8872 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
8873 _("'CPSR' or 'SPSR' expected"));
8874 }
8875
8876 inst.instruction |= inst.operands[0].reg << 8;
8877 inst.instruction |= (flags & SPSR_BIT) >> 2;
8878 inst.instruction |= inst.operands[1].imm & 0xff;
8879 }
8880
8881 static void
8882 do_t_msr (void)
8883 {
8884 int flags;
8885
8886 constraint (!inst.operands[1].isreg,
8887 _("Thumb encoding does not support an immediate here"));
8888 flags = inst.operands[0].imm;
8889 if (flags & ~0xff)
8890 {
8891 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8892 _("selected processor does not support "
8893 "requested special purpose register"));
8894 }
8895 else
8896 {
8897 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8898 _("selected processor does not support "
8899 "requested special purpose register"));
8900 flags |= PSR_f;
8901 }
8902 inst.instruction |= (flags & SPSR_BIT) >> 2;
8903 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
8904 inst.instruction |= (flags & 0xff);
8905 inst.instruction |= inst.operands[1].reg << 16;
8906 }
8907
8908 static void
8909 do_t_mul (void)
8910 {
8911 if (!inst.operands[2].present)
8912 inst.operands[2].reg = inst.operands[0].reg;
8913
8914 /* There is no 32-bit MULS and no 16-bit MUL. */
8915 if (unified_syntax && inst.instruction == T_MNEM_mul)
8916 {
8917 inst.instruction = THUMB_OP32 (inst.instruction);
8918 inst.instruction |= inst.operands[0].reg << 8;
8919 inst.instruction |= inst.operands[1].reg << 16;
8920 inst.instruction |= inst.operands[2].reg << 0;
8921 }
8922 else
8923 {
8924 constraint (!unified_syntax
8925 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
8926 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8927 BAD_HIREG);
8928
8929 inst.instruction = THUMB_OP16 (inst.instruction);
8930 inst.instruction |= inst.operands[0].reg;
8931
8932 if (inst.operands[0].reg == inst.operands[1].reg)
8933 inst.instruction |= inst.operands[2].reg << 3;
8934 else if (inst.operands[0].reg == inst.operands[2].reg)
8935 inst.instruction |= inst.operands[1].reg << 3;
8936 else
8937 constraint (1, _("dest must overlap one source register"));
8938 }
8939 }
8940
8941 static void
8942 do_t_mull (void)
8943 {
8944 inst.instruction |= inst.operands[0].reg << 12;
8945 inst.instruction |= inst.operands[1].reg << 8;
8946 inst.instruction |= inst.operands[2].reg << 16;
8947 inst.instruction |= inst.operands[3].reg;
8948
8949 if (inst.operands[0].reg == inst.operands[1].reg)
8950 as_tsktsk (_("rdhi and rdlo must be different"));
8951 }
8952
8953 static void
8954 do_t_nop (void)
8955 {
8956 if (unified_syntax)
8957 {
8958 if (inst.size_req == 4 || inst.operands[0].imm > 15)
8959 {
8960 inst.instruction = THUMB_OP32 (inst.instruction);
8961 inst.instruction |= inst.operands[0].imm;
8962 }
8963 else
8964 {
8965 inst.instruction = THUMB_OP16 (inst.instruction);
8966 inst.instruction |= inst.operands[0].imm << 4;
8967 }
8968 }
8969 else
8970 {
8971 constraint (inst.operands[0].present,
8972 _("Thumb does not support NOP with hints"));
8973 inst.instruction = 0x46c0;
8974 }
8975 }
8976
8977 static void
8978 do_t_neg (void)
8979 {
8980 if (unified_syntax)
8981 {
8982 bfd_boolean narrow;
8983
8984 if (THUMB_SETS_FLAGS (inst.instruction))
8985 narrow = (current_it_mask == 0);
8986 else
8987 narrow = (current_it_mask != 0);
8988 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8989 narrow = FALSE;
8990 if (inst.size_req == 4)
8991 narrow = FALSE;
8992
8993 if (!narrow)
8994 {
8995 inst.instruction = THUMB_OP32 (inst.instruction);
8996 inst.instruction |= inst.operands[0].reg << 8;
8997 inst.instruction |= inst.operands[1].reg << 16;
8998 }
8999 else
9000 {
9001 inst.instruction = THUMB_OP16 (inst.instruction);
9002 inst.instruction |= inst.operands[0].reg;
9003 inst.instruction |= inst.operands[1].reg << 3;
9004 }
9005 }
9006 else
9007 {
9008 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9009 BAD_HIREG);
9010 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9011
9012 inst.instruction = THUMB_OP16 (inst.instruction);
9013 inst.instruction |= inst.operands[0].reg;
9014 inst.instruction |= inst.operands[1].reg << 3;
9015 }
9016 }
9017
9018 static void
9019 do_t_pkhbt (void)
9020 {
9021 inst.instruction |= inst.operands[0].reg << 8;
9022 inst.instruction |= inst.operands[1].reg << 16;
9023 inst.instruction |= inst.operands[2].reg;
9024 if (inst.operands[3].present)
9025 {
9026 unsigned int val = inst.reloc.exp.X_add_number;
9027 constraint (inst.reloc.exp.X_op != O_constant,
9028 _("expression too complex"));
9029 inst.instruction |= (val & 0x1c) << 10;
9030 inst.instruction |= (val & 0x03) << 6;
9031 }
9032 }
9033
9034 static void
9035 do_t_pkhtb (void)
9036 {
9037 if (!inst.operands[3].present)
9038 inst.instruction &= ~0x00000020;
9039 do_t_pkhbt ();
9040 }
9041
9042 static void
9043 do_t_pld (void)
9044 {
9045 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
9046 }
9047
9048 static void
9049 do_t_push_pop (void)
9050 {
9051 unsigned mask;
9052
9053 constraint (inst.operands[0].writeback,
9054 _("push/pop do not support {reglist}^"));
9055 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9056 _("expression too complex"));
9057
9058 mask = inst.operands[0].imm;
9059 if ((mask & ~0xff) == 0)
9060 inst.instruction = THUMB_OP16 (inst.instruction);
9061 else if ((inst.instruction == T_MNEM_push
9062 && (mask & ~0xff) == 1 << REG_LR)
9063 || (inst.instruction == T_MNEM_pop
9064 && (mask & ~0xff) == 1 << REG_PC))
9065 {
9066 inst.instruction = THUMB_OP16 (inst.instruction);
9067 inst.instruction |= THUMB_PP_PC_LR;
9068 mask &= 0xff;
9069 }
9070 else if (unified_syntax)
9071 {
9072 if (mask & (1 << 13))
9073 inst.error = _("SP not allowed in register list");
9074 if (inst.instruction == T_MNEM_push)
9075 {
9076 if (mask & (1 << 15))
9077 inst.error = _("PC not allowed in register list");
9078 }
9079 else
9080 {
9081 if (mask & (1 << 14)
9082 && mask & (1 << 15))
9083 inst.error = _("LR and PC should not both be in register list");
9084 }
9085 if ((mask & (mask - 1)) == 0)
9086 {
9087 /* Single register push/pop implemented as str/ldr. */
9088 if (inst.instruction == T_MNEM_push)
9089 inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
9090 else
9091 inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
9092 mask = ffs (mask) - 1;
9093 mask <<= 12;
9094 }
9095 else
9096 inst.instruction = THUMB_OP32 (inst.instruction);
9097 }
9098 else
9099 {
9100 inst.error = _("invalid register list to push/pop instruction");
9101 return;
9102 }
9103
9104 inst.instruction |= mask;
9105 }
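
/* Hand-worked example of the single-register case above: "push {r8}"
   has mask 0x100, so it cannot use a 16-bit encoding and has only one
   bit set; ffs gives 9, mask becomes 8 << 12, and the result is
   0xf84d8d04, i.e. "str r8, [sp, #-4]!".  Likewise "pop {r8}" yields
   0xf85d8b04, i.e. "ldr r8, [sp], #4".  */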
9106
9107 static void
9108 do_t_rbit (void)
9109 {
9110 inst.instruction |= inst.operands[0].reg << 8;
9111 inst.instruction |= inst.operands[1].reg << 16;
9112 }
9113
9114 static void
9115 do_t_rev (void)
9116 {
9117 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9118 && inst.size_req != 4)
9119 {
9120 inst.instruction = THUMB_OP16 (inst.instruction);
9121 inst.instruction |= inst.operands[0].reg;
9122 inst.instruction |= inst.operands[1].reg << 3;
9123 }
9124 else if (unified_syntax)
9125 {
9126 inst.instruction = THUMB_OP32 (inst.instruction);
9127 inst.instruction |= inst.operands[0].reg << 8;
9128 inst.instruction |= inst.operands[1].reg << 16;
9129 inst.instruction |= inst.operands[1].reg;
9130 }
9131 else
9132 inst.error = BAD_HIREG;
9133 }
9134
9135 static void
9136 do_t_rsb (void)
9137 {
9138 int Rd, Rs;
9139
9140 Rd = inst.operands[0].reg;
9141 Rs = (inst.operands[1].present
9142 ? inst.operands[1].reg /* Rd, Rs, foo */
9143 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9144
9145 inst.instruction |= Rd << 8;
9146 inst.instruction |= Rs << 16;
9147 if (!inst.operands[2].isreg)
9148 {
9149 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9150 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9151 }
9152 else
9153 encode_thumb32_shifted_operand (2);
9154 }
9155
9156 static void
9157 do_t_setend (void)
9158 {
9159 constraint (current_it_mask, BAD_NOT_IT);
9160 if (inst.operands[0].imm)
9161 inst.instruction |= 0x8;
9162 }
9163
9164 static void
9165 do_t_shift (void)
9166 {
9167 if (!inst.operands[1].present)
9168 inst.operands[1].reg = inst.operands[0].reg;
9169
9170 if (unified_syntax)
9171 {
9172 bfd_boolean narrow;
9173 int shift_kind;
9174
9175 switch (inst.instruction)
9176 {
9177 case T_MNEM_asr:
9178 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
9179 case T_MNEM_lsl:
9180 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
9181 case T_MNEM_lsr:
9182 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
9183 case T_MNEM_ror:
9184 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
9185 default: abort ();
9186 }
9187
9188 if (THUMB_SETS_FLAGS (inst.instruction))
9189 narrow = (current_it_mask == 0);
9190 else
9191 narrow = (current_it_mask != 0);
9192 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9193 narrow = FALSE;
9194 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
9195 narrow = FALSE;
9196 if (inst.operands[2].isreg
9197 && (inst.operands[1].reg != inst.operands[0].reg
9198 || inst.operands[2].reg > 7))
9199 narrow = FALSE;
9200 if (inst.size_req == 4)
9201 narrow = FALSE;
9202
9203 if (!narrow)
9204 {
9205 if (inst.operands[2].isreg)
9206 {
9207 inst.instruction = THUMB_OP32 (inst.instruction);
9208 inst.instruction |= inst.operands[0].reg << 8;
9209 inst.instruction |= inst.operands[1].reg << 16;
9210 inst.instruction |= inst.operands[2].reg;
9211 }
9212 else
9213 {
9214 inst.operands[1].shifted = 1;
9215 inst.operands[1].shift_kind = shift_kind;
9216 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
9217 ? T_MNEM_movs : T_MNEM_mov);
9218 inst.instruction |= inst.operands[0].reg << 8;
9219 encode_thumb32_shifted_operand (1);
9220 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9221 inst.reloc.type = BFD_RELOC_UNUSED;
9222 }
9223 }
9224 else
9225 {
9226 if (inst.operands[2].isreg)
9227 {
9228 switch (shift_kind)
9229 {
9230 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
9231 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
9232 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
9233 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
9234 default: abort ();
9235 }
9236
9237 inst.instruction |= inst.operands[0].reg;
9238 inst.instruction |= inst.operands[2].reg << 3;
9239 }
9240 else
9241 {
9242 switch (shift_kind)
9243 {
9244 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9245 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9246 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9247 default: abort ();
9248 }
9249 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9250 inst.instruction |= inst.operands[0].reg;
9251 inst.instruction |= inst.operands[1].reg << 3;
9252 }
9253 }
9254 }
9255 else
9256 {
9257 constraint (inst.operands[0].reg > 7
9258 || inst.operands[1].reg > 7, BAD_HIREG);
9259 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9260
9261 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
9262 {
9263 constraint (inst.operands[2].reg > 7, BAD_HIREG);
9264 constraint (inst.operands[0].reg != inst.operands[1].reg,
9265 _("source1 and dest must be same register"));
9266
9267 switch (inst.instruction)
9268 {
9269 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
9270 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
9271 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
9272 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
9273 default: abort ();
9274 }
9275
9276 inst.instruction |= inst.operands[0].reg;
9277 inst.instruction |= inst.operands[2].reg << 3;
9278 }
9279 else
9280 {
9281 switch (inst.instruction)
9282 {
9283 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
9284 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
9285 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
9286 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
9287 default: abort ();
9288 }
9289 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9290 inst.instruction |= inst.operands[0].reg;
9291 inst.instruction |= inst.operands[1].reg << 3;
9292 }
9293 }
9294 }
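
/* Note on the wide immediate-shift path above: Thumb-2 has no separate
   32-bit LSL/LSR/ASR-immediate opcodes, so a source line such as
   "lsl.w r0, r1, #3" is rewritten as MOV (or MOVS, for the flag-setting
   mnemonics) with an LSL-shifted register operand, which is what the
   T_MNEM_mov/T_MNEM_movs substitution expresses.  */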
9295
9296 static void
9297 do_t_simd (void)
9298 {
9299 inst.instruction |= inst.operands[0].reg << 8;
9300 inst.instruction |= inst.operands[1].reg << 16;
9301 inst.instruction |= inst.operands[2].reg;
9302 }
9303
9304 static void
9305 do_t_smc (void)
9306 {
9307 unsigned int value = inst.reloc.exp.X_add_number;
9308 constraint (inst.reloc.exp.X_op != O_constant,
9309 _("expression too complex"));
9310 inst.reloc.type = BFD_RELOC_UNUSED;
9311 inst.instruction |= (value & 0xf000) >> 12;
9312 inst.instruction |= (value & 0x0ff0);
9313 inst.instruction |= (value & 0x000f) << 16;
9314 }
9315
9316 static void
9317 do_t_ssat (void)
9318 {
9319 inst.instruction |= inst.operands[0].reg << 8;
9320 inst.instruction |= inst.operands[1].imm - 1;
9321 inst.instruction |= inst.operands[2].reg << 16;
9322
9323 if (inst.operands[3].present)
9324 {
9325 constraint (inst.reloc.exp.X_op != O_constant,
9326 _("expression too complex"));
9327
9328 if (inst.reloc.exp.X_add_number != 0)
9329 {
9330 if (inst.operands[3].shift_kind == SHIFT_ASR)
9331 inst.instruction |= 0x00200000; /* sh bit */
9332 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9333 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9334 }
9335 inst.reloc.type = BFD_RELOC_UNUSED;
9336 }
9337 }
9338
9339 static void
9340 do_t_ssat16 (void)
9341 {
9342 inst.instruction |= inst.operands[0].reg << 8;
9343 inst.instruction |= inst.operands[1].imm - 1;
9344 inst.instruction |= inst.operands[2].reg << 16;
9345 }
9346
9347 static void
9348 do_t_strex (void)
9349 {
9350 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9351 || inst.operands[2].postind || inst.operands[2].writeback
9352 || inst.operands[2].immisreg || inst.operands[2].shifted
9353 || inst.operands[2].negative,
9354 BAD_ADDR_MODE);
9355
9356 inst.instruction |= inst.operands[0].reg << 8;
9357 inst.instruction |= inst.operands[1].reg << 12;
9358 inst.instruction |= inst.operands[2].reg << 16;
9359 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9360 }
9361
9362 static void
9363 do_t_strexd (void)
9364 {
9365 if (!inst.operands[2].present)
9366 inst.operands[2].reg = inst.operands[1].reg + 1;
9367
9368 constraint (inst.operands[0].reg == inst.operands[1].reg
9369 || inst.operands[0].reg == inst.operands[2].reg
9370 || inst.operands[0].reg == inst.operands[3].reg
9371 || inst.operands[1].reg == inst.operands[2].reg,
9372 BAD_OVERLAP);
9373
9374 inst.instruction |= inst.operands[0].reg;
9375 inst.instruction |= inst.operands[1].reg << 12;
9376 inst.instruction |= inst.operands[2].reg << 8;
9377 inst.instruction |= inst.operands[3].reg << 16;
9378 }
9379
9380 static void
9381 do_t_sxtah (void)
9382 {
9383 inst.instruction |= inst.operands[0].reg << 8;
9384 inst.instruction |= inst.operands[1].reg << 16;
9385 inst.instruction |= inst.operands[2].reg;
9386 inst.instruction |= inst.operands[3].imm << 4;
9387 }
9388
9389 static void
9390 do_t_sxth (void)
9391 {
9392 if (inst.instruction <= 0xffff && inst.size_req != 4
9393 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9394 && (!inst.operands[2].present || inst.operands[2].imm == 0))
9395 {
9396 inst.instruction = THUMB_OP16 (inst.instruction);
9397 inst.instruction |= inst.operands[0].reg;
9398 inst.instruction |= inst.operands[1].reg << 3;
9399 }
9400 else if (unified_syntax)
9401 {
9402 if (inst.instruction <= 0xffff)
9403 inst.instruction = THUMB_OP32 (inst.instruction);
9404 inst.instruction |= inst.operands[0].reg << 8;
9405 inst.instruction |= inst.operands[1].reg;
9406 inst.instruction |= inst.operands[2].imm << 4;
9407 }
9408 else
9409 {
9410 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
9411 _("Thumb encoding does not support rotation"));
9412 constraint (1, BAD_HIREG);
9413 }
9414 }
9415
9416 static void
9417 do_t_swi (void)
9418 {
9419 inst.reloc.type = BFD_RELOC_ARM_SWI;
9420 }
9421
9422 static void
9423 do_t_tb (void)
9424 {
9425 int half;
9426
9427 half = (inst.instruction & 0x10) != 0;
9428 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9429 constraint (!inst.operands[0].immisreg,
9430 _("instruction requires register index"));
9431 constraint (inst.operands[0].imm == 15,
9432 _("PC is not a valid index register"));
9433 constraint (!half && inst.operands[0].shifted,
9434 _("instruction does not allow shifted index"));
9435 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
9436 }
9437
9438 static void
9439 do_t_usat (void)
9440 {
9441 inst.instruction |= inst.operands[0].reg << 8;
9442 inst.instruction |= inst.operands[1].imm;
9443 inst.instruction |= inst.operands[2].reg << 16;
9444
9445 if (inst.operands[3].present)
9446 {
9447 constraint (inst.reloc.exp.X_op != O_constant,
9448 _("expression too complex"));
9449 if (inst.reloc.exp.X_add_number != 0)
9450 {
9451 if (inst.operands[3].shift_kind == SHIFT_ASR)
9452 inst.instruction |= 0x00200000; /* sh bit */
9453
9454 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9455 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9456 }
9457 inst.reloc.type = BFD_RELOC_UNUSED;
9458 }
9459 }
9460
9461 static void
9462 do_t_usat16 (void)
9463 {
9464 inst.instruction |= inst.operands[0].reg << 8;
9465 inst.instruction |= inst.operands[1].imm;
9466 inst.instruction |= inst.operands[2].reg << 16;
9467 }
9468
9469 /* Neon instruction encoder helpers. */
9470
9471 /* Encodings for the different types for various Neon opcodes. */
9472
9473 /* An "invalid" code for the following tables. */
9474 #define N_INV -1u
9475
9476 struct neon_tab_entry
9477 {
9478 unsigned integer;
9479 unsigned float_or_poly;
9480 unsigned scalar_or_imm;
9481 };
9482
9483 /* Map overloaded Neon opcodes to their respective encodings. */
9484 #define NEON_ENC_TAB \
9485 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9486 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9487 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9488 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9489 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9490 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9491 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9492 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9493 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9494 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9495 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9496 /* Register variants of the following two instructions are encoded as
9497 vcge / vcgt with the operands reversed. */ \
9498 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
9499 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
9500 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9501 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9502 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9503 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9504 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9505 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9506 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9507 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9508 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9509 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9510 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9511 X(vshl, 0x0000400, N_INV, 0x0800510), \
9512 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9513 X(vand, 0x0000110, N_INV, 0x0800030), \
9514 X(vbic, 0x0100110, N_INV, 0x0800030), \
9515 X(veor, 0x1000110, N_INV, N_INV), \
9516 X(vorn, 0x0300110, N_INV, 0x0800010), \
9517 X(vorr, 0x0200110, N_INV, 0x0800010), \
9518 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9519 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9520 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9521 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9522 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9523 X(vst1, 0x0000000, 0x0800000, N_INV), \
9524 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9525 X(vst2, 0x0000100, 0x0800100, N_INV), \
9526 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9527 X(vst3, 0x0000200, 0x0800200, N_INV), \
9528 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9529 X(vst4, 0x0000300, 0x0800300, N_INV), \
9530 X(vmovn, 0x1b20200, N_INV, N_INV), \
9531 X(vtrn, 0x1b20080, N_INV, N_INV), \
9532 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9533 X(vqmovun, 0x1b20240, N_INV, N_INV)
9534
9535 enum neon_opc
9536 {
9537 #define X(OPC,I,F,S) N_MNEM_##OPC
9538 NEON_ENC_TAB
9539 #undef X
9540 };
9541
9542 static const struct neon_tab_entry neon_enc_tab[] =
9543 {
9544 #define X(OPC,I,F,S) { (I), (F), (S) }
9545 NEON_ENC_TAB
9546 #undef X
9547 };
9548
9549 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9550 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9551 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9552 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9553 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9554 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9555 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9556 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9557 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
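
/* Usage sketch (assuming inst.instruction currently holds the N_MNEM_*
   value from the opcode table): NEON_ENC_INTEGER (N_MNEM_vadd) picks
   0x0000800, the integer-variant base, while NEON_ENC_FLOAT (N_MNEM_vadd)
   picks 0x0000d00 from the same row; an N_INV entry means that variant
   does not exist for the mnemonic.  */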
9558
9559 /* Shapes for instruction operands. Some (e.g. NS_DDD_QQQ) represent multiple
9560 shapes which an instruction can accept. The following mnemonic characters
9561 are used in the tag names for this enumeration:
9562
9563 D - Neon D<n> register
9564 Q - Neon Q<n> register
9565 I - Immediate
9566 S - Scalar
9567 R - ARM register
9568 L - D<n> register list
9569 */
9570
9571 enum neon_shape
9572 {
9573 NS_DDD_QQQ,
9574 NS_DDD,
9575 NS_QQQ,
9576 NS_DDI_QQI,
9577 NS_DDI,
9578 NS_QQI,
9579 NS_DDS_QQS,
9580 NS_DDS,
9581 NS_QQS,
9582 NS_DD_QQ,
9583 NS_DD,
9584 NS_QQ,
9585 NS_DS_QS,
9586 NS_DS,
9587 NS_QS,
9588 NS_DR_QR,
9589 NS_DR,
9590 NS_QR,
9591 NS_DI_QI,
9592 NS_DI,
9593 NS_QI,
9594 NS_DLD,
9595 NS_DQ,
9596 NS_QD,
9597 NS_DQI,
9598 NS_QDI,
9599 NS_QDD,
9600 NS_QDS,
9601 NS_QQD,
9602 NS_DQQ,
9603 NS_DDDI_QQQI,
9604 NS_DDDI,
9605 NS_QQQI,
9606 NS_IGNORE
9607 };
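
/* Example of a polymorphic shape: an instruction registered with
   NS_DDD_QQQ (three same-size registers) accepts both
   "vadd.i32 d0, d1, d2" and "vadd.i32 q0, q1, q2"; neon_check_shape
   below resolves the former to NS_DDD and the latter to NS_QQQ.
   (VADD is used here only as a plausible illustration.)  */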
9608
9609 /* Bit masks used in type checking given instructions.
9610 'N_EQK' means the type must be the same as (or based on in some way) the key
9611 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
9612 set, various other bits can be set as well in order to modify the meaning of
9613 the type constraint. */
9614
9615 enum neon_type_mask
9616 {
9617 N_S8 = 0x000001,
9618 N_S16 = 0x000002,
9619 N_S32 = 0x000004,
9620 N_S64 = 0x000008,
9621 N_U8 = 0x000010,
9622 N_U16 = 0x000020,
9623 N_U32 = 0x000040,
9624 N_U64 = 0x000080,
9625 N_I8 = 0x000100,
9626 N_I16 = 0x000200,
9627 N_I32 = 0x000400,
9628 N_I64 = 0x000800,
9629 N_8 = 0x001000,
9630 N_16 = 0x002000,
9631 N_32 = 0x004000,
9632 N_64 = 0x008000,
9633 N_P8 = 0x010000,
9634 N_P16 = 0x020000,
9635 N_F32 = 0x040000,
9636 N_KEY = 0x080000, /* key element (main type specifier). */
9637 N_EQK = 0x100000, /* given operand has the same type & size as the key. */
9638 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
9639 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
9640 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
9641 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
9642 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
9643 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
9644 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
9645 N_UTYP = 0,
9646 N_MAX_NONSPECIAL = N_F32
9647 };
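
/* Hedged reading of the modifier bits: if the key operand's type is S16,
   a constraint of N_EQK | N_DBL on another operand asks for S32 (same
   class, doubled size), N_EQK | N_HLF asks for S8, N_EQK | N_UNS asks
   for U16, and a bare N_EQK asks for exactly S16.  */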
9648
9649 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
9650
9651 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9652 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9653 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9654 #define N_SUF_32 (N_SU_32 | N_F32)
9655 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
9656 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
9657
9658 /* Pass this as the first type argument to neon_check_type to ignore types
9659 altogether. */
9660 #define N_IGNORE_TYPE (N_KEY | N_EQK)
9661
9662 /* Check the shape of a Neon instruction (sizes of registers). Returns the more
9663 specific shape when there are two alternatives. For non-polymorphic shapes,
9664 checking is done during operand parsing, so is not implemented here. */
9665
9666 static enum neon_shape
9667 neon_check_shape (enum neon_shape req)
9668 {
9669 #define RR(X) (inst.operands[(X)].isreg)
9670 #define RD(X) (inst.operands[(X)].isreg && !inst.operands[(X)].isquad)
9671 #define RQ(X) (inst.operands[(X)].isreg && inst.operands[(X)].isquad)
9672 #define IM(X) (!inst.operands[(X)].isreg && !inst.operands[(X)].isscalar)
9673 #define SC(X) (!inst.operands[(X)].isreg && inst.operands[(X)].isscalar)
9674
9675 /* Fix missing optional operands. FIXME: we don't know at this point how
9676 many arguments we should have, so this makes the assumption that we have
9677 > 1. This is true of all current Neon opcodes, I think, but may not be
9678 true in the future. */
9679 if (!inst.operands[1].present)
9680 inst.operands[1] = inst.operands[0];
9681
9682 switch (req)
9683 {
9684 case NS_DDD_QQQ:
9685 {
9686 if (RD(0) && RD(1) && RD(2))
9687 return NS_DDD;
9688 else if (RQ(0) && RQ(1) && RQ(2))
9689 return NS_QQQ;
9690 else
9691 first_error (_("expected <Qd>, <Qn>, <Qm> or <Dd>, <Dn>, <Dm> "
9692 "operands"));
9693 }
9694 break;
9695
9696 case NS_DDI_QQI:
9697 {
9698 if (RD(0) && RD(1) && IM(2))
9699 return NS_DDI;
9700 else if (RQ(0) && RQ(1) && IM(2))
9701 return NS_QQI;
9702 else
9703 first_error (_("expected <Qd>, <Qn>, #<imm> or <Dd>, <Dn>, #<imm> "
9704 "operands"));
9705 }
9706 break;
9707
9708 case NS_DDDI_QQQI:
9709 {
9710 if (RD(0) && RD(1) && RD(2) && IM(3))
9711 return NS_DDDI;
9712 if (RQ(0) && RQ(1) && RQ(2) && IM(3))
9713 return NS_QQQI;
9714 else
9715 first_error (_("expected <Qd>, <Qn>, <Qm>, #<imm> or "
9716 "<Dd>, <Dn>, <Dm>, #<imm> operands"));
9717 }
9718 break;
9719
9720 case NS_DDS_QQS:
9721 {
9722 if (RD(0) && RD(1) && SC(2))
9723 return NS_DDS;
9724 else if (RQ(0) && RQ(1) && SC(2))
9725 return NS_QQS;
9726 else
9727 first_error (_("expected <Qd>, <Qn>, <Dm[x]> or <Dd>, <Dn>, <Dm[x]> "
9728 "operands"));
9729 }
9730 break;
9731
9732 case NS_DD_QQ:
9733 {
9734 if (RD(0) && RD(1))
9735 return NS_DD;
9736 else if (RQ(0) && RQ(1))
9737 return NS_QQ;
9738 else
9739 first_error (_("expected <Qd>, <Qm> or <Dd>, <Dm> operands"));
9740 }
9741 break;
9742
9743 case NS_DS_QS:
9744 {
9745 if (RD(0) && SC(1))
9746 return NS_DS;
9747 else if (RQ(0) && SC(1))
9748 return NS_QS;
9749 else
9750 first_error (_("expected <Qd>, <Dm[x]> or <Dd>, <Dm[x]> operands"));
9751 }
9752 break;
9753
9754 case NS_DR_QR:
9755 {
9756 if (RD(0) && RR(1))
9757 return NS_DR;
9758 else if (RQ(0) && RR(1))
9759 return NS_QR;
9760 else
9761 first_error (_("expected <Qd>, <Rm> or <Dd>, <Rm> operands"));
9762 }
9763 break;
9764
9765 case NS_DI_QI:
9766 {
9767 if (RD(0) && IM(1))
9768 return NS_DI;
9769 else if (RQ(0) && IM(1))
9770 return NS_QI;
9771 else
9772 first_error (_("expected <Qd>, #<imm> or <Dd>, #<imm> operands"));
9773 }
9774 break;
9775
9776 default:
9777 abort ();
9778 }
9779
9780 return req;
9781 #undef RR
9782 #undef RD
9783 #undef RQ
9784 #undef IM
9785 #undef SC
9786 }
9787
9788 static void
9789 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
9790 unsigned *g_size)
9791 {
9792 /* Allow modification to be made to types which are constrained to be
9793 based on the key element, based on bits set alongside N_EQK. */
9794 if ((typebits & N_EQK) != 0)
9795 {
9796 if ((typebits & N_HLF) != 0)
9797 *g_size /= 2;
9798 else if ((typebits & N_DBL) != 0)
9799 *g_size *= 2;
9800 if ((typebits & N_SGN) != 0)
9801 *g_type = NT_signed;
9802 else if ((typebits & N_UNS) != 0)
9803 *g_type = NT_unsigned;
9804 else if ((typebits & N_INT) != 0)
9805 *g_type = NT_integer;
9806 else if ((typebits & N_FLT) != 0)
9807 *g_type = NT_float;
9808 else if ((typebits & N_SIZ) != 0)
9809 *g_type = NT_untyped;
9810 }
9811 }
9812
9813 /* Return the key operand's type, promoted by the bits set in THISARG. KEY should be the "key"
9814 operand type, i.e. the single type specified in a Neon instruction when it
9815 is the only one given. */
9816
9817 static struct neon_type_el
9818 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
9819 {
9820 struct neon_type_el dest = *key;
9821
9822 assert ((thisarg & N_EQK) != 0);
9823
9824 neon_modify_type_size (thisarg, &dest.type, &dest.size);
9825
9826 return dest;
9827 }
9828
9829 /* Convert Neon type and size into compact bitmask representation. */
9830
9831 static enum neon_type_mask
9832 type_chk_of_el_type (enum neon_el_type type, unsigned size)
9833 {
9834 switch (type)
9835 {
9836 case NT_untyped:
9837 switch (size)
9838 {
9839 case 8: return N_8;
9840 case 16: return N_16;
9841 case 32: return N_32;
9842 case 64: return N_64;
9843 default: ;
9844 }
9845 break;
9846
9847 case NT_integer:
9848 switch (size)
9849 {
9850 case 8: return N_I8;
9851 case 16: return N_I16;
9852 case 32: return N_I32;
9853 case 64: return N_I64;
9854 default: ;
9855 }
9856 break;
9857
9858 case NT_float:
9859 if (size == 32)
9860 return N_F32;
9861 break;
9862
9863 case NT_poly:
9864 switch (size)
9865 {
9866 case 8: return N_P8;
9867 case 16: return N_P16;
9868 default: ;
9869 }
9870 break;
9871
9872 case NT_signed:
9873 switch (size)
9874 {
9875 case 8: return N_S8;
9876 case 16: return N_S16;
9877 case 32: return N_S32;
9878 case 64: return N_S64;
9879 default: ;
9880 }
9881 break;
9882
9883 case NT_unsigned:
9884 switch (size)
9885 {
9886 case 8: return N_U8;
9887 case 16: return N_U16;
9888 case 32: return N_U32;
9889 case 64: return N_U64;
9890 default: ;
9891 }
9892 break;
9893
9894 default: ;
9895 }
9896
9897 return N_UTYP;
9898 }
9899
9900 /* Convert compact Neon bitmask type representation to a type and size. Only
9901 handles the case where a single bit is set in the mask. */
9902
9903 static int
9904 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
9905 enum neon_type_mask mask)
9906 {
9907 if ((mask & N_EQK) != 0)
9908 return FAIL;
9909
9910 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
9911 *size = 8;
9912 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
9913 *size = 16;
9914 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
9915 *size = 32;
9916 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64)) != 0)
9917 *size = 64;
9918 else
9919 return FAIL;
9920
9921 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
9922 *type = NT_signed;
9923 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
9924 *type = NT_unsigned;
9925 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
9926 *type = NT_integer;
9927 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
9928 *type = NT_untyped;
9929 else if ((mask & (N_P8 | N_P16)) != 0)
9930 *type = NT_poly;
9931 else if ((mask & N_F32) != 0)
9932 *type = NT_float;
9933 else
9934 return FAIL;
9935
9936 return SUCCESS;
9937 }
9938
9939 /* Modify a bitmask of allowed types. This is only needed for type
9940 relaxation. */
9941
9942 static unsigned
9943 modify_types_allowed (unsigned allowed, unsigned mods)
9944 {
9945 unsigned size;
9946 enum neon_el_type type;
9947 unsigned destmask;
9948 int i;
9949
9950 destmask = 0;
9951
9952 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
9953 {
9954 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
9955 {
9956 neon_modify_type_size (mods, &type, &size);
9957 destmask |= type_chk_of_el_type (type, size);
9958 }
9959 }
9960
9961 return destmask;
9962 }
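
/* Worked example: modify_types_allowed (N_SU_32, N_EQK | N_DBL) doubles
   every element size in the mask, so S8|S16|S32|U8|U16|U32 maps to
   S16|S32|S64|U16|U32|U64, i.e. N_SU_16_64.  */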
9963
9964 /* Check type and return type classification.
9965 The manual states (paraphrase): If one datatype is given, it indicates the
9966 type given in:
9967 - the second operand, if there is one
9968 - the operand, if there is no second operand
9969 - the result, if there are no operands.
9970 This isn't quite good enough though, so we use a concept of a "key" datatype
9971 which is set on a per-instruction basis, which is the one which matters when
9972 only one data type is written.
9973 Note: this function has side-effects (e.g. filling in missing operands). All
9974 Neon instructions should call it before performing bit encoding.
9975 */
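
/* A hedged sketch of how the key type is applied: suppose an
   instruction's per-operand constraints are (N_EQK, N_EQK,
   N_SU_32 | N_KEY) -- hypothetical values, chosen only for illustration.
   For "xxx.s16 q0, q1, q2" the single .s16 attaches to the key (third)
   operand on the first pass; on the second pass the two N_EQK operands
   must agree with NT_signed/16, and a mismatching explicit type would
   produce the "inconsistent types" diagnostic below.  */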
9976
9977 static struct neon_type_el
9978 neon_check_type (unsigned els, enum neon_shape ns, ...)
9979 {
9980 va_list ap;
9981 unsigned i, pass, key_el = 0;
9982 unsigned types[NEON_MAX_TYPE_ELS];
9983 enum neon_el_type k_type = NT_invtype;
9984 unsigned k_size = -1u;
9985 struct neon_type_el badtype = {NT_invtype, -1};
9986 unsigned key_allowed = 0;
9987
9988 /* The optional register operand in a Neon instruction is always operand 1.
9989 Fill in the missing operand here, if it was omitted. */
9990 if (els > 1 && !inst.operands[1].present)
9991 inst.operands[1] = inst.operands[0];
9992
9993 /* Suck up all the varargs. */
9994 va_start (ap, ns);
9995 for (i = 0; i < els; i++)
9996 {
9997 unsigned thisarg = va_arg (ap, unsigned);
9998 if (thisarg == N_IGNORE_TYPE)
9999 {
10000 va_end (ap);
10001 return badtype;
10002 }
10003 types[i] = thisarg;
10004 if ((thisarg & N_KEY) != 0)
10005 key_el = i;
10006 }
10007 va_end (ap);
10008
10009 if (inst.vectype.elems > 0)
10010 for (i = 0; i < els; i++)
10011 if (inst.operands[i].vectype.type != NT_invtype)
10012 {
10013 first_error (_("types specified in both the mnemonic and operands"));
10014 return badtype;
10015 }
10016
10017 /* Duplicate inst.vectype elements here as necessary.
10018 FIXME: No idea if this is exactly the same as the ARM assembler,
10019 particularly when an insn takes one register and one non-register
10020 operand. */
10021 if (inst.vectype.elems == 1 && els > 1)
10022 {
10023 unsigned j;
10024 inst.vectype.elems = els;
10025 inst.vectype.el[key_el] = inst.vectype.el[0];
10026 for (j = 0; j < els; j++)
10027 if (j != key_el)
10028 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10029 types[j]);
10030 }
10031 else if (inst.vectype.elems == 0 && els > 0)
10032 {
10033 unsigned j;
10034 /* No types were given after the mnemonic, so look for types specified
10035 after each operand. We allow some flexibility here; as long as the
10036 "key" operand has a type, we can infer the others. */
10037 for (j = 0; j < els; j++)
10038 if (inst.operands[j].vectype.type != NT_invtype)
10039 inst.vectype.el[j] = inst.operands[j].vectype;
10040
10041 if (inst.operands[key_el].vectype.type != NT_invtype)
10042 {
10043 for (j = 0; j < els; j++)
10044 if (inst.operands[j].vectype.type == NT_invtype)
10045 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10046 types[j]);
10047 }
10048 else
10049 {
10050 first_error (_("operand types can't be inferred"));
10051 return badtype;
10052 }
10053 }
10054 else if (inst.vectype.elems != els)
10055 {
10056 first_error (_("type specifier has the wrong number of parts"));
10057 return badtype;
10058 }
10059
10060 for (pass = 0; pass < 2; pass++)
10061 {
10062 for (i = 0; i < els; i++)
10063 {
10064 unsigned thisarg = types[i];
10065 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
10066 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
10067 enum neon_el_type g_type = inst.vectype.el[i].type;
10068 unsigned g_size = inst.vectype.el[i].size;
10069
10070 /* Decay more-specific signed & unsigned types to sign-insensitive
10071 integer types if sign-specific variants are unavailable. */
10072 if ((g_type == NT_signed || g_type == NT_unsigned)
10073 && (types_allowed & N_SU_ALL) == 0)
10074 g_type = NT_integer;
10075
10076 /* If only untyped args are allowed, decay any more specific types to
10077 them. Some instructions only care about signs for some element
10078 sizes, so handle that properly. */
10079 if ((g_size == 8 && (types_allowed & N_8) != 0)
10080 || (g_size == 16 && (types_allowed & N_16) != 0)
10081 || (g_size == 32 && (types_allowed & N_32) != 0)
10082 || (g_size == 64 && (types_allowed & N_64) != 0))
10083 g_type = NT_untyped;
10084
10085 if (pass == 0)
10086 {
10087 if ((thisarg & N_KEY) != 0)
10088 {
10089 k_type = g_type;
10090 k_size = g_size;
10091 key_allowed = thisarg & ~N_KEY;
10092 }
10093 }
10094 else
10095 {
10096 if ((thisarg & N_EQK) == 0)
10097 {
10098 unsigned given_type = type_chk_of_el_type (g_type, g_size);
10099
10100 if ((given_type & types_allowed) == 0)
10101 {
10102 first_error (_("bad type in Neon instruction"));
10103 return badtype;
10104 }
10105 }
10106 else
10107 {
10108 enum neon_el_type mod_k_type = k_type;
10109 unsigned mod_k_size = k_size;
10110 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
10111 if (g_type != mod_k_type || g_size != mod_k_size)
10112 {
10113 first_error (_("inconsistent types in Neon instruction"));
10114 return badtype;
10115 }
10116 }
10117 }
10118 }
10119 }
10120
10121 return inst.vectype.el[key_el];
10122 }
10123
10124 /* Fix up Neon data-processing instructions, ORing in the correct bits for
10125 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
10126
10127 static unsigned
10128 neon_dp_fixup (unsigned i)
10129 {
10130 if (thumb_mode)
10131 {
10132 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
10133 if (i & (1 << 24))
10134 i |= 1 << 28;
10135
10136 i &= ~(1 << 24);
10137
10138 i |= 0xef000000;
10139 }
10140 else
10141 i |= 0xf2000000;
10142
10143 return i;
10144 }
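/* Rough example of the fixup above (illustrative only): an encoding with the
   U bit (bit 24) set becomes 0xffXXXXXX in Thumb mode (0xef000000 with the
   bit moved up to bit 28) and 0xf3XXXXXX in ARM mode (0xf2000000 with the U
   bit still at bit 24).  */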
10145
10146 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
10147 (0, 1, 2, 3). */
10148
10149 static unsigned
10150 neon_logbits (unsigned x)
10151 {
10152 return ffs (x) - 4;
10153 }
10154
10155 #define LOW4(R) ((R) & 0xf)
10156 #define HI1(R) (((R) >> 4) & 1)
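/* For example (illustrative), register d17 (binary 10001) splits into
   LOW4 (17) == 1, which goes in the 4-bit register field, and HI1 (17) == 1,
   which goes in the corresponding D/N/M extension bit.  */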
10157
10158 /* Encode insns with bit pattern:
10159
10160 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10161 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
10162
10163 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
10164 different meaning for some instruction. */
10165
10166 static void
10167 neon_three_same (int isquad, int ubit, int size)
10168 {
10169 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10170 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10171 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10172 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10173 inst.instruction |= LOW4 (inst.operands[2].reg);
10174 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
10175 inst.instruction |= (isquad != 0) << 6;
10176 inst.instruction |= (ubit != 0) << 24;
10177 if (size != -1)
10178 inst.instruction |= neon_logbits (size) << 20;
10179
10180 inst.instruction = neon_dp_fixup (inst.instruction);
10181 }
10182
10183 /* Encode instructions of the form:
10184
10185 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10186 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10187
10188 Don't write size if SIZE == -1. */
10189
10190 static void
10191 neon_two_same (int qbit, int ubit, int size)
10192 {
10193 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10194 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10195 inst.instruction |= LOW4 (inst.operands[1].reg);
10196 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10197 inst.instruction |= (qbit != 0) << 6;
10198 inst.instruction |= (ubit != 0) << 24;
10199
10200 if (size != -1)
10201 inst.instruction |= neon_logbits (size) << 18;
10202
10203 inst.instruction = neon_dp_fixup (inst.instruction);
10204 }
10205
10206 /* Neon instruction encoders, in approximate order of appearance. */
10207
10208 static void
10209 do_neon_dyadic_i_su (void)
10210 {
10211 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10212 struct neon_type_el et = neon_check_type (3, rs,
10213 N_EQK, N_EQK, N_SU_32 | N_KEY);
10214 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10215 }
10216
10217 static void
10218 do_neon_dyadic_i64_su (void)
10219 {
10220 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10221 struct neon_type_el et = neon_check_type (3, rs,
10222 N_EQK, N_EQK, N_SU_ALL | N_KEY);
10223 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10224 }
10225
10226 static void
10227 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
10228 unsigned immbits)
10229 {
10230 unsigned size = et.size >> 3;
10231 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10232 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10233 inst.instruction |= LOW4 (inst.operands[1].reg);
10234 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10235 inst.instruction |= (isquad != 0) << 6;
10236 inst.instruction |= immbits << 16;
10237 inst.instruction |= (size >> 3) << 7;
10238 inst.instruction |= (size & 0x7) << 19;
10239 if (write_ubit)
10240 inst.instruction |= (uval != 0) << 24;
10241
10242 inst.instruction = neon_dp_fixup (inst.instruction);
10243 }
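/* A rough worked example (assuming the standard imm6 shift encoding): for
   VSHR.S32 with a shift of 5 the caller passes immbits = 32 - 5 = 27, and
   size here is 32 >> 3 == 4, so bit 21 is set via (size & 0x7) << 19 while
   the L bit (bit 7) stays clear; the resulting imm6 field reads
   0b111011 == 59 == 64 - 5.  */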
10244
10245 static void
10246 do_neon_shl_imm (void)
10247 {
10248 if (!inst.operands[2].isreg)
10249 {
10250 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10251 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
10252 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10253 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, inst.operands[2].imm);
10254 }
10255 else
10256 {
10257 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10258 struct neon_type_el et = neon_check_type (3, rs,
10259 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10260 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10261 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10262 }
10263 }
10264
10265 static void
10266 do_neon_qshl_imm (void)
10267 {
10268 if (!inst.operands[2].isreg)
10269 {
10270 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10271 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
10272 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10273 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
10274 inst.operands[2].imm);
10275 }
10276 else
10277 {
10278 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10279 struct neon_type_el et = neon_check_type (3, rs,
10280 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10281 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10282 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10283 }
10284 }
10285
10286 static int
10287 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
10288 {
10289 /* Handle .I8 and .I64 as pseudo-instructions. */
10290 switch (size)
10291 {
10292 case 8:
10293 /* Unfortunately, this will make everything apart from zero out-of-range.
10294 FIXME: is this the intended semantics? There doesn't seem to be much
10295 point in accepting .I8 if so. */
10296 immediate |= immediate << 8;
10297 size = 16;
10298 break;
10299 case 64:
10300 /* Similarly, anything other than zero will be replicated in bits [63:32],
10301 which probably isn't what we want if we specified .I64. */
10302 if (immediate != 0)
10303 goto bad_immediate;
10304 size = 32;
10305 break;
10306 default: ;
10307 }
10308
10309 if (immediate == (immediate & 0x000000ff))
10310 {
10311 *immbits = immediate;
10312 return (size == 16) ? 0x9 : 0x1;
10313 }
10314 else if (immediate == (immediate & 0x0000ff00))
10315 {
10316 *immbits = immediate >> 8;
10317 return (size == 16) ? 0xb : 0x3;
10318 }
10319 else if (immediate == (immediate & 0x00ff0000))
10320 {
10321 *immbits = immediate >> 16;
10322 return 0x5;
10323 }
10324 else if (immediate == (immediate & 0xff000000))
10325 {
10326 *immbits = immediate >> 24;
10327 return 0x7;
10328 }
10329
10330 bad_immediate:
10331 first_error (_("immediate value out of range"));
10332 return FAIL;
10333 }
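/* Rough example (simply following the code above): VBIC.I32 with immediate
   0xAB00 yields *immbits == 0xAB and cmode 0x3; the same immediate with .I16
   would yield cmode 0xb.  */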
10334
10335 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
10336 A, B, C, D. */
10337
10338 static int
10339 neon_bits_same_in_bytes (unsigned imm)
10340 {
10341 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
10342 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
10343 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
10344 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
10345 }
10346
10347 /* For immediate of above form, return 0bABCD. */
10348
10349 static unsigned
10350 neon_squash_bits (unsigned imm)
10351 {
10352 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
10353 | ((imm & 0x01000000) >> 21);
10354 }
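/* For example (illustrative): 0x00ff00ff has byte pattern A=0, B=0xff, C=0,
   D=0xff, so neon_bits_same_in_bytes returns true and neon_squash_bits
   returns 0b0101.  */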
10355
10356 /* Compress quarter-float representation to 0b...000 abcdefgh. */
10357
10358 static unsigned
10359 neon_qfloat_bits (unsigned imm)
10360 {
10361 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
10362 }
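/* Rough example (assuming an IEEE single-precision input): 1.0f is
   0x3f800000, so neon_qfloat_bits returns 0x70, i.e. abcdefgh ==
   0b01110000.  */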
10363
10364 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10365 the instruction. *OP is passed as the initial value of the op field, and
10366 may be set to a different value depending on the constant (e.g.
10367 "MOV I64, 0bAAAAAAAABBBB..." uses OP = 1 despite being MOV, not
10368 MVN). */
10369
10370 static int
10371 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
10372 int *op, int size, enum neon_el_type type)
10373 {
10374 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
10375 {
10376 if (size != 32 || *op == 1)
10377 return FAIL;
10378 *immbits = neon_qfloat_bits (immlo);
10379 return 0xf;
10380 }
10381 else if (size == 64 && neon_bits_same_in_bytes (immhi)
10382 && neon_bits_same_in_bytes (immlo))
10383 {
10384 /* Check this one first so we don't have to bother with immhi in later
10385 tests. */
10386 if (*op == 1)
10387 return FAIL;
10388 *immbits = (neon_squash_bits (immhi) << 4) | neon_squash_bits (immlo);
10389 *op = 1;
10390 return 0xe;
10391 }
10392 else if (immhi != 0)
10393 return FAIL;
10394 else if (immlo == (immlo & 0x000000ff))
10395 {
10396 /* 64-bit case was already handled. Don't allow MVN with 8-bit
10397 immediate. */
10398 if ((size != 8 && size != 16 && size != 32)
10399 || (size == 8 && *op == 1))
10400 return FAIL;
10401 *immbits = immlo;
10402 return (size == 8) ? 0xe : (size == 16) ? 0x8 : 0x0;
10403 }
10404 else if (immlo == (immlo & 0x0000ff00))
10405 {
10406 if (size != 16 && size != 32)
10407 return FAIL;
10408 *immbits = immlo >> 8;
10409 return (size == 16) ? 0xa : 0x2;
10410 }
10411 else if (immlo == (immlo & 0x00ff0000))
10412 {
10413 if (size != 32)
10414 return FAIL;
10415 *immbits = immlo >> 16;
10416 return 0x4;
10417 }
10418 else if (immlo == (immlo & 0xff000000))
10419 {
10420 if (size != 32)
10421 return FAIL;
10422 *immbits = immlo >> 24;
10423 return 0x6;
10424 }
10425 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
10426 {
10427 if (size != 32)
10428 return FAIL;
10429 *immbits = (immlo >> 8) & 0xff;
10430 return 0xc;
10431 }
10432 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
10433 {
10434 if (size != 32)
10435 return FAIL;
10436 *immbits = (immlo >> 16) & 0xff;
10437 return 0xd;
10438 }
10439
10440 return FAIL;
10441 }
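/* Rough example (simply following the code above): VMOV.I64 with immediate
   0xff00ff00ff00ff00 has immhi == immlo == 0xff00ff00, every byte all-ones
   or all-zeroes, so *immbits becomes 0xaa, *op is forced to 1 and the
   returned cmode is 0xe.  */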
10442
10443 /* Write immediate bits [7:0] to the following locations:
10444
10445 |28/24|23 19|18 16|15 4|3 0|
10446 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
10447
10448 This function is used by VMOV/VMVN/VORR/VBIC. */
10449
10450 static void
10451 neon_write_immbits (unsigned immbits)
10452 {
10453 inst.instruction |= immbits & 0xf;
10454 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
10455 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
10456 }
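/* Rough example (illustrative only): immbits == 0xab == 0b10101011 is
   written as a == 1 at bit 24, bcd == 0b010 at bits 18:16 and
   efgh == 0b1011 at bits 3:0.  */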
10457
10458 /* Invert low-order SIZE bits of XHI:XLO. */
10459
10460 static void
10461 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
10462 {
10463 unsigned immlo = xlo ? *xlo : 0;
10464 unsigned immhi = xhi ? *xhi : 0;
10465
10466 switch (size)
10467 {
10468 case 8:
10469 immlo = (~immlo) & 0xff;
10470 break;
10471
10472 case 16:
10473 immlo = (~immlo) & 0xffff;
10474 break;
10475
10476 case 64:
10477 immhi = (~immhi) & 0xffffffff;
10478 /* fall through. */
10479
10480 case 32:
10481 immlo = (~immlo) & 0xffffffff;
10482 break;
10483
10484 default:
10485 abort ();
10486 }
10487
10488 if (xlo)
10489 *xlo = immlo;
10490
10491 if (xhi)
10492 *xhi = immhi;
10493 }
10494
10495 static void
10496 do_neon_logic (void)
10497 {
10498 if (inst.operands[2].present && inst.operands[2].isreg)
10499 {
10500 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10501 neon_check_type (3, rs, N_IGNORE_TYPE);
10502 /* U bit and size field were set as part of the bitmask. */
10503 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10504 neon_three_same (rs == NS_QQQ, 0, -1);
10505 }
10506 else
10507 {
10508 enum neon_shape rs = neon_check_shape (NS_DI_QI);
10509 struct neon_type_el et = neon_check_type (1, rs, N_I8 | N_I16 | N_I32
10510 | N_I64 | N_F32);
10511 enum neon_opc opcode = inst.instruction & 0x0fffffff;
10512 unsigned immbits;
10513 int cmode;
10514
10515 if (et.type == NT_invtype)
10516 return;
10517
10518 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10519
10520 switch (opcode)
10521 {
10522 case N_MNEM_vbic:
10523 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10524 et.size);
10525 break;
10526
10527 case N_MNEM_vorr:
10528 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10529 et.size);
10530 break;
10531
10532 case N_MNEM_vand:
10533 /* Pseudo-instruction for VBIC. */
10534 immbits = inst.operands[1].imm;
10535 neon_invert_size (&immbits, 0, et.size);
10536 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10537 break;
10538
10539 case N_MNEM_vorn:
10540 /* Pseudo-instruction for VORR. */
10541 immbits = inst.operands[1].imm;
10542 neon_invert_size (&immbits, 0, et.size);
10543 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10544 break;
10545
10546 default:
10547 abort ();
10548 }
10549
10550 if (cmode == FAIL)
10551 return;
10552
10553 inst.instruction |= (rs == NS_QI) << 6;
10554 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10555 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10556 inst.instruction |= cmode << 8;
10557 neon_write_immbits (immbits);
10558
10559 inst.instruction = neon_dp_fixup (inst.instruction);
10560 }
10561 }
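/* Rough example of the pseudo-instruction handling above (illustrative
   only): "vand.i16 d0, #0xff00" has no immediate encoding of its own, so the
   immediate is inverted to 0x00ff and the insn is assembled as the
   equivalent VBIC, with neon_cmode_for_logic_imm then returning cmode 0x9
   and immbits 0xff.  */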
10562
10563 static void
10564 do_neon_bitfield (void)
10565 {
10566 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10567 neon_check_type (3, rs, N_IGNORE_TYPE);
10568 neon_three_same (rs == NS_QQQ, 0, -1);
10569 }
10570
10571 static void
10572 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
10573 unsigned destbits)
10574 {
10575 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10576 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
10577 types | N_KEY);
10578 if (et.type == NT_float)
10579 {
10580 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
10581 neon_three_same (rs == NS_QQQ, 0, -1);
10582 }
10583 else
10584 {
10585 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10586 neon_three_same (rs == NS_QQQ, et.type == ubit_meaning, et.size);
10587 }
10588 }
10589
10590 static void
10591 do_neon_dyadic_if_su (void)
10592 {
10593 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10594 }
10595
10596 static void
10597 do_neon_dyadic_if_su_d (void)
10598 {
10599 /* This version only allows D registers, but that constraint is enforced during
10600 operand parsing so we don't need to do anything extra here. */
10601 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10602 }
10603
10604 static void
10605 do_neon_dyadic_if_i (void)
10606 {
10607 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10608 }
10609
10610 static void
10611 do_neon_dyadic_if_i_d (void)
10612 {
10613 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10614 }
10615
10616 static void
10617 do_neon_addsub_if_i (void)
10618 {
10619 /* The "untyped" case can't happen. Do this to stop the "U" bit being
10620 affected if we specify unsigned args. */
10621 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
10622 }
10623
10624 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
10625 result to be:
10626 V<op> A,B (A is operand 0, B is operand 2)
10627 to mean:
10628 V<op> A,B,A
10629 not:
10630 V<op> A,B,B
10631 so handle that case specially. */
10632
10633 static void
10634 neon_exchange_operands (void)
10635 {
10636 void *scratch = alloca (sizeof (inst.operands[0]));
10637 if (inst.operands[1].present)
10638 {
10639 /* Swap operands[1] and operands[2]. */
10640 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
10641 inst.operands[1] = inst.operands[2];
10642 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
10643 }
10644 else
10645 {
10646 inst.operands[1] = inst.operands[2];
10647 inst.operands[2] = inst.operands[0];
10648 }
10649 }
10650
10651 static void
10652 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
10653 {
10654 if (inst.operands[2].isreg)
10655 {
10656 if (invert)
10657 neon_exchange_operands ();
10658 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
10659 }
10660 else
10661 {
10662 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10663 struct neon_type_el et = neon_check_type (2, rs,
10664 N_EQK | N_SIZ, immtypes | N_KEY);
10665
10666 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10667 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10668 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10669 inst.instruction |= LOW4 (inst.operands[1].reg);
10670 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10671 inst.instruction |= (rs == NS_QQI) << 6;
10672 inst.instruction |= (et.type == NT_float) << 10;
10673 inst.instruction |= neon_logbits (et.size) << 18;
10674
10675 inst.instruction = neon_dp_fixup (inst.instruction);
10676 }
10677 }
10678
10679 static void
10680 do_neon_cmp (void)
10681 {
10682 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
10683 }
10684
10685 static void
10686 do_neon_cmp_inv (void)
10687 {
10688 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
10689 }
10690
10691 static void
10692 do_neon_ceq (void)
10693 {
10694 neon_compare (N_IF_32, N_IF_32, FALSE);
10695 }
10696
10697 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
10698 scalars, which are encoded in 5 bits, M : Rm.
10699 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
10700 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
10701 index in M. */
10702
10703 static unsigned
10704 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
10705 {
10706 unsigned regno = NEON_SCALAR_REG (scalar);
10707 unsigned elno = NEON_SCALAR_INDEX (scalar);
10708
10709 switch (elsize)
10710 {
10711 case 16:
10712 if (regno > 7 || elno > 3)
10713 goto bad_scalar;
10714 return regno | (elno << 3);
10715
10716 case 32:
10717 if (regno > 15 || elno > 1)
10718 goto bad_scalar;
10719 return regno | (elno << 4);
10720
10721 default:
10722 bad_scalar:
10723 first_error (_("scalar out of range for multiply instruction"));
10724 }
10725
10726 return 0;
10727 }
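/* Rough example (simply following the code above): the scalar d2[1] used
   with a 32-bit element gives regno == 2, elno == 1 and an encoding of
   2 | (1 << 4) == 0x12, i.e. M:Rm == 1:0010; with a 16-bit element the same
   scalar encodes as 2 | (1 << 3) == 0x0a.  */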
10728
10729 /* Encode multiply / multiply-accumulate scalar instructions. */
10730
10731 static void
10732 neon_mul_mac (struct neon_type_el et, int ubit)
10733 {
10734 unsigned scalar;
10735
10736 /* Give a more helpful error message if we have an invalid type. */
10737 if (et.type == NT_invtype)
10738 return;
10739
10740 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
10741 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10742 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10743 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10744 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10745 inst.instruction |= LOW4 (scalar);
10746 inst.instruction |= HI1 (scalar) << 5;
10747 inst.instruction |= (et.type == NT_float) << 8;
10748 inst.instruction |= neon_logbits (et.size) << 20;
10749 inst.instruction |= (ubit != 0) << 24;
10750
10751 inst.instruction = neon_dp_fixup (inst.instruction);
10752 }
10753
10754 static void
10755 do_neon_mac_maybe_scalar (void)
10756 {
10757 if (inst.operands[2].isscalar)
10758 {
10759 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10760 struct neon_type_el et = neon_check_type (3, rs,
10761 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
10762 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10763 neon_mul_mac (et, rs == NS_QQS);
10764 }
10765 else
10766 do_neon_dyadic_if_i ();
10767 }
10768
10769 static void
10770 do_neon_tst (void)
10771 {
10772 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10773 struct neon_type_el et = neon_check_type (3, rs,
10774 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
10775 neon_three_same (rs == NS_QQQ, 0, et.size);
10776 }
10777
10778 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
10779 same types as the MAC equivalents. The polynomial type for this instruction
10780 is encoded the same as the integer type. */
10781
10782 static void
10783 do_neon_mul (void)
10784 {
10785 if (inst.operands[2].isscalar)
10786 do_neon_mac_maybe_scalar ();
10787 else
10788 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
10789 }
10790
10791 static void
10792 do_neon_qdmulh (void)
10793 {
10794 if (inst.operands[2].isscalar)
10795 {
10796 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10797 struct neon_type_el et = neon_check_type (3, rs,
10798 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10799 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10800 neon_mul_mac (et, rs == NS_QQS);
10801 }
10802 else
10803 {
10804 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10805 struct neon_type_el et = neon_check_type (3, rs,
10806 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10807 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10808 /* The U bit (rounding) comes from the bitmask. */
10809 neon_three_same (rs == NS_QQQ, 0, et.size);
10810 }
10811 }
10812
10813 static void
10814 do_neon_fcmp_absolute (void)
10815 {
10816 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10817 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10818 /* The size field comes from the bitmask. */
10819 neon_three_same (rs == NS_QQQ, 1, -1);
10820 }
10821
10822 static void
10823 do_neon_fcmp_absolute_inv (void)
10824 {
10825 neon_exchange_operands ();
10826 do_neon_fcmp_absolute ();
10827 }
10828
10829 static void
10830 do_neon_step (void)
10831 {
10832 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10833 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10834 neon_three_same (rs == NS_QQQ, 0, -1);
10835 }
10836
10837 static void
10838 do_neon_abs_neg (void)
10839 {
10840 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10841 struct neon_type_el et = neon_check_type (3, rs,
10842 N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
10843 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10844 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10845 inst.instruction |= LOW4 (inst.operands[1].reg);
10846 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10847 inst.instruction |= (rs == NS_QQ) << 6;
10848 inst.instruction |= (et.type == NT_float) << 10;
10849 inst.instruction |= neon_logbits (et.size) << 18;
10850
10851 inst.instruction = neon_dp_fixup (inst.instruction);
10852 }
10853
10854 static void
10855 do_neon_sli (void)
10856 {
10857 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10858 struct neon_type_el et = neon_check_type (2, rs,
10859 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10860 int imm = inst.operands[2].imm;
10861 constraint (imm < 0 || (unsigned)imm >= et.size,
10862 _("immediate out of range for insert"));
10863 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10864 }
10865
10866 static void
10867 do_neon_sri (void)
10868 {
10869 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10870 struct neon_type_el et = neon_check_type (2, rs,
10871 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10872 int imm = inst.operands[2].imm;
10873 constraint (imm < 1 || (unsigned)imm > et.size,
10874 _("immediate out of range for insert"));
10875 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, et.size - imm);
10876 }
10877
10878 static void
10879 do_neon_qshlu_imm (void)
10880 {
10881 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10882 struct neon_type_el et = neon_check_type (2, rs,
10883 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
10884 int imm = inst.operands[2].imm;
10885 constraint (imm < 0 || (unsigned)imm >= et.size,
10886 _("immediate out of range for shift"));
10887 /* Only encodes the 'U present' variant of the instruction.
10888 In this case, signed types have OP (bit 8) set to 0.
10889 Unsigned types have OP set to 1. */
10890 inst.instruction |= (et.type == NT_unsigned) << 8;
10891 /* The rest of the bits are the same as other immediate shifts. */
10892 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10893 }
10894
10895 static void
10896 do_neon_qmovn (void)
10897 {
10898 struct neon_type_el et = neon_check_type (2, NS_DQ,
10899 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10900 /* Saturating move where operands can be signed or unsigned, and the
10901 destination has the same signedness. */
10902 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10903 if (et.type == NT_unsigned)
10904 inst.instruction |= 0xc0;
10905 else
10906 inst.instruction |= 0x80;
10907 neon_two_same (0, 1, et.size / 2);
10908 }
10909
10910 static void
10911 do_neon_qmovun (void)
10912 {
10913 struct neon_type_el et = neon_check_type (2, NS_DQ,
10914 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10915 /* Saturating move with unsigned results. Operands must be signed. */
10916 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10917 neon_two_same (0, 1, et.size / 2);
10918 }
10919
10920 static void
10921 do_neon_rshift_sat_narrow (void)
10922 {
10923 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10924 or unsigned. If operands are unsigned, results must also be unsigned. */
10925 struct neon_type_el et = neon_check_type (2, NS_DQI,
10926 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10927 int imm = inst.operands[2].imm;
10928 /* This gets the bounds check, size encoding and immediate bits calculation
10929 right. */
10930 et.size /= 2;
10931
10932 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
10933 VQMOVN.I<size> <Dd>, <Qm>. */
10934 if (imm == 0)
10935 {
10936 inst.operands[2].present = 0;
10937 inst.instruction = N_MNEM_vqmovn;
10938 do_neon_qmovn ();
10939 return;
10940 }
10941
10942 constraint (imm < 1 || (unsigned)imm > et.size,
10943 _("immediate out of range"));
10944 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
10945 }
10946
10947 static void
10948 do_neon_rshift_sat_narrow_u (void)
10949 {
10950 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10951 or unsigned. If operands are unsigned, results must also be unsigned. */
10952 struct neon_type_el et = neon_check_type (2, NS_DQI,
10953 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10954 int imm = inst.operands[2].imm;
10955 /* This gets the bounds check, size encoding and immediate bits calculation
10956 right. */
10957 et.size /= 2;
10958
10959 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
10960 VQMOVUN.I<size> <Dd>, <Qm>. */
10961 if (imm == 0)
10962 {
10963 inst.operands[2].present = 0;
10964 inst.instruction = N_MNEM_vqmovun;
10965 do_neon_qmovun ();
10966 return;
10967 }
10968
10969 constraint (imm < 1 || (unsigned)imm > et.size,
10970 _("immediate out of range"));
10971 /* FIXME: The manual is kind of unclear about what value U should have in
10972 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
10973 must be 1. */
10974 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
10975 }
10976
10977 static void
10978 do_neon_movn (void)
10979 {
10980 struct neon_type_el et = neon_check_type (2, NS_DQ,
10981 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10982 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10983 neon_two_same (0, 1, et.size / 2);
10984 }
10985
10986 static void
10987 do_neon_rshift_narrow (void)
10988 {
10989 struct neon_type_el et = neon_check_type (2, NS_DQI,
10990 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10991 int imm = inst.operands[2].imm;
10992 /* This gets the bounds check, size encoding and immediate bits calculation
10993 right. */
10994 et.size /= 2;
10995
10996 /* If the immediate is zero, this is a pseudo-instruction for
10997 VMOVN.I<size> <Dd>, <Qm>. */
10998 if (imm == 0)
10999 {
11000 inst.operands[2].present = 0;
11001 inst.instruction = N_MNEM_vmovn;
11002 do_neon_movn ();
11003 return;
11004 }
11005
11006 constraint (imm < 1 || (unsigned)imm > et.size,
11007 _("immediate out of range for narrowing operation"));
11008 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
11009 }
11010
11011 static void
11012 do_neon_shll (void)
11013 {
11014 /* FIXME: Type checking when lengthening. */
11015 struct neon_type_el et = neon_check_type (2, NS_QDI,
11016 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
11017 unsigned imm = inst.operands[2].imm;
11018
11019 if (imm == et.size)
11020 {
11021 /* Maximum shift variant. */
11022 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11023 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11024 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11025 inst.instruction |= LOW4 (inst.operands[1].reg);
11026 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11027 inst.instruction |= neon_logbits (et.size) << 18;
11028
11029 inst.instruction = neon_dp_fixup (inst.instruction);
11030 }
11031 else
11032 {
11033 /* A more-specific type check for non-max versions. */
11034 et = neon_check_type (2, NS_QDI,
11035 N_EQK | N_DBL, N_SU_32 | N_KEY);
11036 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11037 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
11038 }
11039 }
11040
11041 /* Check the various types for the VCVT instruction, and return the one that
11042 the current instruction is. */
11043
11044 static int
11045 neon_cvt_flavour (enum neon_shape rs)
11046 {
11047 #define CVT_VAR(C,X,Y) \
11048 et = neon_check_type (2, rs, (X), (Y)); \
11049 if (et.type != NT_invtype) \
11050 { \
11051 inst.error = NULL; \
11052 return (C); \
11053 }
11054 struct neon_type_el et;
11055
11056 CVT_VAR (0, N_S32, N_F32);
11057 CVT_VAR (1, N_U32, N_F32);
11058 CVT_VAR (2, N_F32, N_S32);
11059 CVT_VAR (3, N_F32, N_U32);
11060
11061 return -1;
11062 #undef CVT_VAR
11063 }
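/* Rough examples (illustrative only): "vcvt.s32.f32" matches flavour 0 and
   "vcvt.f32.u32" matches flavour 3; the enctab[] arrays in do_neon_cvt below
   are indexed by these flavour numbers.  */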
11064
11065 static void
11066 do_neon_cvt (void)
11067 {
11068 /* Fixed-point conversion with #0 immediate is encoded as an integer
11069 conversion. */
11070 if (inst.operands[2].present && inst.operands[2].imm != 0)
11071 {
11072 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
11073 int flavour = neon_cvt_flavour (rs);
11074 unsigned immbits = 32 - inst.operands[2].imm;
11075 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
11076 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11077 if (flavour != -1)
11078 inst.instruction |= enctab[flavour];
11079 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11080 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11081 inst.instruction |= LOW4 (inst.operands[1].reg);
11082 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11083 inst.instruction |= (rs == NS_QQI) << 6;
11084 inst.instruction |= 1 << 21;
11085 inst.instruction |= immbits << 16;
11086 }
11087 else
11088 {
11089 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11090 int flavour = neon_cvt_flavour (rs);
11091 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
11092 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11093 if (flavour != -1)
11094 inst.instruction |= enctab[flavour];
11095 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11096 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11097 inst.instruction |= LOW4 (inst.operands[1].reg);
11098 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11099 inst.instruction |= (rs == NS_QQ) << 6;
11100 inst.instruction |= 2 << 18;
11101 }
11102 inst.instruction = neon_dp_fixup (inst.instruction);
11103 }
11104
11105 static void
11106 neon_move_immediate (void)
11107 {
11108 enum neon_shape rs = neon_check_shape (NS_DI_QI);
11109 struct neon_type_el et = neon_check_type (1, rs,
11110 N_I8 | N_I16 | N_I32 | N_I64 | N_F32);
11111 unsigned immlo, immhi = 0, immbits;
11112 int op, cmode;
11113
11114 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
11115 op = (inst.instruction & (1 << 5)) != 0;
11116
11117 immlo = inst.operands[1].imm;
11118 if (inst.operands[1].regisimm)
11119 immhi = inst.operands[1].reg;
11120
11121 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
11122 _("immediate has bits set outside the operand size"));
11123
11124 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11125 et.size, et.type)) == FAIL)
11126 {
11127 /* Invert relevant bits only. */
11128 neon_invert_size (&immlo, &immhi, et.size);
11129 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
11130 with one or the other; those cases are caught by
11131 neon_cmode_for_move_imm. */
11132 op = !op;
11133 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11134 et.size, et.type)) == FAIL)
11135 {
11136 first_error (_("immediate out of range"));
11137 return;
11138 }
11139 }
11140
11141 inst.instruction &= ~(1 << 5);
11142 inst.instruction |= op << 5;
11143
11144 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11145 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11146 inst.instruction |= (rs == NS_QI) << 6;
11147 inst.instruction |= cmode << 8;
11148
11149 neon_write_immbits (immbits);
11150 }
11151
11152 static void
11153 do_neon_mvn (void)
11154 {
11155 if (inst.operands[1].isreg)
11156 {
11157 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11158
11159 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11160 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11161 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11162 inst.instruction |= LOW4 (inst.operands[1].reg);
11163 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11164 inst.instruction |= (rs == NS_QQ) << 6;
11165 }
11166 else
11167 {
11168 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11169 neon_move_immediate ();
11170 }
11171
11172 inst.instruction = neon_dp_fixup (inst.instruction);
11173 }
11174
11175 /* Encode instructions of form:
11176
11177 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11178 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
11179
11180 */
11181
11182 static void
11183 neon_mixed_length (struct neon_type_el et, unsigned size)
11184 {
11185 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11186 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11187 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11188 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11189 inst.instruction |= LOW4 (inst.operands[2].reg);
11190 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11191 inst.instruction |= (et.type == NT_unsigned) << 24;
11192 inst.instruction |= neon_logbits (size) << 20;
11193
11194 inst.instruction = neon_dp_fixup (inst.instruction);
11195 }
11196
11197 static void
11198 do_neon_dyadic_long (void)
11199 {
11200 /* FIXME: Type checking for lengthening op. */
11201 struct neon_type_el et = neon_check_type (3, NS_QDD,
11202 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
11203 neon_mixed_length (et, et.size);
11204 }
11205
11206 static void
11207 do_neon_abal (void)
11208 {
11209 struct neon_type_el et = neon_check_type (3, NS_QDD,
11210 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
11211 neon_mixed_length (et, et.size);
11212 }
11213
11214 static void
11215 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
11216 {
11217 if (inst.operands[2].isscalar)
11218 {
11219 struct neon_type_el et = neon_check_type (3, NS_QDS,
11220 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
11221 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11222 neon_mul_mac (et, et.type == NT_unsigned);
11223 }
11224 else
11225 {
11226 struct neon_type_el et = neon_check_type (3, NS_QDD,
11227 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
11228 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11229 neon_mixed_length (et, et.size);
11230 }
11231 }
11232
11233 static void
11234 do_neon_mac_maybe_scalar_long (void)
11235 {
11236 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
11237 }
11238
11239 static void
11240 do_neon_dyadic_wide (void)
11241 {
11242 struct neon_type_el et = neon_check_type (3, NS_QQD,
11243 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
11244 neon_mixed_length (et, et.size);
11245 }
11246
11247 static void
11248 do_neon_dyadic_narrow (void)
11249 {
11250 struct neon_type_el et = neon_check_type (3, NS_QDD,
11251 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
11252 neon_mixed_length (et, et.size / 2);
11253 }
11254
11255 static void
11256 do_neon_mul_sat_scalar_long (void)
11257 {
11258 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
11259 }
11260
11261 static void
11262 do_neon_vmull (void)
11263 {
11264 if (inst.operands[2].isscalar)
11265 do_neon_mac_maybe_scalar_long ();
11266 else
11267 {
11268 struct neon_type_el et = neon_check_type (3, NS_QDD,
11269 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
11270 if (et.type == NT_poly)
11271 inst.instruction = NEON_ENC_POLY (inst.instruction);
11272 else
11273 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11274 /* For polynomial encoding, size field must be 0b00 and the U bit must be
11275 zero. Should be OK as-is. */
11276 neon_mixed_length (et, et.size);
11277 }
11278 }
11279
11280 static void
11281 do_neon_ext (void)
11282 {
11283 enum neon_shape rs = neon_check_shape (NS_DDDI_QQQI);
11284 struct neon_type_el et = neon_check_type (3, rs,
11285 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
11286 unsigned imm = (inst.operands[3].imm * et.size) / 8;
11287 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11288 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11289 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11290 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11291 inst.instruction |= LOW4 (inst.operands[2].reg);
11292 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11293 inst.instruction |= (rs == NS_QQQI) << 6;
11294 inst.instruction |= imm << 8;
11295
11296 inst.instruction = neon_dp_fixup (inst.instruction);
11297 }
11298
11299 static void
11300 do_neon_rev (void)
11301 {
11302 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11303 struct neon_type_el et = neon_check_type (2, rs,
11304 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11305 unsigned op = (inst.instruction >> 7) & 3;
11306 /* N (width of reversed regions) is encoded as part of the bitmask. We
11307 extract it here to check that the elements to be reversed are smaller.
11308 Otherwise we'd get a reserved instruction. */
11309 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
11310 assert (elsize != 0);
11311 constraint (et.size >= elsize,
11312 _("elements must be smaller than reversal region"));
11313 neon_two_same (rs == NS_QQ, 1, et.size);
11314 }
11315
11316 static void
11317 do_neon_dup (void)
11318 {
11319 if (inst.operands[1].isscalar)
11320 {
11321 enum neon_shape rs = neon_check_shape (NS_DS_QS);
11322 struct neon_type_el et = neon_check_type (2, rs,
11323 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11324 unsigned sizebits = et.size >> 3;
11325 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
11326 int logsize = neon_logbits (et.size);
11327 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
11328 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11329 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11330 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11331 inst.instruction |= LOW4 (dm);
11332 inst.instruction |= HI1 (dm) << 5;
11333 inst.instruction |= (rs == NS_QS) << 6;
11334 inst.instruction |= x << 17;
11335 inst.instruction |= sizebits << 16;
11336
11337 inst.instruction = neon_dp_fixup (inst.instruction);
11338 }
11339 else
11340 {
11341 enum neon_shape rs = neon_check_shape (NS_DR_QR);
11342 struct neon_type_el et = neon_check_type (1, rs,
11343 N_8 | N_16 | N_32 | N_KEY);
11344 unsigned save_cond = inst.instruction & 0xf0000000;
11345 /* Duplicate ARM register to lanes of vector. */
11346 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
11347 switch (et.size)
11348 {
11349 case 8: inst.instruction |= 0x400000; break;
11350 case 16: inst.instruction |= 0x000020; break;
11351 case 32: inst.instruction |= 0x000000; break;
11352 default: break;
11353 }
11354 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11355 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
11356 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
11357 inst.instruction |= (rs == NS_QR) << 21;
11358 /* The encoding for this instruction is identical for the ARM and Thumb
11359 variants, except for the condition field. */
11360 if (thumb_mode)
11361 inst.instruction |= 0xe0000000;
11362 else
11363 inst.instruction |= save_cond;
11364 }
11365 }
11366
11367 /* VMOV has particularly many variations. It can be one of:
11368 0. VMOV<c><q> <Qd>, <Qm>
11369 1. VMOV<c><q> <Dd>, <Dm>
11370 (Register operations, which are VORR with Rm = Rn.)
11371 2. VMOV<c><q>.<dt> <Qd>, #<imm>
11372 3. VMOV<c><q>.<dt> <Dd>, #<imm>
11373 (Immediate loads.)
11374 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
11375 (ARM register to scalar.)
11376 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
11377 (Two ARM registers to vector.)
11378 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
11379 (Scalar to ARM register.)
11380 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
11381 (Vector to two ARM registers.)
11382
11383 We should have just enough information to be able to disambiguate most of
11384 these, apart from "Two ARM registers to vector" and "Vector to two ARM
11385 registers" cases. For these, abuse the .regisimm operand field to signify a
11386 Neon register.
11387
11388 All the encoded bits are hardcoded by this function.
11389
11390 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
11391 Cases 5, 7 may be used with VFPv2 and above.
11392
11393 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
11394 can specify a type where it doesn't make sense to, and is ignored).
11395 */
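/* Rough examples of the disambiguation (illustrative only):
   "vmov.8 d1[2], r0" is case 4 (operand 0 is a scalar),
   "vmov.s8 r0, d1[2]" is case 6 (operand 1 is a scalar), and
   "vmov d0, r2, r3" is case 5 (three operands, with operand 0 marked via
   .regisimm as a Neon register).  */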
11396
11397 static void
11398 do_neon_mov (void)
11399 {
11400 int nargs = inst.operands[0].present + inst.operands[1].present
11401 + inst.operands[2].present;
11402 unsigned save_cond = thumb_mode ? 0xe0000000 : inst.instruction & 0xf0000000;
11403 const char *vfp_vers = "selected FPU does not support instruction";
11404
11405 switch (nargs)
11406 {
11407 case 2:
11408 /* Cases 0, 1, 2, 3, 4, 6. */
11409 if (inst.operands[1].isscalar)
11410 {
11411 /* Case 6. */
11412 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11413 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
11414 unsigned logsize = neon_logbits (et.size);
11415 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
11416 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
11417 unsigned abcdebits = 0;
11418
11419 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
11420 _(vfp_vers));
11421 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
11422 && et.size != 32, _(vfp_vers));
11423 constraint (et.type == NT_invtype, _("bad type for scalar"));
11424 constraint (x >= 64 / et.size, _("scalar index out of range"));
11425
11426 switch (et.size)
11427 {
11428 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
11429 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
11430 case 32: abcdebits = 0x00; break;
11431 default: ;
11432 }
11433
11434 abcdebits |= x << logsize;
11435 inst.instruction = save_cond;
11436 inst.instruction |= 0xe100b10;
11437 inst.instruction |= LOW4 (dn) << 16;
11438 inst.instruction |= HI1 (dn) << 7;
11439 inst.instruction |= inst.operands[0].reg << 12;
11440 inst.instruction |= (abcdebits & 3) << 5;
11441 inst.instruction |= (abcdebits >> 2) << 21;
11442 }
11443 else if (inst.operands[1].isreg)
11444 {
11445 /* Cases 0, 1, 4. */
11446 if (inst.operands[0].isscalar)
11447 {
11448 /* Case 4. */
11449 unsigned bcdebits = 0;
11450 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11451 N_8 | N_16 | N_32 | N_KEY, N_EQK);
11452 int logsize = neon_logbits (et.size);
11453 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
11454 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
11455
11456 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
11457 _(vfp_vers));
11458 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
11459 && et.size != 32, _(vfp_vers));
11460 constraint (et.type == NT_invtype, _("bad type for scalar"));
11461 constraint (x >= 64 / et.size, _("scalar index out of range"));
11462
11463 switch (et.size)
11464 {
11465 case 8: bcdebits = 0x8; break;
11466 case 16: bcdebits = 0x1; break;
11467 case 32: bcdebits = 0x0; break;
11468 default: ;
11469 }
11470
11471 bcdebits |= x << logsize;
11472 inst.instruction = save_cond;
11473 inst.instruction |= 0xe000b10;
11474 inst.instruction |= LOW4 (dn) << 16;
11475 inst.instruction |= HI1 (dn) << 7;
11476 inst.instruction |= inst.operands[1].reg << 12;
11477 inst.instruction |= (bcdebits & 3) << 5;
11478 inst.instruction |= (bcdebits >> 2) << 21;
11479 }
11480 else
11481 {
11482 /* Cases 0, 1. */
11483 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11484 /* The architecture manual I have doesn't explicitly state which
11485 value the U bit should have for register->register moves, but
11486 the equivalent VORR instruction has U = 0, so do that. */
11487 inst.instruction = 0x0200110;
11488 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11489 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11490 inst.instruction |= LOW4 (inst.operands[1].reg);
11491 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11492 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11493 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11494 inst.instruction |= (rs == NS_QQ) << 6;
11495
11496 inst.instruction = neon_dp_fixup (inst.instruction);
11497 }
11498 }
11499 else
11500 {
11501 /* Cases 2, 3. */
11502 inst.instruction = 0x0800010;
11503 neon_move_immediate ();
11504 inst.instruction = neon_dp_fixup (inst.instruction);
11505 }
11506 break;
11507
11508 case 3:
11509 /* Cases 5, 7. */
11510 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
11511 _(vfp_vers));
11512
11513 if (inst.operands[0].regisimm)
11514 {
11515 /* Case 5. */
11516 inst.instruction = save_cond;
11517 inst.instruction |= 0xc400b10;
11518 inst.instruction |= LOW4 (inst.operands[0].reg);
11519 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
11520 inst.instruction |= inst.operands[1].reg << 12;
11521 inst.instruction |= inst.operands[2].reg << 16;
11522 }
11523 else
11524 {
11525 /* Case 7. */
11526 inst.instruction = save_cond;
11527 inst.instruction |= 0xc500b10;
11528 inst.instruction |= inst.operands[0].reg << 12;
11529 inst.instruction |= inst.operands[1].reg << 16;
11530 inst.instruction |= LOW4 (inst.operands[2].reg);
11531 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11532 }
11533 break;
11534
11535 default:
11536 abort ();
11537 }
11538 }
11539
11540 static void
11541 do_neon_rshift_round_imm (void)
11542 {
11543 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
11544 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11545 int imm = inst.operands[2].imm;
11546
11547 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
11548 if (imm == 0)
11549 {
11550 inst.operands[2].present = 0;
11551 do_neon_mov ();
11552 return;
11553 }
11554
11555 constraint (imm < 1 || (unsigned)imm > et.size,
11556 _("immediate out of range for shift"));
11557 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
11558 et.size - imm);
11559 }
11560
11561 static void
11562 do_neon_movl (void)
11563 {
11564 struct neon_type_el et = neon_check_type (2, NS_QD,
11565 N_EQK | N_DBL, N_SU_32 | N_KEY);
11566 unsigned sizebits = et.size >> 3;
11567 inst.instruction |= sizebits << 19;
11568 neon_two_same (0, et.type == NT_unsigned, -1);
11569 }
11570
11571 static void
11572 do_neon_trn (void)
11573 {
11574 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11575 struct neon_type_el et = neon_check_type (2, rs,
11576 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11577 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11578 neon_two_same (rs == NS_QQ, 1, et.size);
11579 }
11580
11581 static void
11582 do_neon_zip_uzp (void)
11583 {
11584 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11585 struct neon_type_el et = neon_check_type (2, rs,
11586 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11587 if (rs == NS_DD && et.size == 32)
11588 {
11589 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
11590 inst.instruction = N_MNEM_vtrn;
11591 do_neon_trn ();
11592 return;
11593 }
11594 neon_two_same (rs == NS_QQ, 1, et.size);
11595 }
11596
11597 static void
11598 do_neon_sat_abs_neg (void)
11599 {
11600 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11601 struct neon_type_el et = neon_check_type (2, rs,
11602 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11603 neon_two_same (rs == NS_QQ, 1, et.size);
11604 }
11605
11606 static void
11607 do_neon_pair_long (void)
11608 {
11609 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11610 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
11611 /* Unsigned is encoded in the OP field (bit 7) for these instructions. */
11612 inst.instruction |= (et.type == NT_unsigned) << 7;
11613 neon_two_same (rs == NS_QQ, 1, et.size);
11614 }
11615
11616 static void
11617 do_neon_recip_est (void)
11618 {
11619 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11620 struct neon_type_el et = neon_check_type (2, rs,
11621 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
11622 inst.instruction |= (et.type == NT_float) << 8;
11623 neon_two_same (rs == NS_QQ, 1, et.size);
11624 }
11625
11626 static void
11627 do_neon_cls (void)
11628 {
11629 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11630 struct neon_type_el et = neon_check_type (2, rs,
11631 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11632 neon_two_same (rs == NS_QQ, 1, et.size);
11633 }
11634
11635 static void
11636 do_neon_clz (void)
11637 {
11638 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11639 struct neon_type_el et = neon_check_type (2, rs,
11640 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
11641 neon_two_same (rs == NS_QQ, 1, et.size);
11642 }
11643
11644 static void
11645 do_neon_cnt (void)
11646 {
11647 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11648 struct neon_type_el et = neon_check_type (2, rs,
11649 N_EQK | N_INT, N_8 | N_KEY);
11650 neon_two_same (rs == NS_QQ, 1, et.size);
11651 }
11652
11653 static void
11654 do_neon_swp (void)
11655 {
11656 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11657 neon_two_same (rs == NS_QQ, 1, -1);
11658 }
11659
11660 static void
11661 do_neon_tbl_tbx (void)
11662 {
11663 unsigned listlenbits;
11664 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
11665
11666 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
11667 {
11668 first_error (_("bad list length for table lookup"));
11669 return;
11670 }
11671
11672 listlenbits = inst.operands[1].imm - 1;
11673 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11674 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11675 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11676 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11677 inst.instruction |= LOW4 (inst.operands[2].reg);
11678 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11679 inst.instruction |= listlenbits << 8;
11680
11681 inst.instruction = neon_dp_fixup (inst.instruction);
11682 }
11683
11684 static void
11685 do_neon_ldm_stm (void)
11686 {
11687 /* The P, U and L bits are part of the bitmask. */
11688 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
11689 unsigned offsetbits = inst.operands[1].imm * 2;
11690
11691 constraint (is_dbmode && !inst.operands[0].writeback,
11692 _("writeback (!) must be used for VLDMDB and VSTMDB"));
11693
11694 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
11695 _("register list must contain at least 1 and at most 16 "
11696 "registers"));
11697
11698 inst.instruction |= inst.operands[0].reg << 16;
11699 inst.instruction |= inst.operands[0].writeback << 21;
11700 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11701 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
11702
11703 inst.instruction |= offsetbits;
11704
11705 if (thumb_mode)
11706 inst.instruction |= 0xe0000000;
11707 }
11708
11709 static void
11710 do_neon_ldr_str (void)
11711 {
11712 unsigned offsetbits;
11713 int offset_up = 1;
11714 int is_ldr = (inst.instruction & (1 << 20)) != 0;
11715
11716 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11717 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11718
11719 constraint (inst.reloc.pc_rel && !is_ldr,
11720 _("PC-relative addressing unavailable with VSTR"));
11721
11722 constraint (!inst.reloc.pc_rel && inst.reloc.exp.X_op != O_constant,
11723 _("Immediate value must be a constant"));
11724
11725 if (inst.reloc.exp.X_add_number < 0)
11726 {
11727 offset_up = 0;
11728 offsetbits = -inst.reloc.exp.X_add_number / 4;
11729 }
11730 else
11731 offsetbits = inst.reloc.exp.X_add_number / 4;
11732
11733 /* FIXME: Does this catch everything? */
11734 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11735 || inst.operands[1].postind || inst.operands[1].writeback
11736 || inst.operands[1].immisreg || inst.operands[1].shifted,
11737 BAD_ADDR_MODE);
11738 constraint ((inst.operands[1].imm & 3) != 0,
11739 _("Offset must be a multiple of 4"));
11740 constraint (offsetbits != (offsetbits & 0xff),
11741 _("Immediate offset out of range"));
11742
11743 inst.instruction |= inst.operands[1].reg << 16;
11744 inst.instruction |= offsetbits & 0xff;
11745 inst.instruction |= offset_up << 23;
11746
11747 if (thumb_mode)
11748 inst.instruction |= 0xe0000000;
11749
11750 if (inst.reloc.pc_rel)
11751 {
11752 if (thumb_mode)
11753 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
11754 else
11755 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
11756 }
11757 else
11758 inst.reloc.type = BFD_RELOC_UNUSED;
11759 }
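/* Rough example (simply following the code above): "vldr d0, [r1, #-8]" has
   a negative, word-aligned offset, so offset_up is cleared (bit 23 == 0) and
   offsetbits == 2 is written to the low 8 bits.  */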
11760
11761 /* "interleave" version also handles non-interleaving register VLD1/VST1
11762 instructions. */
11763
11764 static void
11765 do_neon_ld_st_interleave (void)
11766 {
11767 struct neon_type_el et = neon_check_type (1, NS_IGNORE,
11768 N_8 | N_16 | N_32 | N_64);
11769 unsigned alignbits = 0;
11770 unsigned idx;
11771 /* This table is indexed by a value whose bits are:
11772 0: register stride of one (0) or two (1)
11773 1,2: register list length minus one (for lengths 1, 2, 3, 4)
11774 3,4: <n> in the instruction type, minus one (VLD<n> / VST<n>).
11775 We use -1 for invalid entries. */
11776 const int typetable[] =
11777 {
11778 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
11779 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
11780 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
11781 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
11782 };
11783 int typebits;
11784
11785 if (et.type == NT_invtype)
11786 return;
11787
11788 if (inst.operands[1].immisalign)
11789 switch (inst.operands[1].imm >> 8)
11790 {
11791 case 64: alignbits = 1; break;
11792 case 128:
11793 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11794 goto bad_alignment;
11795 alignbits = 2;
11796 break;
11797 case 256:
11798 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11799 goto bad_alignment;
11800 alignbits = 3;
11801 break;
11802 default:
11803 bad_alignment:
11804 first_error (_("bad alignment"));
11805 return;
11806 }
11807
11808 inst.instruction |= alignbits << 4;
11809 inst.instruction |= neon_logbits (et.size) << 6;
11810
11811 /* Bits [4:6] of the immediate in a list specifier encode register stride
11812 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
11813 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
11814 up the right value for "type" in a table based on this value and the given
11815 list style, then stick it back. */
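  /* Editorial example, assuming the operand encoding described above: for
     "vld2.32 {d0,d1}, [r0]" the list gives stride 1 (bit 0 == 0) and length
     2 (bits [2:1] == 01), so the low three bits of IDX are 0b010; the VLD2
     bitmask carries <n> minus one == 1 in bits [9:8], giving IDX == 10 and
     typetable[10] == 0x8 as the "type" value placed in bits [11:8].  */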
11816 idx = ((inst.operands[0].imm >> 4) & 7)
11817 | (((inst.instruction >> 8) & 3) << 3);
11818
11819 typebits = typetable[idx];
11820
11821 constraint (typebits == -1, _("bad list type for instruction"));
11822
11823 inst.instruction &= ~0xf00;
11824 inst.instruction |= typebits << 8;
11825 }
11826
11827 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
11828 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
11829 otherwise. The variable arguments are a list of pairs of legal (size, align)
11830 values, terminated with -1. */
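/* For instance (editorial note), the call used below for VLD1/VST1 lanes,
   neon_alignment_bit (et.size, align, &do_align, 16, 16, 32, 32, -1),
   accepts an explicit ":16" alignment only for element size 16 and ":32"
   only for size 32; with no alignment qualifier at all it simply returns
   SUCCESS with *do_align cleared.  */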
11831
11832 static int
11833 neon_alignment_bit (int size, int align, int *do_align, ...)
11834 {
11835 va_list ap;
11836 int result = FAIL, thissize, thisalign;
11837
11838 if (!inst.operands[1].immisalign)
11839 {
11840 *do_align = 0;
11841 return SUCCESS;
11842 }
11843
11844 va_start (ap, do_align);
11845
11846 do
11847 {
11848 thissize = va_arg (ap, int);
11849 if (thissize == -1)
11850 break;
11851 thisalign = va_arg (ap, int);
11852
11853 if (size == thissize && align == thisalign)
11854 result = SUCCESS;
11855 }
11856 while (result != SUCCESS);
11857
11858 va_end (ap);
11859
11860 if (result == SUCCESS)
11861 *do_align = 1;
11862 else
11863 first_error (_("unsupported alignment for instruction"));
11864
11865 return result;
11866 }
11867
11868 static void
11869 do_neon_ld_st_lane (void)
11870 {
11871 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11872 int align_good, do_align = 0;
11873 int logsize = neon_logbits (et.size);
11874 int align = inst.operands[1].imm >> 8;
11875 int n = (inst.instruction >> 8) & 3;
11876 int max_el = 64 / et.size;
11877
11878 if (et.type == NT_invtype)
11879 return;
11880
11881 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
11882 _("bad list length"));
11883 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
11884 _("scalar index out of range"));
11885 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
11886 && et.size == 8,
11887 _("stride of 2 unavailable when element size is 8"));
11888
11889 switch (n)
11890 {
11891 case 0: /* VLD1 / VST1. */
11892 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
11893 32, 32, -1);
11894 if (align_good == FAIL)
11895 return;
11896 if (do_align)
11897 {
11898 unsigned alignbits = 0;
11899 switch (et.size)
11900 {
11901 case 16: alignbits = 0x1; break;
11902 case 32: alignbits = 0x3; break;
11903 default: ;
11904 }
11905 inst.instruction |= alignbits << 4;
11906 }
11907 break;
11908
11909 case 1: /* VLD2 / VST2. */
11910 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
11911 32, 64, -1);
11912 if (align_good == FAIL)
11913 return;
11914 if (do_align)
11915 inst.instruction |= 1 << 4;
11916 break;
11917
11918 case 2: /* VLD3 / VST3. */
11919 constraint (inst.operands[1].immisalign,
11920 _("can't use alignment with this instruction"));
11921 break;
11922
11923 case 3: /* VLD4 / VST4. */
11924 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
11925 16, 64, 32, 64, 32, 128, -1);
11926 if (align_good == FAIL)
11927 return;
11928 if (do_align)
11929 {
11930 unsigned alignbits = 0;
11931 switch (et.size)
11932 {
11933 case 8: alignbits = 0x1; break;
11934 case 16: alignbits = 0x1; break;
11935 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
11936 default: ;
11937 }
11938 inst.instruction |= alignbits << 4;
11939 }
11940 break;
11941
11942 default: ;
11943 }
11944
11945 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
11946 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11947 inst.instruction |= 1 << (4 + logsize);
11948
11949 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
11950 inst.instruction |= logsize << 10;
11951 }
11952
11953 /* Encode single n-element structure to all lanes VLD<n> instructions. */
11954
11955 static void
11956 do_neon_ld_dup (void)
11957 {
11958 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11959 int align_good, do_align = 0;
11960
11961 if (et.type == NT_invtype)
11962 return;
11963
11964 switch ((inst.instruction >> 8) & 3)
11965 {
11966 case 0: /* VLD1. */
11967 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
11968 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11969 &do_align, 16, 16, 32, 32, -1);
11970 if (align_good == FAIL)
11971 return;
11972 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
11973 {
11974 case 1: break;
11975 case 2: inst.instruction |= 1 << 5; break;
11976 default: first_error (_("bad list length")); return;
11977 }
11978 inst.instruction |= neon_logbits (et.size) << 6;
11979 break;
11980
11981 case 1: /* VLD2. */
11982 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11983 &do_align, 8, 16, 16, 32, 32, 64, -1);
11984 if (align_good == FAIL)
11985 return;
11986 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
11987 _("bad list length"));
11988 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11989 inst.instruction |= 1 << 5;
11990 inst.instruction |= neon_logbits (et.size) << 6;
11991 break;
11992
11993 case 2: /* VLD3. */
11994 constraint (inst.operands[1].immisalign,
11995 _("can't use alignment with this instruction"));
11996 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
11997 _("bad list length"));
11998 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11999 inst.instruction |= 1 << 5;
12000 inst.instruction |= neon_logbits (et.size) << 6;
12001 break;
12002
12003 case 3: /* VLD4. */
12004 {
12005 int align = inst.operands[1].imm >> 8;
12006 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
12007 16, 64, 32, 64, 32, 128, -1);
12008 if (align_good == FAIL)
12009 return;
12010 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
12011 _("bad list length"));
12012 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
12013 inst.instruction |= 1 << 5;
12014 if (et.size == 32 && align == 128)
12015 inst.instruction |= 0x3 << 6;
12016 else
12017 inst.instruction |= neon_logbits (et.size) << 6;
12018 }
12019 break;
12020
12021 default: ;
12022 }
12023
12024 inst.instruction |= do_align << 4;
12025 }
12026
12027 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
12028    apart from bits [11:4]).  */
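/* Editorial note on the dispatch below, assuming the usual meaning of the
   NEON_LANE sentinels in this file: a plain list such as "vld1.32 {d0}, [r0]"
   yields NEON_INTERLEAVE_LANES, an all-lanes list such as
   "vld2.8 {d0[],d1[]}, [r0]" yields NEON_ALL_LANES, and a single-lane list
   such as "vld2.8 {d0[1],d1[1]}, [r0]" yields the explicit lane number.  */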
12029
12030 static void
12031 do_neon_ldx_stx (void)
12032 {
12033 switch (NEON_LANE (inst.operands[0].imm))
12034 {
12035 case NEON_INTERLEAVE_LANES:
12036 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
12037 do_neon_ld_st_interleave ();
12038 break;
12039
12040 case NEON_ALL_LANES:
12041 inst.instruction = NEON_ENC_DUP (inst.instruction);
12042 do_neon_ld_dup ();
12043 break;
12044
12045 default:
12046 inst.instruction = NEON_ENC_LANE (inst.instruction);
12047 do_neon_ld_st_lane ();
12048 }
12049
12050 /* L bit comes from bit mask. */
12051 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12052 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12053 inst.instruction |= inst.operands[1].reg << 16;
12054
12055 if (inst.operands[1].postind)
12056 {
12057 int postreg = inst.operands[1].imm & 0xf;
12058 constraint (!inst.operands[1].immisreg,
12059 _("post-index must be a register"));
12060 constraint (postreg == 0xd || postreg == 0xf,
12061 _("bad register for post-index"));
12062 inst.instruction |= postreg;
12063 }
12064 else if (inst.operands[1].writeback)
12065 {
12066 inst.instruction |= 0xd;
12067 }
12068 else
12069 inst.instruction |= 0xf;
12070
12071 if (thumb_mode)
12072 inst.instruction |= 0xf9000000;
12073 else
12074 inst.instruction |= 0xf4000000;
12075 }
12076
12077 \f
12078 /* Overall per-instruction processing. */
12079
12080 /* We need to be able to fix up arbitrary expressions in some statements.
12081 This is so that we can handle symbols that are an arbitrary distance from
12082 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
12083 which returns part of an address in a form which will be valid for
12084 a data instruction. We do this by pushing the expression into a symbol
12085 in the expr_section, and creating a fix for that. */
12086
12087 static void
12088 fix_new_arm (fragS * frag,
12089 int where,
12090 short int size,
12091 expressionS * exp,
12092 int pc_rel,
12093 int reloc)
12094 {
12095 fixS * new_fix;
12096
12097 switch (exp->X_op)
12098 {
12099 case O_constant:
12100 case O_symbol:
12101 case O_add:
12102 case O_subtract:
12103 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
12104 break;
12105
12106 default:
12107 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
12108 pc_rel, reloc);
12109 break;
12110 }
12111
12112 /* Mark whether the fix is to a THUMB instruction, or an ARM
12113 instruction. */
12114 new_fix->tc_fix_data = thumb_mode;
12115 }
12116
12117 /* Create a frag for an instruction requiring relaxation. */
12118 static void
12119 output_relax_insn (void)
12120 {
12121 char * to;
12122 symbolS *sym;
12123 int offset;
12124
12125 #ifdef OBJ_ELF
12126 /* The size of the instruction is unknown, so tie the debug info to the
12127 start of the instruction. */
12128 dwarf2_emit_insn (0);
12129 #endif
12130
12131 switch (inst.reloc.exp.X_op)
12132 {
12133 case O_symbol:
12134 sym = inst.reloc.exp.X_add_symbol;
12135 offset = inst.reloc.exp.X_add_number;
12136 break;
12137 case O_constant:
12138 sym = NULL;
12139 offset = inst.reloc.exp.X_add_number;
12140 break;
12141 default:
12142 sym = make_expr_symbol (&inst.reloc.exp);
12143 offset = 0;
12144 break;
12145 }
12146 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
12147 inst.relax, sym, offset, NULL/*offset, opcode*/);
12148 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
12149 }
12150
12151 /* Write a 32-bit Thumb instruction to buf. */
12152 static void
12153 put_thumb32_insn (char * buf, unsigned long insn)
12154 {
12155 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
12156 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
12157 }
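/* Editorial example: a 32-bit Thumb opcode such as 0xf000f800 (the BL
   skeleton used in the opcode table below) is emitted as the halfword 0xf000
   followed by the halfword 0xf800, each converted separately so that target
   endianness is applied per halfword rather than per 32-bit word.  */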
12158
12159 static void
12160 output_inst (const char * str)
12161 {
12162 char * to = NULL;
12163
12164 if (inst.error)
12165 {
12166 as_bad ("%s -- `%s'", inst.error, str);
12167 return;
12168 }
12169 if (inst.relax) {
12170 output_relax_insn();
12171 return;
12172 }
12173 if (inst.size == 0)
12174 return;
12175
12176 to = frag_more (inst.size);
12177
12178 if (thumb_mode && (inst.size > THUMB_SIZE))
12179 {
12180 assert (inst.size == (2 * THUMB_SIZE));
12181 put_thumb32_insn (to, inst.instruction);
12182 }
12183 else if (inst.size > INSN_SIZE)
12184 {
12185 assert (inst.size == (2 * INSN_SIZE));
12186 md_number_to_chars (to, inst.instruction, INSN_SIZE);
12187 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
12188 }
12189 else
12190 md_number_to_chars (to, inst.instruction, inst.size);
12191
12192 if (inst.reloc.type != BFD_RELOC_UNUSED)
12193 fix_new_arm (frag_now, to - frag_now->fr_literal,
12194 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
12195 inst.reloc.type);
12196
12197 #ifdef OBJ_ELF
12198 dwarf2_emit_insn (inst.size);
12199 #endif
12200 }
12201
12202 /* Tag values used in struct asm_opcode's tag field. */
12203 enum opcode_tag
12204 {
12205 OT_unconditional, /* Instruction cannot be conditionalized.
12206 The ARM condition field is still 0xE. */
12207 OT_unconditionalF, /* Instruction cannot be conditionalized
12208 and carries 0xF in its ARM condition field. */
12209 OT_csuffix, /* Instruction takes a conditional suffix. */
12210 OT_cinfix3, /* Instruction takes a conditional infix,
12211 beginning at character index 3. (In
12212 unified mode, it becomes a suffix.) */
12213 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
12214 tsts, cmps, cmns, and teqs. */
12215 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
12216 character index 3, even in unified mode. Used for
12217 legacy instructions where suffix and infix forms
12218 may be ambiguous. */
12219 OT_csuf_or_in3, /* Instruction takes either a conditional
12220 suffix or an infix at character index 3. */
12221 OT_odd_infix_unc, /* This is the unconditional variant of an
12222 instruction that takes a conditional infix
12223 at an unusual position. In unified mode,
12224 this variant will accept a suffix. */
12225 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
12226 are the conditional variants of instructions that
12227 take conditional infixes in unusual positions.
12228 The infix appears at character index
12229 (tag - OT_odd_infix_0). These are not accepted
12230 in unified mode. */
12231 };
12232
12233 /* Subroutine of md_assemble, responsible for looking up the primary
12234 opcode from the mnemonic the user wrote. STR points to the
12235 beginning of the mnemonic.
12236
12237 This is not simply a hash table lookup, because of conditional
12238 variants. Most instructions have conditional variants, which are
12239 expressed with a _conditional affix_ to the mnemonic. If we were
12240 to encode each conditional variant as a literal string in the opcode
12241 table, it would have approximately 20,000 entries.
12242
12243 Most mnemonics take this affix as a suffix, and in unified syntax,
12244 'most' is upgraded to 'all'. However, in the divided syntax, some
12245 instructions take the affix as an infix, notably the s-variants of
12246 the arithmetic instructions. Of those instructions, all but six
12247 have the infix appear after the third character of the mnemonic.
12248
12249 Accordingly, the algorithm for looking up primary opcodes given
12250 an identifier is:
12251
12252 1. Look up the identifier in the opcode table.
12253 If we find a match, go to step U.
12254
12255 2. Look up the last two characters of the identifier in the
12256 conditions table. If we find a match, look up the first N-2
12257 characters of the identifier in the opcode table. If we
12258 find a match, go to step CE.
12259
12260 3. Look up the fourth and fifth characters of the identifier in
12261 the conditions table. If we find a match, extract those
12262 characters from the identifier, and look up the remaining
12263 characters in the opcode table. If we find a match, go
12264 to step CM.
12265
12266 4. Fail.
12267
12268 U. Examine the tag field of the opcode structure, in case this is
12269 one of the six instructions with its conditional infix in an
12270 unusual place. If it is, the tag tells us where to find the
12271 infix; look it up in the conditions table and set inst.cond
12272 accordingly. Otherwise, this is an unconditional instruction.
12273 Again set inst.cond accordingly. Return the opcode structure.
12274
12275 CE. Examine the tag field to make sure this is an instruction that
12276 should receive a conditional suffix. If it is not, fail.
12277 Otherwise, set inst.cond from the suffix we already looked up,
12278 and return the opcode structure.
12279
12280 CM. Examine the tag field to make sure this is an instruction that
12281 should receive a conditional infix after the third character.
12282 If it is not, fail. Otherwise, undo the edits to the current
12283 line of input and proceed as for case CE. */
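/* Editorial walk-through of the algorithm above (not part of the original
   comment): for "addeq", step 1 fails, step 2 finds the suffix "eq" and the
   base "add" (tag OT_csuffix), so case CE sets inst.cond to 0x0.  For the
   divided-syntax "addeqs", steps 1 and 2 fail, step 3 strips "eq" from
   character index 3 and finds "adds" (tag OT_cinfix3), so case CM applies.  */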
12284
12285 static const struct asm_opcode *
12286 opcode_lookup (char **str)
12287 {
12288 char *end, *base;
12289 char *affix;
12290 const struct asm_opcode *opcode;
12291 const struct asm_cond *cond;
12292 char save[2];
12293
12294 /* Scan up to the end of the mnemonic, which must end in white space,
12295 '.' (in unified mode only), or end of string. */
12296 for (base = end = *str; *end != '\0'; end++)
12297 if (*end == ' ' || (unified_syntax && *end == '.'))
12298 break;
12299
12300 if (end == base)
12301 return 0;
12302
12303 /* Handle a possible width suffix and/or Neon type suffix. */
12304 if (end[0] == '.')
12305 {
12306 int offset = 2;
12307
12308 if (end[1] == 'w')
12309 inst.size_req = 4;
12310 else if (end[1] == 'n')
12311 inst.size_req = 2;
12312 else
12313 offset = 0;
12314
12315 inst.vectype.elems = 0;
12316
12317 *str = end + offset;
12318
12319 if (end[offset] == '.')
12320 {
12321 /* See if we have a Neon type suffix. */
12322 if (parse_neon_type (&inst.vectype, str) == FAIL)
12323 return 0;
12324 }
12325 else if (end[offset] != '\0' && end[offset] != ' ')
12326 return 0;
12327 }
12328 else
12329 *str = end;
12330
12331 /* Look for unaffixed or special-case affixed mnemonic. */
12332 opcode = hash_find_n (arm_ops_hsh, base, end - base);
12333 if (opcode)
12334 {
12335 /* step U */
12336 if (opcode->tag < OT_odd_infix_0)
12337 {
12338 inst.cond = COND_ALWAYS;
12339 return opcode;
12340 }
12341
12342 if (unified_syntax)
12343 as_warn (_("conditional infixes are deprecated in unified syntax"));
12344 affix = base + (opcode->tag - OT_odd_infix_0);
12345 cond = hash_find_n (arm_cond_hsh, affix, 2);
12346 assert (cond);
12347
12348 inst.cond = cond->value;
12349 return opcode;
12350 }
12351
12352   /* Cannot have a conditional suffix on a mnemonic of less than three
12353      characters (a two-character suffix plus at least one base character).  */
12354 if (end - base < 3)
12355 return 0;
12356
12357 /* Look for suffixed mnemonic. */
12358 affix = end - 2;
12359 cond = hash_find_n (arm_cond_hsh, affix, 2);
12360 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
12361 if (opcode && cond)
12362 {
12363 /* step CE */
12364 switch (opcode->tag)
12365 {
12366 case OT_cinfix3_legacy:
12367 /* Ignore conditional suffixes matched on infix only mnemonics. */
12368 break;
12369
12370 case OT_cinfix3:
12371 case OT_cinfix3_deprecated:
12372 case OT_odd_infix_unc:
12373 if (!unified_syntax)
12374 return 0;
12375 /* else fall through */
12376
12377 case OT_csuffix:
12378 case OT_csuf_or_in3:
12379 inst.cond = cond->value;
12380 return opcode;
12381
12382 case OT_unconditional:
12383 case OT_unconditionalF:
12384 if (thumb_mode)
12385 {
12386 inst.cond = cond->value;
12387 }
12388 else
12389 {
12390 /* delayed diagnostic */
12391 inst.error = BAD_COND;
12392 inst.cond = COND_ALWAYS;
12393 }
12394 return opcode;
12395
12396 default:
12397 return 0;
12398 }
12399 }
12400
12401 /* Cannot have a usual-position infix on a mnemonic of less than
12402 six characters (five would be a suffix). */
12403 if (end - base < 6)
12404 return 0;
12405
12406 /* Look for infixed mnemonic in the usual position. */
12407 affix = base + 3;
12408 cond = hash_find_n (arm_cond_hsh, affix, 2);
12409 if (!cond)
12410 return 0;
12411
12412 memcpy (save, affix, 2);
12413 memmove (affix, affix + 2, (end - affix) - 2);
12414 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
12415 memmove (affix + 2, affix, (end - affix) - 2);
12416 memcpy (affix, save, 2);
12417
12418 if (opcode
12419 && (opcode->tag == OT_cinfix3
12420 || opcode->tag == OT_cinfix3_deprecated
12421 || opcode->tag == OT_csuf_or_in3
12422 || opcode->tag == OT_cinfix3_legacy))
12423 {
12424 /* step CM */
12425 if (unified_syntax
12426 && (opcode->tag == OT_cinfix3
12427 || opcode->tag == OT_cinfix3_deprecated))
12428 as_warn (_("conditional infixes are deprecated in unified syntax"));
12429
12430 inst.cond = cond->value;
12431 return opcode;
12432 }
12433
12434 return 0;
12435 }
12436
12437 void
12438 md_assemble (char *str)
12439 {
12440 char *p = str;
12441 const struct asm_opcode * opcode;
12442
12443 /* Align the previous label if needed. */
12444 if (last_label_seen != NULL)
12445 {
12446 symbol_set_frag (last_label_seen, frag_now);
12447 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
12448 S_SET_SEGMENT (last_label_seen, now_seg);
12449 }
12450
12451 memset (&inst, '\0', sizeof (inst));
12452 inst.reloc.type = BFD_RELOC_UNUSED;
12453
12454 opcode = opcode_lookup (&p);
12455 if (!opcode)
12456 {
12457 /* It wasn't an instruction, but it might be a register alias of
12458 the form alias .req reg, or a Neon .dn/.qn directive. */
12459 if (!create_register_alias (str, p)
12460 && !create_neon_reg_alias (str, p))
12461 as_bad (_("bad instruction `%s'"), str);
12462
12463 return;
12464 }
12465
12466 if (opcode->tag == OT_cinfix3_deprecated)
12467 as_warn (_("s suffix on comparison instruction is deprecated"));
12468
12469 if (thumb_mode)
12470 {
12471 arm_feature_set variant;
12472
12473 variant = cpu_variant;
12474 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
12475 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
12476 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
12477 /* Check that this instruction is supported for this CPU. */
12478 if (!opcode->tvariant
12479 || (thumb_mode == 1
12480 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
12481 {
12482 as_bad (_("selected processor does not support `%s'"), str);
12483 return;
12484 }
12485 if (inst.cond != COND_ALWAYS && !unified_syntax
12486 && opcode->tencode != do_t_branch)
12487 {
12488 as_bad (_("Thumb does not support conditional execution"));
12489 return;
12490 }
12491
12492 /* Check conditional suffixes. */
12493 if (current_it_mask)
12494 {
12495 int cond;
12496 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
12497 current_it_mask <<= 1;
12498 current_it_mask &= 0x1f;
12499 /* The BKPT instruction is unconditional even in an IT block. */
12500 if (!inst.error
12501 && cond != inst.cond && opcode->tencode != do_t_bkpt)
12502 {
12503 as_bad (_("incorrect condition in IT block"));
12504 return;
12505 }
12506 }
12507 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
12508 {
12509 	  as_bad (_("Thumb conditional instruction not in IT block"));
12510 return;
12511 }
12512
12513 mapping_state (MAP_THUMB);
12514 inst.instruction = opcode->tvalue;
12515
12516 if (!parse_operands (p, opcode->operands))
12517 opcode->tencode ();
12518
12519 /* Clear current_it_mask at the end of an IT block. */
12520 if (current_it_mask == 0x10)
12521 current_it_mask = 0;
12522
12523 if (!(inst.error || inst.relax))
12524 {
12525 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
12526 inst.size = (inst.instruction > 0xffff ? 4 : 2);
12527 if (inst.size_req && inst.size_req != inst.size)
12528 {
12529 as_bad (_("cannot honor width suffix -- `%s'"), str);
12530 return;
12531 }
12532 }
12533 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12534 *opcode->tvariant);
12535 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
12536 	 set those bits when Thumb-2 32-bit instructions are seen, i.e.
12537 anything other than bl/blx.
12538 This is overly pessimistic for relaxable instructions. */
12539 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
12540 || inst.relax)
12541 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12542 arm_ext_v6t2);
12543 }
12544 else
12545 {
12546 /* Check that this instruction is supported for this CPU. */
12547 if (!opcode->avariant ||
12548 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
12549 {
12550 as_bad (_("selected processor does not support `%s'"), str);
12551 return;
12552 }
12553 if (inst.size_req)
12554 {
12555 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
12556 return;
12557 }
12558
12559 mapping_state (MAP_ARM);
12560 inst.instruction = opcode->avalue;
12561 if (opcode->tag == OT_unconditionalF)
12562 inst.instruction |= 0xF << 28;
12563 else
12564 inst.instruction |= inst.cond << 28;
12565 inst.size = INSN_SIZE;
12566 if (!parse_operands (p, opcode->operands))
12567 opcode->aencode ();
12568 /* Arm mode bx is marked as both v4T and v5 because it's still required
12569 on a hypothetical non-thumb v5 core. */
12570 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
12571 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
12572 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
12573 else
12574 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
12575 *opcode->avariant);
12576 }
12577 output_inst (str);
12578 }
12579
12580 /* Various frobbings of labels and their addresses. */
12581
12582 void
12583 arm_start_line_hook (void)
12584 {
12585 last_label_seen = NULL;
12586 }
12587
12588 void
12589 arm_frob_label (symbolS * sym)
12590 {
12591 last_label_seen = sym;
12592
12593 ARM_SET_THUMB (sym, thumb_mode);
12594
12595 #if defined OBJ_COFF || defined OBJ_ELF
12596 ARM_SET_INTERWORK (sym, support_interwork);
12597 #endif
12598
12599 /* Note - do not allow local symbols (.Lxxx) to be labeled
12600 as Thumb functions. This is because these labels, whilst
12601 they exist inside Thumb code, are not the entry points for
12602 possible ARM->Thumb calls. Also, these labels can be used
12603      as part of a computed goto or switch statement, e.g. gcc
12604 can generate code that looks like this:
12605
12606 ldr r2, [pc, .Laaa]
12607 lsl r3, r3, #2
12608 ldr r2, [r3, r2]
12609 mov pc, r2
12610
12611 .Lbbb: .word .Lxxx
12612 .Lccc: .word .Lyyy
12613 ..etc...
12614 	     .Laaa: .word .Lbbb
12615
12616 The first instruction loads the address of the jump table.
12617 The second instruction converts a table index into a byte offset.
12618 The third instruction gets the jump address out of the table.
12619 The fourth instruction performs the jump.
12620
12621 If the address stored at .Laaa is that of a symbol which has the
12622 Thumb_Func bit set, then the linker will arrange for this address
12623 to have the bottom bit set, which in turn would mean that the
12624 address computation performed by the third instruction would end
12625 up with the bottom bit set. Since the ARM is capable of unaligned
12626 word loads, the instruction would then load the incorrect address
12627 out of the jump table, and chaos would ensue. */
12628 if (label_is_thumb_function_name
12629 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
12630 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
12631 {
12632 /* When the address of a Thumb function is taken the bottom
12633 bit of that address should be set. This will allow
12634 interworking between Arm and Thumb functions to work
12635 correctly. */
12636
12637 THUMB_SET_FUNC (sym, 1);
12638
12639 label_is_thumb_function_name = FALSE;
12640 }
12641
12642 #ifdef OBJ_ELF
12643 dwarf2_emit_label (sym);
12644 #endif
12645 }
12646
12647 int
12648 arm_data_in_code (void)
12649 {
12650 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
12651 {
12652 *input_line_pointer = '/';
12653 input_line_pointer += 5;
12654 *input_line_pointer = 0;
12655 return 1;
12656 }
12657
12658 return 0;
12659 }
12660
12661 char *
12662 arm_canonicalize_symbol_name (char * name)
12663 {
12664 int len;
12665
12666 if (thumb_mode && (len = strlen (name)) > 5
12667 && streq (name + len - 5, "/data"))
12668 *(name + len - 5) = 0;
12669
12670 return name;
12671 }
12672 \f
12673 /* Table of all register names defined by default. The user can
12674 define additional names with .req. Note that all register names
12675 should appear in both upper and lowercase variants. Some registers
12676 also have mixed-case names. */
12677
12678 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
12679 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
12680 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
12681 #define REGSET(p,t) \
12682 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
12683 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
12684 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
12685 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
12686 #define REGSETH(p,t) \
12687 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
12688 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
12689 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
12690 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
12691 #define REGSET2(p,t) \
12692 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
12693 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
12694 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
12695 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
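/* Editorial example of the expansions above: REGNUM2 (q, 2, NQ) becomes
   REGDEF (q2, 2 * 2, NQ), i.e. { "q2", 4, REG_TYPE_NQ, TRUE, 0 }, so each
   Neon quad register is entered under the number of the first of the two
   double registers it overlaps.  */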
12696
12697 static const struct reg_entry reg_names[] =
12698 {
12699 /* ARM integer registers. */
12700 REGSET(r, RN), REGSET(R, RN),
12701
12702 /* ATPCS synonyms. */
12703 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
12704 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
12705 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
12706
12707 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
12708 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
12709 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
12710
12711 /* Well-known aliases. */
12712 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
12713 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
12714
12715 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
12716 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
12717
12718 /* Coprocessor numbers. */
12719 REGSET(p, CP), REGSET(P, CP),
12720
12721 /* Coprocessor register numbers. The "cr" variants are for backward
12722 compatibility. */
12723 REGSET(c, CN), REGSET(C, CN),
12724 REGSET(cr, CN), REGSET(CR, CN),
12725
12726 /* FPA registers. */
12727 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
12728 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
12729
12730 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
12731 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
12732
12733 /* VFP SP registers. */
12734 REGSET(s,VFS), REGSET(S,VFS),
12735 REGSETH(s,VFS), REGSETH(S,VFS),
12736
12737 /* VFP DP Registers. */
12738 REGSET(d,VFD), REGSET(D,VFD),
12739 /* Extra Neon DP registers. */
12740 REGSETH(d,VFD), REGSETH(D,VFD),
12741
12742 /* Neon QP registers. */
12743 REGSET2(q,NQ), REGSET2(Q,NQ),
12744
12745 /* VFP control registers. */
12746 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
12747 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
12748
12749 /* Maverick DSP coprocessor registers. */
12750 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
12751 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
12752
12753 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
12754 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
12755 REGDEF(dspsc,0,DSPSC),
12756
12757 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
12758 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
12759 REGDEF(DSPSC,0,DSPSC),
12760
12761 /* iWMMXt data registers - p0, c0-15. */
12762 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
12763
12764 /* iWMMXt control registers - p1, c0-3. */
12765 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
12766 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
12767 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
12768 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
12769
12770 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
12771 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
12772 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
12773 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
12774 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
12775
12776 /* XScale accumulator registers. */
12777 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
12778 };
12779 #undef REGDEF
12780 #undef REGNUM
12781 #undef REGSET
12782
12783 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
12784 within psr_required_here. */
12785 static const struct asm_psr psrs[] =
12786 {
12787 /* Backward compatibility notation. Note that "all" is no longer
12788 truly all possible PSR bits. */
12789 {"all", PSR_c | PSR_f},
12790 {"flg", PSR_f},
12791 {"ctl", PSR_c},
12792
12793 /* Individual flags. */
12794 {"f", PSR_f},
12795 {"c", PSR_c},
12796 {"x", PSR_x},
12797 {"s", PSR_s},
12798 /* Combinations of flags. */
12799 {"fs", PSR_f | PSR_s},
12800 {"fx", PSR_f | PSR_x},
12801 {"fc", PSR_f | PSR_c},
12802 {"sf", PSR_s | PSR_f},
12803 {"sx", PSR_s | PSR_x},
12804 {"sc", PSR_s | PSR_c},
12805 {"xf", PSR_x | PSR_f},
12806 {"xs", PSR_x | PSR_s},
12807 {"xc", PSR_x | PSR_c},
12808 {"cf", PSR_c | PSR_f},
12809 {"cs", PSR_c | PSR_s},
12810 {"cx", PSR_c | PSR_x},
12811 {"fsx", PSR_f | PSR_s | PSR_x},
12812 {"fsc", PSR_f | PSR_s | PSR_c},
12813 {"fxs", PSR_f | PSR_x | PSR_s},
12814 {"fxc", PSR_f | PSR_x | PSR_c},
12815 {"fcs", PSR_f | PSR_c | PSR_s},
12816 {"fcx", PSR_f | PSR_c | PSR_x},
12817 {"sfx", PSR_s | PSR_f | PSR_x},
12818 {"sfc", PSR_s | PSR_f | PSR_c},
12819 {"sxf", PSR_s | PSR_x | PSR_f},
12820 {"sxc", PSR_s | PSR_x | PSR_c},
12821 {"scf", PSR_s | PSR_c | PSR_f},
12822 {"scx", PSR_s | PSR_c | PSR_x},
12823 {"xfs", PSR_x | PSR_f | PSR_s},
12824 {"xfc", PSR_x | PSR_f | PSR_c},
12825 {"xsf", PSR_x | PSR_s | PSR_f},
12826 {"xsc", PSR_x | PSR_s | PSR_c},
12827 {"xcf", PSR_x | PSR_c | PSR_f},
12828 {"xcs", PSR_x | PSR_c | PSR_s},
12829 {"cfs", PSR_c | PSR_f | PSR_s},
12830 {"cfx", PSR_c | PSR_f | PSR_x},
12831 {"csf", PSR_c | PSR_s | PSR_f},
12832 {"csx", PSR_c | PSR_s | PSR_x},
12833 {"cxf", PSR_c | PSR_x | PSR_f},
12834 {"cxs", PSR_c | PSR_x | PSR_s},
12835 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
12836 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
12837 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
12838 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
12839 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
12840 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
12841 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
12842 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
12843 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
12844 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
12845 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
12846 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
12847 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
12848 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
12849 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
12850 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
12851 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
12852 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
12853 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
12854 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
12855 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
12856 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
12857 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
12858 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
12859 };
12860
12861 /* Table of V7M psr names. */
12862 static const struct asm_psr v7m_psrs[] =
12863 {
12864 {"apsr", 0 },
12865 {"iapsr", 1 },
12866 {"eapsr", 2 },
12867 {"psr", 3 },
12868 {"ipsr", 5 },
12869 {"epsr", 6 },
12870 {"iepsr", 7 },
12871 {"msp", 8 },
12872 {"psp", 9 },
12873 {"primask", 16},
12874 {"basepri", 17},
12875 {"basepri_max", 18},
12876 {"faultmask", 19},
12877 {"control", 20}
12878 };
12879
12880 /* Table of all shift-in-operand names. */
12881 static const struct asm_shift_name shift_names [] =
12882 {
12883 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
12884 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
12885 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
12886 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
12887 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
12888 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
12889 };
12890
12891 /* Table of all explicit relocation names. */
12892 #ifdef OBJ_ELF
12893 static struct reloc_entry reloc_names[] =
12894 {
12895 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
12896 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
12897 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
12898 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
12899 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
12900 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
12901 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
12902 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
12903 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
12904 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
12905 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
12906 };
12907 #endif
12908
12909 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
12910 static const struct asm_cond conds[] =
12911 {
12912 {"eq", 0x0},
12913 {"ne", 0x1},
12914 {"cs", 0x2}, {"hs", 0x2},
12915 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
12916 {"mi", 0x4},
12917 {"pl", 0x5},
12918 {"vs", 0x6},
12919 {"vc", 0x7},
12920 {"hi", 0x8},
12921 {"ls", 0x9},
12922 {"ge", 0xa},
12923 {"lt", 0xb},
12924 {"gt", 0xc},
12925 {"le", 0xd},
12926 {"al", 0xe}
12927 };
12928
12929 static struct asm_barrier_opt barrier_opt_names[] =
12930 {
12931 { "sy", 0xf },
12932 { "un", 0x7 },
12933 { "st", 0xe },
12934 { "unst", 0x6 }
12935 };
12936
12937 /* Table of ARM-format instructions. */
12938
12939 /* Macros for gluing together operand strings. N.B. In all cases
12940 other than OPS0, the trailing OP_stop comes from default
12941 zero-initialization of the unspecified elements of the array. */
12942 #define OPS0() { OP_stop, }
12943 #define OPS1(a) { OP_##a, }
12944 #define OPS2(a,b) { OP_##a,OP_##b, }
12945 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
12946 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
12947 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
12948 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
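/* Editorial example: OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, };
   the array elements left unspecified default to zero, which is the OP_stop
   terminator mentioned above.  */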
12949
12950 /* These macros abstract out the exact format of the mnemonic table and
12951 save some repeated characters. */
12952
12953 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
12954 #define TxCE(mnem, op, top, nops, ops, ae, te) \
12955 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
12956 THUMB_VARIANT, do_##ae, do_##te }
12957
12958 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
12959 a T_MNEM_xyz enumerator. */
12960 #define TCE(mnem, aop, top, nops, ops, ae, te) \
12961 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
12962 #define tCE(mnem, aop, top, nops, ops, ae, te) \
12963 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
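/* Editorial example of how a table entry below expands: TCE (swi, f000000,
   df00, 1, (EXPi), swi, t_swi) becomes { "swi", { OP_EXPi, }, OT_csuffix,
   0xf000000, 0xdf00, ARM_VARIANT, THUMB_VARIANT, do_swi, do_t_swi }, i.e.
   one entry carrying both the ARM and the Thumb encodings.  */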
12964
12965 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
12966 infix after the third character. */
12967 #define TxC3(mnem, op, top, nops, ops, ae, te) \
12968 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
12969 THUMB_VARIANT, do_##ae, do_##te }
12970 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
12971 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
12972 THUMB_VARIANT, do_##ae, do_##te }
12973 #define TC3(mnem, aop, top, nops, ops, ae, te) \
12974 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
12975 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
12976 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
12977 #define tC3(mnem, aop, top, nops, ops, ae, te) \
12978 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12979 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
12980 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12981
12982 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
12983 appear in the condition table. */
12984 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
12985 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12986 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
12987
12988 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
12989 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
12990 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
12991 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
12992 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
12993 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
12994 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
12995 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
12996 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
12997 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
12998 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
12999 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
13000 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
13001 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
13002 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
13003 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
13004 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
13005 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
13006 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
13007 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
13008
13009 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
13010 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
13011 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
13012 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
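/* Editorial example: tCM (ld, sh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4,
   t_ldst), used further down for the legacy "ldsh" mnemonic, generates
   "ldsh" itself (tag OT_odd_infix_unc) plus one entry per condition
   ("ldeqsh", "ldnesh", ...), each tagged OT_odd_infix_0 + 2 so that
   opcode_lookup finds the infix at character index 2.  */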
13013
13014 /* Mnemonic that cannot be conditionalized. The ARM condition-code
13015 field is still 0xE. Many of the Thumb variants can be executed
13016 conditionally, so this is checked separately. */
13017 #define TUE(mnem, op, top, nops, ops, ae, te) \
13018 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
13019 THUMB_VARIANT, do_##ae, do_##te }
13020
13021 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
13022 condition code field. */
13023 #define TUF(mnem, op, top, nops, ops, ae, te) \
13024 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
13025 THUMB_VARIANT, do_##ae, do_##te }
13026
13027 /* ARM-only variants of all the above. */
13028 #define CE(mnem, op, nops, ops, ae) \
13029 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13030
13031 #define C3(mnem, op, nops, ops, ae) \
13032 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13033
13034 /* Legacy mnemonics that always have a conditional infix after the third
13035 character. */
13036 #define CL(mnem, op, nops, ops, ae) \
13037 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
13038 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13039
13040 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
13041 #define cCE(mnem, op, nops, ops, ae) \
13042 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13043
13044 /* Legacy coprocessor instructions where conditional infix and conditional
13045 suffix are ambiguous. For consistency this includes all FPA instructions,
13046 not just the potentially ambiguous ones. */
13047 #define cCL(mnem, op, nops, ops, ae) \
13048 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
13049 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13050
13051 /* Coprocessor, takes either a suffix or a position-3 infix
13052 (for an FPA corner case). */
13053 #define C3E(mnem, op, nops, ops, ae) \
13054 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
13055 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13056
13057 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
13058 { #m1 #m2 #m3, OPS##nops ops, \
13059 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
13060 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13061
13062 #define CM(m1, m2, op, nops, ops, ae) \
13063 xCM_(m1, , m2, op, nops, ops, ae), \
13064 xCM_(m1, eq, m2, op, nops, ops, ae), \
13065 xCM_(m1, ne, m2, op, nops, ops, ae), \
13066 xCM_(m1, cs, m2, op, nops, ops, ae), \
13067 xCM_(m1, hs, m2, op, nops, ops, ae), \
13068 xCM_(m1, cc, m2, op, nops, ops, ae), \
13069 xCM_(m1, ul, m2, op, nops, ops, ae), \
13070 xCM_(m1, lo, m2, op, nops, ops, ae), \
13071 xCM_(m1, mi, m2, op, nops, ops, ae), \
13072 xCM_(m1, pl, m2, op, nops, ops, ae), \
13073 xCM_(m1, vs, m2, op, nops, ops, ae), \
13074 xCM_(m1, vc, m2, op, nops, ops, ae), \
13075 xCM_(m1, hi, m2, op, nops, ops, ae), \
13076 xCM_(m1, ls, m2, op, nops, ops, ae), \
13077 xCM_(m1, ge, m2, op, nops, ops, ae), \
13078 xCM_(m1, lt, m2, op, nops, ops, ae), \
13079 xCM_(m1, gt, m2, op, nops, ops, ae), \
13080 xCM_(m1, le, m2, op, nops, ops, ae), \
13081 xCM_(m1, al, m2, op, nops, ops, ae)
13082
13083 #define UE(mnem, op, nops, ops, ae) \
13084 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
13085
13086 #define UF(mnem, op, nops, ops, ae) \
13087 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
13088
13089 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
13090 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
13091 use the same encoding function for each. */
13092 #define NUF(mnem, op, nops, ops, enc) \
13093 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
13094 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13095
13096 /* Neon data processing, version which indirects through neon_enc_tab for
13097 the various overloaded versions of opcodes. */
13098 #define nUF(mnem, op, nops, ops, enc) \
13099 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
13100 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13101
13102 /* Neon insn with conditional suffix for the ARM version, non-overloaded
13103 version. */
13104 #define NCE(mnem, op, nops, ops, enc) \
13105 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x##op, ARM_VARIANT, \
13106 THUMB_VARIANT, do_##enc, do_##enc }
13107
13108 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
13109 #define nCE(mnem, op, nops, ops, enc) \
13110 { #mnem, OPS##nops ops, OT_csuffix, N_MNEM_##op, N_MNEM_##op, \
13111 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13112
13113 #define do_0 0
13114
13115 /* Thumb-only, unconditional. */
13116 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
13117
13118 static const struct asm_opcode insns[] =
13119 {
13120 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
13121 #define THUMB_VARIANT &arm_ext_v4t
13122 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
13123 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
13124 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
13125 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
13126 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
13127 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
13128 tCE(add, 0800000, add, 3, (RR, oRR, SH), arit, t_add_sub),
13129 tC3(adds, 0900000, adds, 3, (RR, oRR, SH), arit, t_add_sub),
13130 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
13131 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
13132 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
13133 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
13134 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
13135 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
13136 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
13137 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
13138
13139 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
13140 for setting PSR flag bits. They are obsolete in V6 and do not
13141 have Thumb equivalents. */
13142 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13143 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13144 CL(tstp, 110f000, 2, (RR, SH), cmp),
13145 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13146 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13147 CL(cmpp, 150f000, 2, (RR, SH), cmp),
13148 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13149 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13150 CL(cmnp, 170f000, 2, (RR, SH), cmp),
13151
13152 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
13153 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
13154 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
13155 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
13156
13157 tCE(ldr, 4100000, ldr, 2, (RR, ADDR), ldst, t_ldst),
13158 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDR), ldst, t_ldst),
13159 tCE(str, 4000000, str, 2, (RR, ADDR), ldst, t_ldst),
13160 tC3(strb, 4400000, strb, 2, (RR, ADDR), ldst, t_ldst),
13161
13162 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13163 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13164 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13165 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13166 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13167 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13168
13169 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
13170 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
13171 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
13172 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
13173
13174 /* Pseudo ops. */
13175 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
13176 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
13177 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
13178
13179 /* Thumb-compatibility pseudo ops. */
13180 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
13181 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
13182 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
13183 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
13184 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
13185 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
13186 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
13187 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
13188 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
13189 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
13190 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
13191 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
13192
13193 #undef THUMB_VARIANT
13194 #define THUMB_VARIANT &arm_ext_v6
13195 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
13196
13197 /* V1 instructions with no Thumb analogue prior to V6T2. */
13198 #undef THUMB_VARIANT
13199 #define THUMB_VARIANT &arm_ext_v6t2
13200 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
13201 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
13202 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13203 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13204 CL(teqp, 130f000, 2, (RR, SH), cmp),
13205
13206 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
13207 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
13208 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
13209 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
13210
13211 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13212 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13213
13214 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13215 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13216
13217 /* V1 instructions with no Thumb analogue at all. */
13218 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
13219 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
13220
13221 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
13222 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
13223 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
13224 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
13225 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
13226 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
13227 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
13228 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
13229
13230 #undef ARM_VARIANT
13231 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
13232 #undef THUMB_VARIANT
13233 #define THUMB_VARIANT &arm_ext_v4t
13234 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13235 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13236
13237 #undef THUMB_VARIANT
13238 #define THUMB_VARIANT &arm_ext_v6t2
13239 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13240 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
13241
13242 /* Generic coprocessor instructions. */
13243 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13244 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13245 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13246 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13247 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13248 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13249 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13250
13251 #undef ARM_VARIANT
13252 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
13253 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13254 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13255
13256 #undef ARM_VARIANT
13257 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
13258 TCE(mrs, 10f0000, f3ef8000, 2, (RR, PSR), mrs, t_mrs),
13259 TCE(msr, 120f000, f3808000, 2, (PSR, RR_EXi), msr, t_msr),
13260
13261 #undef ARM_VARIANT
13262 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
13263 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13264 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13265 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13266 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13267 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13268 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13269 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13270 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13271
13272 #undef ARM_VARIANT
13273 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
13274 #undef THUMB_VARIANT
13275 #define THUMB_VARIANT &arm_ext_v4t
13276 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDR), ldstv4, t_ldst),
13277 tC3(strh, 00000b0, strh, 2, (RR, ADDR), ldstv4, t_ldst),
13278 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13279 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13280 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13281 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13282
13283 #undef ARM_VARIANT
13284 #define ARM_VARIANT &arm_ext_v4t_5
13285 /* ARM Architecture 4T. */
13286 /* Note: bx (and blx) are required on V5, even if the processor does
13287 not support Thumb. */
13288 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
13289
13290 #undef ARM_VARIANT
13291 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
13292 #undef THUMB_VARIANT
13293 #define THUMB_VARIANT &arm_ext_v5t
13294 /* Note: blx has 2 variants; the .value coded here is for
13295 BLX(2). Only this variant has conditional execution. */
13296 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
13297 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
13298
13299 #undef THUMB_VARIANT
13300 #define THUMB_VARIANT &arm_ext_v6t2
13301 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
13302 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13303 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13304 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13305 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13306 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13307 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13308 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13309
13310 #undef ARM_VARIANT
13311 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
13312 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13313 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13314 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13315 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13316
13317 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13318 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13319
13320 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13321 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13322 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13323 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13324
13325 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13326 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13327 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13328 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13329
13330 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13331 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13332
13333 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13334 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13335 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13336 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13337
13338 #undef ARM_VARIANT
13339 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
13340 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
13341 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13342 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13343
13344 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13345 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13346
13347 #undef ARM_VARIANT
13348 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
13349 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
13350
13351 #undef ARM_VARIANT
13352 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
13353 #undef THUMB_VARIANT
13354 #define THUMB_VARIANT &arm_ext_v6
13355 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
13356 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
13357 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13358 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13359 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13360 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13361 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13362 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13363 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13364 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
13365
13366 #undef THUMB_VARIANT
13367 #define THUMB_VARIANT &arm_ext_v6t2
13368 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
13369 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13370 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13371
13372 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
13373 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
13374
13375 /* ARM V6 not included in V7M (e.g. integer SIMD). */
13376 #undef THUMB_VARIANT
13377 #define THUMB_VARIANT &arm_ext_v6_notm
13378 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
13379 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
13380 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
13381 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13382 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13383 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13384 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13385 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13386 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13387 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13388 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13389 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13390 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13391 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13392 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13393 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13394 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13395 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13396 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13397 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13398 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13399 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13400 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13401 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13402 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13403 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13404 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13405 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13406 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13407 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13408 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13409 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13410 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13411 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13412 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13413 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13414 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13415 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13416 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
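/* RFE: the ia/ib/da/db suffixes name the addressing mode directly; the
   fd/fa/ea/ed forms below are the equivalent stack-oriented aliases and,
   as the table shows, carry identical opcodes. */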
13417 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13418 UF(rfeib, 9900a00, 1, (RRw), rfe),
13419 UF(rfeda, 8100a00, 1, (RRw), rfe),
13420 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13421 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13422 UF(rfefa, 9900a00, 1, (RRw), rfe),
13423 UF(rfeea, 8100a00, 1, (RRw), rfe),
13424 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13425 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13426 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13427 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13428 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13429 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13430 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13431 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13432 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13433 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13434 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13435 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13436 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13437 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13438 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13439 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13440 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13441 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13442 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13443 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13444 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13445 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13446 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13447 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13448 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13449 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13450 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13451 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13452 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
13453 UF(srsib, 9cd0500, 1, (I31w), srs),
13454 UF(srsda, 84d0500, 1, (I31w), srs),
13455 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
13456 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
13457 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
13458 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
13459 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13460 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13461 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
13462
13463 #undef ARM_VARIANT
13464 #define ARM_VARIANT &arm_ext_v6k
13465 #undef THUMB_VARIANT
13466 #define THUMB_VARIANT &arm_ext_v6k
13467 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
13468 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
13469 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
13470 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
13471
13472 #undef THUMB_VARIANT
13473 #define THUMB_VARIANT &arm_ext_v6_notm
13474 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
13475 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
13476
13477 #undef THUMB_VARIANT
13478 #define THUMB_VARIANT &arm_ext_v6t2
13479 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13480 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13481 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13482 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13483 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
13484
13485 #undef ARM_VARIANT
13486 #define ARM_VARIANT &arm_ext_v6z
13487 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
13488
13489 #undef ARM_VARIANT
13490 #define ARM_VARIANT &arm_ext_v6t2
13491 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
13492 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
13493 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13494 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13495
13496 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13497 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
13498 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
13499 TCE(rbit, 3ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
13500
13501 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13502 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13503 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13504 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13505
13506 UT(cbnz, b900, 2, (RR, EXP), t_czb),
13507 UT(cbz, b100, 2, (RR, EXP), t_czb),
13508 /* ARM does not really have an IT instruction. */
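/* In the Thumb opcodes below, the low four bits give the IT mask for the
   then/else pattern named by the mnemonic; the first condition (bits 7:4)
   and any mask adjustment its low bit implies are filled in by the t_it
   encoder.  A sketch of the intended use (no particular encoding implied):
	itte	ge
	addge	r0, r0, r1	@ first 'then' slot
	addge	r0, r0, r2	@ second 'then' slot
	addlt	r0, r0, r3	@ 'else' slot  */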
13509 TUE(it, 0, bf08, 1, (COND), it, t_it),
13510 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
13511 TUE(ite, 0, bf04, 1, (COND), it, t_it),
13512 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
13513 TUE(itet, 0, bf06, 1, (COND), it, t_it),
13514 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
13515 TUE(itee, 0, bf02, 1, (COND), it, t_it),
13516 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
13517 TUE(itett, 0, bf07, 1, (COND), it, t_it),
13518 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
13519 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
13520 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
13521 TUE(itete, 0, bf05, 1, (COND), it, t_it),
13522 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
13523 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
13524
13525 /* Thumb-2 only instructions. */
13526 #undef ARM_VARIANT
13527 #define ARM_VARIANT NULL
13528
13529 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13530 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13531 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
13532 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
13533
13534 /* Thumb-2 hardware division instructions (R and M profiles only). */
13535 #undef THUMB_VARIANT
13536 #define THUMB_VARIANT &arm_ext_div
13537 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
13538 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
13539
13540 /* ARM V7 instructions. */
13541 #undef ARM_VARIANT
13542 #define ARM_VARIANT &arm_ext_v7
13543 #undef THUMB_VARIANT
13544 #define THUMB_VARIANT &arm_ext_v7
13545 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
13546 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
13547 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
13548 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
13549 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
13550
13551 #undef ARM_VARIANT
13552 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
13553 cCE(wfs, e200110, 1, (RR), rd),
13554 cCE(rfs, e300110, 1, (RR), rd),
13555 cCE(wfc, e400110, 1, (RR), rd),
13556 cCE(rfc, e500110, 1, (RR), rd),
13557
13558 cCL(ldfs, c100100, 2, (RF, ADDR), rd_cpaddr),
13559 cCL(ldfd, c108100, 2, (RF, ADDR), rd_cpaddr),
13560 cCL(ldfe, c500100, 2, (RF, ADDR), rd_cpaddr),
13561 cCL(ldfp, c508100, 2, (RF, ADDR), rd_cpaddr),
13562
13563 cCL(stfs, c000100, 2, (RF, ADDR), rd_cpaddr),
13564 cCL(stfd, c008100, 2, (RF, ADDR), rd_cpaddr),
13565 cCL(stfe, c400100, 2, (RF, ADDR), rd_cpaddr),
13566 cCL(stfp, c408100, 2, (RF, ADDR), rd_cpaddr),
13567
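/* In the FPA mnemonics below, the first suffix letter (s/d/e) selects
   single, double or extended precision, and an optional trailing p, m or z
   selects rounding towards plus infinity, minus infinity or zero; with no
   rounding suffix the result is rounded to nearest.  For example,
   "mvfdz f0, f1" moves f1 to f0 as a double, rounding towards zero. */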
13568 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
13569 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
13570 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
13571 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
13572 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
13573 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
13574 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
13575 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
13576 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
13577 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
13578 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
13579 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
13580
13581 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
13582 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
13583 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
13584 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
13585 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
13586 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
13587 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
13588 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
13589 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
13590 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
13591 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
13592 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
13593
13594 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
13595 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
13596 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
13597 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
13598 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
13599 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
13600 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
13601 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
13602 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
13603 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
13604 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
13605 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
13606
13607 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
13608 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
13609 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
13610 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
13611 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
13612 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
13613 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
13614 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
13615 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
13616 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
13617 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
13618 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
13619
13620 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
13621 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
13622 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
13623 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
13624 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
13625 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
13626 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
13627 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
13628 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
13629 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
13630 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
13631 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
13632
13633 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
13634 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
13635 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
13636 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
13637 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
13638 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
13639 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
13640 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
13641 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
13642 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
13643 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
13644 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
13645
13646 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
13647 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
13648 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
13649 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
13650 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
13651 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
13652 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
13653 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
13654 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
13655 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
13656 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
13657 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
13658
13659 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
13660 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
13661 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
13662 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
13663 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
13664 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
13665 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
13666 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
13667 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
13668 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
13669 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
13670 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
13671
13672 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
13673 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
13674 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
13675 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
13676 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
13677 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
13678 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
13679 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
13680 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
13681 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
13682 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
13683 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
13684
13685 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
13686 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
13687 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
13688 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
13689 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
13690 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
13691 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
13692 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
13693 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
13694 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
13695 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
13696 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
13697
13698 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
13699 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
13700 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
13701 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
13702 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
13703 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
13704 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
13705 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
13706 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
13707 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
13708 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
13709 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
13710
13711 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
13712 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
13713 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
13714 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
13715 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
13716 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
13717 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
13718 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
13719 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
13720 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
13721 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
13722 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
13723
13724 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
13725 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
13726 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
13727 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
13728 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
13729 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
13730 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
13731 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
13732 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
13733 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
13734 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
13735 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
13736
13737 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
13738 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
13739 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
13740 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
13741 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
13742 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
13743 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
13744 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
13745 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
13746 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
13747 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
13748 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
13749
13750 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
13751 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
13752 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
13753 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
13754 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
13755 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
13756 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
13757 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
13758 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
13759 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
13760 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
13761 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
13762
13763 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
13764 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
13765 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
13766 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
13767 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
13768 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
13769 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
13770 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
13771 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
13772 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
13773 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
13774 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
13775
13776 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
13777 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
13778 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
13779 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
13780 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
13781 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13782 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13783 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13784 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
13785 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
13786 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
13787 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
13788
13789 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
13790 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
13791 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
13792 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
13793 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
13794 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13795 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13796 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13797 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
13798 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
13799 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
13800 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
13801
13802 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
13803 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
13804 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
13805 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
13806 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
13807 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13808 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13809 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13810 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
13811 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
13812 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
13813 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
13814
13815 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
13816 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
13817 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
13818 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
13819 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
13820 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13821 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13822 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13823 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
13824 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
13825 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
13826 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
13827
13828 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
13829 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
13830 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
13831 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
13832 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
13833 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13834 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13835 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13836 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
13837 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
13838 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
13839 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
13840
13841 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
13842 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
13843 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
13844 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
13845 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
13846 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13847 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13848 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13849 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
13850 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
13851 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
13852 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
13853
13854 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
13855 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
13856 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
13857 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
13858 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
13859 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13860 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13861 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13862 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
13863 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
13864 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
13865 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
13866
13867 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
13868 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
13869 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
13870 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
13871 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
13872 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13873 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13874 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13875 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
13876 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
13877 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
13878 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
13879
13880 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
13881 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
13882 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
13883 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
13884 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
13885 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13886 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13887 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13888 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
13889 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
13890 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
13891 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
13892
13893 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
13894 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
13895 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
13896 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
13897 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
13898 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13899 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13900 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13901 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
13902 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
13903 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
13904 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
13905
13906 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13907 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13908 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13909 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13910 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13911 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13912 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13913 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13914 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13915 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13916 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13917 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13918
13919 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13920 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13921 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13922 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13923 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13924 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13925 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13926 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13927 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13928 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13929 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13930 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13931
13932 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13933 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13934 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13935 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13936 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13937 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13938 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13939 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13940 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13941 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13942 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13943 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13944
13945 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
13946 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
13947 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
13948 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
13949
13950 cCL(flts, e000110, 2, (RF, RR), rn_rd),
13951 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
13952 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
13953 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
13954 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
13955 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
13956 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
13957 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
13958 cCL(flte, e080110, 2, (RF, RR), rn_rd),
13959 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
13960 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
13961 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
13962
13963 /* The implementation of the FIX instruction is broken on some
13964 assemblers, in that it accepts a precision specifier as well as a
13965 rounding specifier, even though the combination is meaningless.
13966 For compatibility we accept it as well, though of course it does
13967 not set any bits. */
13968 cCE(fix, e100110, 2, (RR, RF), rd_rm),
13969 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
13970 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
13971 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
13972 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
13973 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
13974 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
13975 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
13976 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
13977 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
13978 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
13979 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
13980 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
13981
13982 /* Instructions that were new with the real FPA, call them V2. */
13983 #undef ARM_VARIANT
13984 #define ARM_VARIANT &fpu_fpa_ext_v2
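/* lfm/sfm load and store a block of up to four FPA registers; the fd/ea
   forms are the stack-oriented variants. */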
13985 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13986 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13987 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13988 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13989 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13990 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13991
13992 #undef ARM_VARIANT
13993 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
13994 /* Moves and type conversions. */
13995 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
13996 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
13997 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
13998 cCE(fmstat, ef1fa10, 0, (), noargs),
13999 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
14000 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
14001 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
14002 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
14003 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
14004 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
14005 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
14006 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
14007
14008 /* Memory operations. */
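/* The fd/ea-suffixed multiple load/store mnemonics are stack-oriented
   aliases; each shares its opcode with the corresponding ia/db form. */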
14009 cCE(flds, d100a00, 2, (RVS, ADDR), vfp_sp_ldst),
14010 cCE(fsts, d000a00, 2, (RVS, ADDR), vfp_sp_ldst),
14011 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14012 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14013 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14014 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14015 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14016 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14017 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14018 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14019 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14020 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14021 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14022 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14023 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14024 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14025 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14026 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14027
14028 /* Monadic operations. */
14029 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
14030 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
14031 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
14032
14033 /* Dyadic operations. */
14034 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14035 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14036 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14037 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14038 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14039 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14040 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14041 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14042 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14043
14044 /* Comparisons. */
14045 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
14046 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
14047 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
14048 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
14049
14050 #undef ARM_VARIANT
14051 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
14052 /* Moves and type conversions. */
14053 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14054 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
14055 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
14056 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
14057 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
14058 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
14059 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
14060 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
14061 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
14062 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
14063 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
14064 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
14065 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
14066
14067 /* Memory operations. */
14068 cCE(fldd, d100b00, 2, (RVD, ADDR), vfp_dp_ldst),
14069 cCE(fstd, d000b00, 2, (RVD, ADDR), vfp_dp_ldst),
14070 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14071 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14072 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14073 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14074 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14075 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14076 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14077 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14078
14079 /* Monadic operations. */
14080 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14081 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14082 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14083
14084 /* Dyadic operations. */
14085 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14086 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14087 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14088 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14089 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14090 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14091 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14092 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14093 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14094
14095 /* Comparisons. */
14096 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14097 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
14098 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14099 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
14100
14101 #undef ARM_VARIANT
14102 #define ARM_VARIANT &fpu_vfp_ext_v2
14103 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
14104 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
14105 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
14106 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
14107
14108 #undef THUMB_VARIANT
14109 #define THUMB_VARIANT &fpu_neon_ext_v1
14110 #undef ARM_VARIANT
14111 #define ARM_VARIANT &fpu_neon_ext_v1
14112 /* Data processing with three registers of the same length. */
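/* Each insn below comes in two flavours: the base form accepts either D or
   Q operands (RNDQ), while the explicit ...q form requires Q registers
   (RNQ). */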
14113 /* Integer ops, valid types S8 S16 S32 U8 U16 U32. */
14114 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
14115 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
14116 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14117 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14118 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14119 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14120 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14121 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14122 /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
14123 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14124 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14125 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14126 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14127 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14128 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14129 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14130 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14131 /* If not immediate, fall back to neon_dyadic_i64_su.
14132 shl_imm should accept I8 I16 I32 I64,
14133 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
14134 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
14135 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
14136 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
14137 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
14138 /* Logic ops, types optional & ignored. */
14139 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
14140 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
14141 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
14142 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
14143 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
14144 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
14145 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
14146 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
14147 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
14148 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
14149 /* Bitfield ops, untyped. */
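/* vbsl = bitwise select, vbit = insert if true, vbif = insert if false. */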
14150 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14151 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14152 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14153 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14154 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14155 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14156 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
14157 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14158 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14159 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14160 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14161 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14162 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14163 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
14164 back to neon_dyadic_if_su. */
14165 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14166 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14167 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14168 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14169 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14170 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14171 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14172 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14173 /* Comparison. Types I8 I16 I32 F32. Non-immediate versions fall back to neon_dyadic_if_i. */
14174 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
14175 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
14176 /* As above, D registers only. */
14177 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14178 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14179 /* Int and float variants, signedness unimportant. */
14180 /* If not scalar, fall back to neon_dyadic_if_i. */
14181 nUF(vmla, vmla, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14182 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14183 nUF(vmls, vmls, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14184 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14185 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
14186 /* Add/sub take types I8 I16 I32 I64 F32. */
14187 nUF(vadd, vadd, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14188 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14189 nUF(vsub, vsub, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14190 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14191 /* vtst takes sizes 8, 16, 32. */
14192 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
14193 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
14194 /* VMUL takes I8 I16 I32 F32 P8. */
14195 nUF(vmul, vmul, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mul),
14196 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
14197 /* VQD{R}MULH takes S16 S32. */
14198 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14199 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14200 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14201 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14202 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14203 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14204 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14205 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14206 NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14207 NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14208 NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14209 NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14210 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14211 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14212 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14213 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14214
14215 /* Two address, int/float. Types S8 S16 S32 F32. */
14216 NUF(vabs, 1b10300, 2, (RNDQ, RNDQ), neon_abs_neg),
14217 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
14218 NUF(vneg, 1b10380, 2, (RNDQ, RNDQ), neon_abs_neg),
14219 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
14220
14221 /* Data processing with two registers and a shift amount. */
14222 /* Right shifts, and variants with rounding.
14223 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
14224 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14225 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14226 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14227 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14228 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14229 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14230 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14231 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14232 /* Shift and insert. Sizes accepted 8 16 32 64. */
14233 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
14234 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
14235 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
14236 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
14237 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
14238 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
14239 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
14240 /* Right shift immediate, saturating & narrowing, with rounding variants.
14241 Types accepted S16 S32 S64 U16 U32 U64. */
14242 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14243 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14244 /* As above, unsigned. Types accepted S16 S32 S64. */
14245 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14246 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14247 /* Right shift narrowing. Types accepted I16 I32 I64. */
14248 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14249 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14250 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
14251 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
14252 /* CVT with optional immediate for fixed-point variant. */
14253 nUF(vcvt, vcvt, 3, (RNDQ, RNDQ, oI32b), neon_cvt),
14254 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
14255
14256 /* One register and an immediate value. All encoding special-cased! */
14257 #undef THUMB_VARIANT
14258 #define THUMB_VARIANT &fpu_vfp_ext_v1
14259 #undef ARM_VARIANT
14260 #define ARM_VARIANT &fpu_vfp_ext_v1
14261 NCE(vmov, 0, 1, (VMOV), neon_mov),
14262
14263 #undef THUMB_VARIANT
14264 #define THUMB_VARIANT &fpu_neon_ext_v1
14265 #undef ARM_VARIANT
14266 #define ARM_VARIANT &fpu_neon_ext_v1
14267 NCE(vmovq, 0, 1, (VMOV), neon_mov),
14268 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
14269 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
14270
14271 /* Data processing, three registers of different lengths. */
14272 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
14273 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
14274 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
14275 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
14276 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
14277 /* If not scalar, fall back to neon_dyadic_long.
14278 Vector types as above, scalar types S16 S32 U16 U32. */
14279 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14280 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14281 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
14282 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14283 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14284 /* Dyadic, narrowing insns. Types I16 I32 I64. */
14285 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14286 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14287 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14288 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14289 /* Saturating doubling multiplies. Types S16 S32. */
14290 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14291 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14292 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14293 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
14294 S16 S32 U16 U32. */
14295 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
14296
14297 /* Extract. Size 8. */
14298 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
14299 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
14300
14301 /* Two registers, miscellaneous. */
14302 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
14303 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
14304 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
14305 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
14306 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
14307 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
14308 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
14309 /* Vector replicate. Sizes 8 16 32. */
14310 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
14311 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
14312 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
14313 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
14314 /* VMOVN. Types I16 I32 I64. */
14315 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
14316 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
14317 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
14318 /* VQMOVUN. Types S16 S32 S64. */
14319 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
14320 /* VZIP / VUZP. Sizes 8 16 32. */
14321 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
14322 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
14323 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
14324 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
14325 /* VQABS / VQNEG. Types S8 S16 S32. */
14326 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14327 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
14328 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14329 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
14330 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
14331 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
14332 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
14333 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
14334 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
14335 /* Reciprocal estimates. Types U32 F32. */
14336 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
14337 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
14338 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
14339 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
14340 /* VCLS. Types S8 S16 S32. */
14341 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
14342 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
14343 /* VCLZ. Types I8 I16 I32. */
14344 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
14345 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
14346 /* VCNT. Size 8. */
14347 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
14348 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
14349 /* Two address, untyped. */
14350 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
14351 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
14352 /* VTRN. Sizes 8 16 32. */
14353 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
14354 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
14355
14356 /* Table lookup. Size 8. */
14357 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14358 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14359
14360 #undef THUMB_VARIANT
14361 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
14362 #undef ARM_VARIANT
14363 #define ARM_VARIANT &fpu_vfp_ext_v1xd
14364
14365 /* Load/store instructions. Available in Neon or VFPv3. */
14366 NCE(vldm, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14367 NCE(vldmia, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14368 NCE(vldmdb, d100b00, 2, (RRw, NRDLST), neon_ldm_stm),
14369 NCE(vstm, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14370 NCE(vstmia, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14371 NCE(vstmdb, d000b00, 2, (RRw, NRDLST), neon_ldm_stm),
14372 NCE(vldr, d100b00, 2, (RND, ADDR), neon_ldr_str),
14373 NCE(vstr, d000b00, 2, (RND, ADDR), neon_ldr_str),
14374
14375 #undef THUMB_VARIANT
14376 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
14377 #undef ARM_VARIANT
14378 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
14379
14380 /* Neon element/structure load/store. */
14381 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14382 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14383 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14384 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14385 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14386 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14387 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14388 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14389
14390 #undef THUMB_VARIANT
14391 #define THUMB_VARIANT &fpu_vfp_ext_v3
14392 #undef ARM_VARIANT
14393 #define ARM_VARIANT &fpu_vfp_ext_v3
14394
14395 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
14396 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
14397 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14398 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14399 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14400 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14401 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14402 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14403 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14404 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14405 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14406 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14407 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14408 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14409 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14410 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14411 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14412 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14413
14414 #undef THUMB_VARIANT
14415 #undef ARM_VARIANT
14416 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
14417 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14418 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14419 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14420 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14421 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14422 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14423 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
14424 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
14425
14426 #undef ARM_VARIANT
14427 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
14428 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
14429 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
14430 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
14431 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
14432 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
14433 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
14434 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
14435 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
14436 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
14437 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14438 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14439 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14440 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14441 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14442 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14443 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14444 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14445 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14446 cCE(tmcr, e000110, 2, (RIWC, RR), rn_rd),
14447 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
14448 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14449 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14450 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14451 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14452 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14453 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14454 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
14455 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
14456 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
14457 cCE(tmrc, e100110, 2, (RR, RIWC), rd_rn),
14458 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
14459 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
14460 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
14461 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
14462 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
14463 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
14464 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
14465 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14466 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14467 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14468 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14469 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14470 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14471 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14472 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14473 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14474 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
14475 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14476 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14477 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14478 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14479 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14480 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14481 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14482 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14483 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14484 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14485 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14486 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14487 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14488 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14489 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14490 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14491 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14492 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14493 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14494 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14495 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14496 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14497 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14498 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14499 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14500 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14501 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14502 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14503 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14504 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14505 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14506 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14507 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14508 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14509 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14510 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14511 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14512 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14513 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14514 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14515 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14516 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
14517 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14518 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14519 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14520 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14521 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14522 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14523 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14524 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14525 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14526 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14527 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14528 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14529 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14530 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14531 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14532 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14533 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14534 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14535 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14536 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14537 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14538 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
14539 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14540 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14541 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14542 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14543 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14544 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14545 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14546 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14547 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14548 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14549 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14550 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14551 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14552 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14553 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14554 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14555 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14556 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14557 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14558 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14559 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14560 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14561 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14562 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14563 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14564 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14565 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14566 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14567 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14568 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14569 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14570 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
14571 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
14572 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
14573 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
14574 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
14575 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
14576 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14577 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14578 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14579 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
14580 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
14581 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
14582 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
14583 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
14584 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
14585 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14586 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14587 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14588 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14589 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
14590
14591 #undef ARM_VARIANT
14592 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
14593 cCE(cfldrs, c100400, 2, (RMF, ADDR), rd_cpaddr),
14594 cCE(cfldrd, c500400, 2, (RMD, ADDR), rd_cpaddr),
14595 cCE(cfldr32, c100500, 2, (RMFX, ADDR), rd_cpaddr),
14596 cCE(cfldr64, c500500, 2, (RMDX, ADDR), rd_cpaddr),
14597 cCE(cfstrs, c000400, 2, (RMF, ADDR), rd_cpaddr),
14598 cCE(cfstrd, c400400, 2, (RMD, ADDR), rd_cpaddr),
14599 cCE(cfstr32, c000500, 2, (RMFX, ADDR), rd_cpaddr),
14600 cCE(cfstr64, c400500, 2, (RMDX, ADDR), rd_cpaddr),
14601 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
14602 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
14603 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
14604 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
14605 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
14606 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
14607 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
14608 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
14609 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
14610 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
14611 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
14612 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
14613 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
14614 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
14615 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
14616 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
14617 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
14618 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
14619 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
14620 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
14621 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
14622 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
14623 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
14624 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
14625 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
14626 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
14627 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
14628 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
14629 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
14630 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
14631 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
14632 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
14633 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
14634 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
14635 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
14636 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
14637 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
14638 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
14639 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
14640 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
14641 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
14642 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
14643 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
14644 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
14645 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
14646 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
14647 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
14648 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
14649 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
14650 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
14651 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
14652 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
14653 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
14654 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
14655 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
14656 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
14657 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14658 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14659 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14660 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14661 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14662 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14663 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14664 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14665 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14666 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14667 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14668 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14669 };
14670 #undef ARM_VARIANT
14671 #undef THUMB_VARIANT
14672 #undef TCE
14673 #undef TCM
14674 #undef TUE
14675 #undef TUF
14676 #undef TCC
14677 #undef cCE
14678 #undef cCL
14679 #undef C3E
14680 #undef CE
14681 #undef CM
14682 #undef UE
14683 #undef UF
14684 #undef UT
14685 #undef NUF
14686 #undef nUF
14687 #undef NCE
14688 #undef nCE
14689 #undef OPS0
14690 #undef OPS1
14691 #undef OPS2
14692 #undef OPS3
14693 #undef OPS4
14694 #undef OPS5
14695 #undef OPS6
14696 #undef do_0
14697 \f
14698 /* MD interface: bits in the object file. */
14699
14700 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
14701 for use in the a.out file, and store them in the array pointed to by buf.
14702 This knows about the endian-ness of the target machine and does
14703 THE RIGHT THING, whatever that is. Possible values for n are 1 (byte),
14704 2 (short) and 4 (long). Floating-point numbers are put out as a series of
14705 LITTLENUMS (shorts, here at least). */
14706
14707 void
14708 md_number_to_chars (char * buf, valueT val, int n)
14709 {
14710 if (target_big_endian)
14711 number_to_chars_bigendian (buf, val, n);
14712 else
14713 number_to_chars_littleendian (buf, val, n);
14714 }
14715
14716 static valueT
14717 md_chars_to_number (char * buf, int n)
14718 {
14719 valueT result = 0;
14720 unsigned char * where = (unsigned char *) buf;
14721
14722 if (target_big_endian)
14723 {
14724 while (n--)
14725 {
14726 result <<= 8;
14727 result |= (*where++ & 255);
14728 }
14729 }
14730 else
14731 {
14732 while (n--)
14733 {
14734 result <<= 8;
14735 result |= (where[n] & 255);
14736 }
14737 }
14738
14739 return result;
14740 }
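/* A worked example of the byte-order handling above (illustrative only):
reading the four bytes { 0x78, 0x56, 0x34, 0x12 } with n == 4 yields
0x12345678 on a little-endian target, while the big-endian loop reads the
same bytes in storage order and yields 0x78563412.  */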
14741
14742 /* MD interface: Sections. */
14743
14744 /* Estimate the size of a frag before relaxing. Assume everything fits in
14745 2 bytes. */
14746
14747 int
14748 md_estimate_size_before_relax (fragS * fragp,
14749 segT segtype ATTRIBUTE_UNUSED)
14750 {
14751 fragp->fr_var = 2;
14752 return 2;
14753 }
14754
14755 /* Convert a machine dependent frag. */
14756
14757 void
14758 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
14759 {
14760 unsigned long insn;
14761 unsigned long old_op;
14762 char *buf;
14763 expressionS exp;
14764 fixS *fixp;
14765 int reloc_type;
14766 int pc_rel;
14767 int opcode;
14768
14769 buf = fragp->fr_literal + fragp->fr_fix;
14770
14771 old_op = bfd_get_16(abfd, buf);
14772 if (fragp->fr_symbol) {
14773 exp.X_op = O_symbol;
14774 exp.X_add_symbol = fragp->fr_symbol;
14775 } else {
14776 exp.X_op = O_constant;
14777 }
14778 exp.X_add_number = fragp->fr_offset;
14779 opcode = fragp->fr_subtype;
14780 switch (opcode)
14781 {
14782 case T_MNEM_ldr_pc:
14783 case T_MNEM_ldr_pc2:
14784 case T_MNEM_ldr_sp:
14785 case T_MNEM_str_sp:
14786 case T_MNEM_ldr:
14787 case T_MNEM_ldrb:
14788 case T_MNEM_ldrh:
14789 case T_MNEM_str:
14790 case T_MNEM_strb:
14791 case T_MNEM_strh:
14792 if (fragp->fr_var == 4)
14793 {
14794 insn = THUMB_OP32(opcode);
14795 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
14796 {
14797 insn |= (old_op & 0x700) << 4;
14798 }
14799 else
14800 {
14801 insn |= (old_op & 7) << 12;
14802 insn |= (old_op & 0x38) << 13;
14803 }
14804 insn |= 0x00000c00;
14805 put_thumb32_insn (buf, insn);
14806 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
14807 }
14808 else
14809 {
14810 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
14811 }
14812 pc_rel = (opcode == T_MNEM_ldr_pc2);
14813 break;
14814 case T_MNEM_adr:
14815 if (fragp->fr_var == 4)
14816 {
14817 insn = THUMB_OP32 (opcode);
14818 insn |= (old_op & 0xf0) << 4;
14819 put_thumb32_insn (buf, insn);
14820 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
14821 }
14822 else
14823 {
14824 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14825 exp.X_add_number -= 4;
14826 }
14827 pc_rel = 1;
14828 break;
14829 case T_MNEM_mov:
14830 case T_MNEM_movs:
14831 case T_MNEM_cmp:
14832 case T_MNEM_cmn:
14833 if (fragp->fr_var == 4)
14834 {
14835 int r0off = (opcode == T_MNEM_mov
14836 || opcode == T_MNEM_movs) ? 0 : 8;
14837 insn = THUMB_OP32 (opcode);
14838 insn = (insn & 0xe1ffffff) | 0x10000000;
14839 insn |= (old_op & 0x700) << r0off;
14840 put_thumb32_insn (buf, insn);
14841 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14842 }
14843 else
14844 {
14845 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
14846 }
14847 pc_rel = 0;
14848 break;
14849 case T_MNEM_b:
14850 if (fragp->fr_var == 4)
14851 {
14852 insn = THUMB_OP32(opcode);
14853 put_thumb32_insn (buf, insn);
14854 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
14855 }
14856 else
14857 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
14858 pc_rel = 1;
14859 break;
14860 case T_MNEM_bcond:
14861 if (fragp->fr_var == 4)
14862 {
14863 insn = THUMB_OP32(opcode);
14864 insn |= (old_op & 0xf00) << 14;
14865 put_thumb32_insn (buf, insn);
14866 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
14867 }
14868 else
14869 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
14870 pc_rel = 1;
14871 break;
14872 case T_MNEM_add_sp:
14873 case T_MNEM_add_pc:
14874 case T_MNEM_inc_sp:
14875 case T_MNEM_dec_sp:
14876 if (fragp->fr_var == 4)
14877 {
14878 /* ??? Choose between add and addw. */
14879 insn = THUMB_OP32 (opcode);
14880 insn |= (old_op & 0xf0) << 4;
14881 put_thumb32_insn (buf, insn);
14882 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14883 }
14884 else
14885 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14886 pc_rel = 0;
14887 break;
14888
14889 case T_MNEM_addi:
14890 case T_MNEM_addis:
14891 case T_MNEM_subi:
14892 case T_MNEM_subis:
14893 if (fragp->fr_var == 4)
14894 {
14895 insn = THUMB_OP32 (opcode);
14896 insn |= (old_op & 0xf0) << 4;
14897 insn |= (old_op & 0xf) << 16;
14898 put_thumb32_insn (buf, insn);
14899 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14900 }
14901 else
14902 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14903 pc_rel = 0;
14904 break;
14905 default:
14906 abort();
14907 }
14908 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
14909 reloc_type);
14910 fixp->fx_file = fragp->fr_file;
14911 fixp->fx_line = fragp->fr_line;
14912 fragp->fr_fix += fragp->fr_var;
14913 }
14914
14915 /* Return the size of a relaxable immediate operand instruction.
14916 SHIFT and SIZE specify the form of the allowable immediate. */
14917 static int
14918 relax_immediate (fragS *fragp, int size, int shift)
14919 {
14920 offsetT offset;
14921 offsetT mask;
14922 offsetT low;
14923
14924 /* ??? Should be able to do better than this. */
14925 if (fragp->fr_symbol)
14926 return 4;
14927
14928 low = (1 << shift) - 1;
14929 mask = (1 << (shift + size)) - (1 << shift);
14930 offset = fragp->fr_offset;
14931 /* Force misaligned offsets to 32-bit variant. */
14932 if (offset & low)
14933 return -4;
14934 if (offset & ~mask)
14935 return 4;
14936 return 2;
14937 }
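/* Illustrative examples of the checks above, assuming the Thumb LDR/STR
immediate form (size == 5, shift == 2): an offset of 0x40 is word-aligned
and fits in the 5-bit word-scaled field, so the 2-byte encoding is kept;
an offset of 0x41 is misaligned and forces the 32-bit form (-4); an offset
of 0x100 exceeds the scaled range and relaxes to 4 bytes.  */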
14938
14939 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
14940 load. */
14941 static int
14942 relax_adr (fragS *fragp, asection *sec)
14943 {
14944 addressT addr;
14945 offsetT val;
14946
14947 /* Assume worst case for symbols not known to be in the same section. */
14948 if (!S_IS_DEFINED(fragp->fr_symbol)
14949 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14950 return 4;
14951
14952 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14953 addr = fragp->fr_address + fragp->fr_fix;
14954 addr = (addr + 4) & ~3;
14955 /* Fix the insn as the 4-byte version if the target address is not
14956 sufficiently aligned. This prevents an infinite loop when two
14957 instructions have contradictory range/alignment requirements. */
14958 if (val & 3)
14959 return -4;
14960 val -= addr;
14961 if (val < 0 || val > 1020)
14962 return 4;
14963 return 2;
14964 }
14965
14966 /* Return the size of a relaxable add/sub immediate instruction. */
14967 static int
14968 relax_addsub (fragS *fragp, asection *sec)
14969 {
14970 char *buf;
14971 int op;
14972
14973 buf = fragp->fr_literal + fragp->fr_fix;
14974 op = bfd_get_16(sec->owner, buf);
14975 if ((op & 0xf) == ((op >> 4) & 0xf))
14976 return relax_immediate (fragp, 8, 0);
14977 else
14978 return relax_immediate (fragp, 3, 0);
14979 }
14980
14981
14982 /* Return the size of a relaxable branch instruction. BITS is the
14983 size of the offset field in the narrow instruction. */
14984
14985 static int
14986 relax_branch (fragS *fragp, asection *sec, int bits)
14987 {
14988 addressT addr;
14989 offsetT val;
14990 offsetT limit;
14991
14992 /* Assume worst case for symbols not known to be in the same section. */
14993 if (!S_IS_DEFINED(fragp->fr_symbol)
14994 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14995 return 4;
14996
14997 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14998 addr = fragp->fr_address + fragp->fr_fix + 4;
14999 val -= addr;
15000
15001 /* The branch offset is a signed value, multiplied by 2. */
15002 limit = 1 << bits;
15003 if (val >= limit || val < -limit)
15004 return 4;
15005 return 2;
15006 }
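/* As an illustration of the limit above: with BITS == 11 (unconditional
Thumb B) the narrow form is kept only for byte displacements in
[-2048, 2047]; with BITS == 8 (conditional branch) the range is
[-256, 255].  Anything outside relaxes to the 32-bit encoding.  */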
15007
15008
15009 /* Relax a machine dependent frag. This returns the amount by which
15010 the current size of the frag should change. */
15011
15012 int
15013 arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
15014 {
15015 int oldsize;
15016 int newsize;
15017
15018 oldsize = fragp->fr_var;
15019 switch (fragp->fr_subtype)
15020 {
15021 case T_MNEM_ldr_pc2:
15022 newsize = relax_adr(fragp, sec);
15023 break;
15024 case T_MNEM_ldr_pc:
15025 case T_MNEM_ldr_sp:
15026 case T_MNEM_str_sp:
15027 newsize = relax_immediate(fragp, 8, 2);
15028 break;
15029 case T_MNEM_ldr:
15030 case T_MNEM_str:
15031 newsize = relax_immediate(fragp, 5, 2);
15032 break;
15033 case T_MNEM_ldrh:
15034 case T_MNEM_strh:
15035 newsize = relax_immediate(fragp, 5, 1);
15036 break;
15037 case T_MNEM_ldrb:
15038 case T_MNEM_strb:
15039 newsize = relax_immediate(fragp, 5, 0);
15040 break;
15041 case T_MNEM_adr:
15042 newsize = relax_adr(fragp, sec);
15043 break;
15044 case T_MNEM_mov:
15045 case T_MNEM_movs:
15046 case T_MNEM_cmp:
15047 case T_MNEM_cmn:
15048 newsize = relax_immediate(fragp, 8, 0);
15049 break;
15050 case T_MNEM_b:
15051 newsize = relax_branch(fragp, sec, 11);
15052 break;
15053 case T_MNEM_bcond:
15054 newsize = relax_branch(fragp, sec, 8);
15055 break;
15056 case T_MNEM_add_sp:
15057 case T_MNEM_add_pc:
15058 newsize = relax_immediate (fragp, 8, 2);
15059 break;
15060 case T_MNEM_inc_sp:
15061 case T_MNEM_dec_sp:
15062 newsize = relax_immediate (fragp, 7, 2);
15063 break;
15064 case T_MNEM_addi:
15065 case T_MNEM_addis:
15066 case T_MNEM_subi:
15067 case T_MNEM_subis:
15068 newsize = relax_addsub (fragp, sec);
15069 break;
15070 default:
15071 abort();
15072 }
15073 if (newsize < 0)
15074 {
15075 fragp->fr_var = -newsize;
15076 md_convert_frag (sec->owner, sec, fragp);
15077 frag_wane(fragp);
15078 return -(newsize + oldsize);
15079 }
15080 fragp->fr_var = newsize;
15081 return newsize - oldsize;
15082 }
15083
15084 /* Round up a section size to the appropriate boundary. */
15085
15086 valueT
15087 md_section_align (segT segment ATTRIBUTE_UNUSED,
15088 valueT size)
15089 {
15090 #ifdef OBJ_ELF
15091 return size;
15092 #else
15093 /* Round all sections to a multiple of 4. */
15094 return (size + 3) & ~3;
15095 #endif
15096 }
15097
15098 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
15099 of an rs_align_code fragment. */
15100
15101 void
15102 arm_handle_align (fragS * fragP)
15103 {
15104 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
15105 static char const thumb_noop[2] = { 0xc0, 0x46 };
15106 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
15107 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
15108
15109 int bytes, fix, noop_size;
15110 char * p;
15111 const char * noop;
15112
15113 if (fragP->fr_type != rs_align_code)
15114 return;
15115
15116 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
15117 p = fragP->fr_literal + fragP->fr_fix;
15118 fix = 0;
15119
15120 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
15121 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
15122
15123 if (fragP->tc_frag_data)
15124 {
15125 if (target_big_endian)
15126 noop = thumb_bigend_noop;
15127 else
15128 noop = thumb_noop;
15129 noop_size = sizeof (thumb_noop);
15130 }
15131 else
15132 {
15133 if (target_big_endian)
15134 noop = arm_bigend_noop;
15135 else
15136 noop = arm_noop;
15137 noop_size = sizeof (arm_noop);
15138 }
15139
15140 if (bytes & (noop_size - 1))
15141 {
15142 fix = bytes & (noop_size - 1);
15143 memset (p, 0, fix);
15144 p += fix;
15145 bytes -= fix;
15146 }
15147
15148 while (bytes >= noop_size)
15149 {
15150 memcpy (p, noop, noop_size);
15151 p += noop_size;
15152 bytes -= noop_size;
15153 fix += noop_size;
15154 }
15155
15156 fragP->fr_fix += fix;
15157 fragP->fr_var = noop_size;
15158 }
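/* For illustration: padding an ARM area by 6 bytes emits two zero bytes
followed by one "mov r0, r0" NOP (0xe1a00000); padding a Thumb area by the
same amount emits three "mov r8, r8" NOPs (0x46c0), since the Thumb NOP is
only two bytes long.  */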
15159
15160 /* Called from md_do_align. Used to create an alignment
15161 frag in a code section. */
15162
15163 void
15164 arm_frag_align_code (int n, int max)
15165 {
15166 char * p;
15167
15168 /* We assume that there will never be a requirement
15169 to support alignments greater than 32 bytes. */
15170 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
15171 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
15172
15173 p = frag_var (rs_align_code,
15174 MAX_MEM_FOR_RS_ALIGN_CODE,
15175 1,
15176 (relax_substateT) max,
15177 (symbolS *) NULL,
15178 (offsetT) n,
15179 (char *) NULL);
15180 *p = 0;
15181 }
15182
15183 /* Perform target specific initialisation of a frag. */
15184
15185 void
15186 arm_init_frag (fragS * fragP)
15187 {
15188 /* Record whether this frag is in an ARM or a THUMB area. */
15189 fragP->tc_frag_data = thumb_mode;
15190 }
15191
15192 #ifdef OBJ_ELF
15193 /* When we change sections we need to issue a new mapping symbol. */
15194
15195 void
15196 arm_elf_change_section (void)
15197 {
15198 flagword flags;
15199 segment_info_type *seginfo;
15200
15201 /* Link an unlinked unwind index table section to the .text section. */
15202 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
15203 && elf_linked_to_section (now_seg) == NULL)
15204 elf_linked_to_section (now_seg) = text_section;
15205
15206 if (!SEG_NORMAL (now_seg))
15207 return;
15208
15209 flags = bfd_get_section_flags (stdoutput, now_seg);
15210
15211 /* We can ignore sections that only contain debug info. */
15212 if ((flags & SEC_ALLOC) == 0)
15213 return;
15214
15215 seginfo = seg_info (now_seg);
15216 mapstate = seginfo->tc_segment_info_data.mapstate;
15217 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
15218 }
15219
15220 int
15221 arm_elf_section_type (const char * str, size_t len)
15222 {
15223 if (len == 5 && strncmp (str, "exidx", 5) == 0)
15224 return SHT_ARM_EXIDX;
15225
15226 return -1;
15227 }
15228 \f
15229 /* Code to deal with unwinding tables. */
15230
15231 static void add_unwind_adjustsp (offsetT);
15232
15233 /* Generate any deferred unwind frame offset. */
15234
15235 static void
15236 flush_pending_unwind (void)
15237 {
15238 offsetT offset;
15239
15240 offset = unwind.pending_offset;
15241 unwind.pending_offset = 0;
15242 if (offset != 0)
15243 add_unwind_adjustsp (offset);
15244 }
15245
15246 /* Add an opcode to this list for this function. Two-byte opcodes should
15247 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
15248 order. */
15249
15250 static void
15251 add_unwind_opcode (valueT op, int length)
15252 {
15253 /* Add any deferred stack adjustment. */
15254 if (unwind.pending_offset)
15255 flush_pending_unwind ();
15256
15257 unwind.sp_restored = 0;
15258
15259 if (unwind.opcode_count + length > unwind.opcode_alloc)
15260 {
15261 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
15262 if (unwind.opcodes)
15263 unwind.opcodes = xrealloc (unwind.opcodes,
15264 unwind.opcode_alloc);
15265 else
15266 unwind.opcodes = xmalloc (unwind.opcode_alloc);
15267 }
15268 while (length > 0)
15269 {
15270 length--;
15271 unwind.opcodes[unwind.opcode_count] = op & 0xff;
15272 op >>= 8;
15273 unwind.opcode_count++;
15274 }
15275 }
15276
15277 /* Add unwind opcodes to adjust the stack pointer. */
15278
15279 static void
15280 add_unwind_adjustsp (offsetT offset)
15281 {
15282 valueT op;
15283
15284 if (offset > 0x200)
15285 {
15286 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
15287 char bytes[5];
15288 int n;
15289 valueT o;
15290
15291 /* Long form: 0xb2, uleb128. */
15292 /* This might not fit in a word so add the individual bytes,
15293 remembering the list is built in reverse order. */
15294 o = (valueT) ((offset - 0x204) >> 2);
15295 if (o == 0)
15296 add_unwind_opcode (0, 1);
15297
15298 /* Calculate the uleb128 encoding of the offset. */
15299 n = 0;
15300 while (o)
15301 {
15302 bytes[n] = o & 0x7f;
15303 o >>= 7;
15304 if (o)
15305 bytes[n] |= 0x80;
15306 n++;
15307 }
15308 /* Add the insn. */
15309 for (; n; n--)
15310 add_unwind_opcode (bytes[n - 1], 1);
15311 add_unwind_opcode (0xb2, 1);
15312 }
15313 else if (offset > 0x100)
15314 {
15315 /* Two short opcodes. */
15316 add_unwind_opcode (0x3f, 1);
15317 op = (offset - 0x104) >> 2;
15318 add_unwind_opcode (op, 1);
15319 }
15320 else if (offset > 0)
15321 {
15322 /* Short opcode. */
15323 op = (offset - 4) >> 2;
15324 add_unwind_opcode (op, 1);
15325 }
15326 else if (offset < 0)
15327 {
15328 offset = -offset;
15329 while (offset > 0x100)
15330 {
15331 add_unwind_opcode (0x7f, 1);
15332 offset -= 0x100;
15333 }
15334 op = ((offset - 4) >> 2) | 0x40;
15335 add_unwind_opcode (op, 1);
15336 }
15337 }
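/* Worked examples of the encodings chosen above (illustrative only): an
adjustment of 8 bytes uses the short opcode 0x01 ("vsp = vsp + 8"); an
adjustment of 0x208 bytes takes the long form and, because the opcode list
is built in reverse, is added here as the uleb128 byte 0x01 followed by
0xb2, giving the final byte stream 0xb2 0x01.  */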
15338
15339 /* Finish the list of unwind opcodes for this function. */
15340 static void
15341 finish_unwind_opcodes (void)
15342 {
15343 valueT op;
15344
15345 if (unwind.fp_used)
15346 {
15347 /* Adjust sp as necessary. */
15348 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
15349 flush_pending_unwind ();
15350
15351 /* After restoring sp from the frame pointer. */
15352 op = 0x90 | unwind.fp_reg;
15353 add_unwind_opcode (op, 1);
15354 }
15355 else
15356 flush_pending_unwind ();
15357 }
15358
15359
15360 /* Start an exception table entry. If idx is nonzero this is an index table
15361 entry. */
15362
15363 static void
15364 start_unwind_section (const segT text_seg, int idx)
15365 {
15366 const char * text_name;
15367 const char * prefix;
15368 const char * prefix_once;
15369 const char * group_name;
15370 size_t prefix_len;
15371 size_t text_len;
15372 char * sec_name;
15373 size_t sec_name_len;
15374 int type;
15375 int flags;
15376 int linkonce;
15377
15378 if (idx)
15379 {
15380 prefix = ELF_STRING_ARM_unwind;
15381 prefix_once = ELF_STRING_ARM_unwind_once;
15382 type = SHT_ARM_EXIDX;
15383 }
15384 else
15385 {
15386 prefix = ELF_STRING_ARM_unwind_info;
15387 prefix_once = ELF_STRING_ARM_unwind_info_once;
15388 type = SHT_PROGBITS;
15389 }
15390
15391 text_name = segment_name (text_seg);
15392 if (streq (text_name, ".text"))
15393 text_name = "";
15394
15395 if (strncmp (text_name, ".gnu.linkonce.t.",
15396 strlen (".gnu.linkonce.t.")) == 0)
15397 {
15398 prefix = prefix_once;
15399 text_name += strlen (".gnu.linkonce.t.");
15400 }
15401
15402 prefix_len = strlen (prefix);
15403 text_len = strlen (text_name);
15404 sec_name_len = prefix_len + text_len;
15405 sec_name = xmalloc (sec_name_len + 1);
15406 memcpy (sec_name, prefix, prefix_len);
15407 memcpy (sec_name + prefix_len, text_name, text_len);
15408 sec_name[prefix_len + text_len] = '\0';
15409
15410 flags = SHF_ALLOC;
15411 linkonce = 0;
15412 group_name = 0;
15413
15414 /* Handle COMDAT group. */
15415 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
15416 {
15417 group_name = elf_group_name (text_seg);
15418 if (group_name == NULL)
15419 {
15420 as_bad (_("Group section `%s' has no group signature"),
15421 segment_name (text_seg));
15422 ignore_rest_of_line ();
15423 return;
15424 }
15425 flags |= SHF_GROUP;
15426 linkonce = 1;
15427 }
15428
15429 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
15430
15431 /* Set the section link for index tables. */
15432 if (idx)
15433 elf_linked_to_section (now_seg) = text_seg;
15434 }
15435
15436
15437 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
15438 personality routine data. Returns zero, or the index table value for
15439 an inline entry. */
15440
15441 static valueT
15442 create_unwind_entry (int have_data)
15443 {
15444 int size;
15445 addressT where;
15446 char *ptr;
15447 /* The current word of data. */
15448 valueT data;
15449 /* The number of bytes left in this word. */
15450 int n;
15451
15452 finish_unwind_opcodes ();
15453
15454 /* Remember the current text section. */
15455 unwind.saved_seg = now_seg;
15456 unwind.saved_subseg = now_subseg;
15457
15458 start_unwind_section (now_seg, 0);
15459
15460 if (unwind.personality_routine == NULL)
15461 {
15462 if (unwind.personality_index == -2)
15463 {
15464 if (have_data)
15465 as_bad (_("handlerdata in cantunwind frame"));
15466 return 1; /* EXIDX_CANTUNWIND. */
15467 }
15468
15469 /* Use a default personality routine if none is specified. */
15470 if (unwind.personality_index == -1)
15471 {
15472 if (unwind.opcode_count > 3)
15473 unwind.personality_index = 1;
15474 else
15475 unwind.personality_index = 0;
15476 }
15477
15478 /* Space for the personality routine entry. */
15479 if (unwind.personality_index == 0)
15480 {
15481 if (unwind.opcode_count > 3)
15482 as_bad (_("too many unwind opcodes for personality routine 0"));
15483
15484 if (!have_data)
15485 {
15486 /* All the data is inline in the index table. */
15487 data = 0x80;
15488 n = 3;
15489 while (unwind.opcode_count > 0)
15490 {
15491 unwind.opcode_count--;
15492 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15493 n--;
15494 }
15495
15496 /* Pad with "finish" opcodes. */
15497 while (n--)
15498 data = (data << 8) | 0xb0;
15499
15500 return data;
15501 }
15502 size = 0;
15503 }
15504 else
15505 /* We get two opcodes "free" in the first word. */
15506 size = unwind.opcode_count - 2;
15507 }
15508 else
15509 /* An extra byte is required for the opcode count. */
15510 size = unwind.opcode_count + 1;
15511
15512 size = (size + 3) >> 2;
15513 if (size > 0xff)
15514 as_bad (_("too many unwind opcodes"));
15515
15516 frag_align (2, 0, 0);
15517 record_alignment (now_seg, 2);
15518 unwind.table_entry = expr_build_dot ();
15519
15520 /* Allocate the table entry. */
15521 ptr = frag_more ((size << 2) + 4);
15522 where = frag_now_fix () - ((size << 2) + 4);
15523
15524 switch (unwind.personality_index)
15525 {
15526 case -1:
15527 /* ??? Should this be a PLT generating relocation? */
15528 /* Custom personality routine. */
15529 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
15530 BFD_RELOC_ARM_PREL31);
15531
15532 where += 4;
15533 ptr += 4;
15534
15535 /* Set the first byte to the number of additional words. */
15536 data = size - 1;
15537 n = 3;
15538 break;
15539
15540 /* ABI defined personality routines. */
15541 case 0:
15542 /* Three opcode bytes are packed into the first word. */
15543 data = 0x80;
15544 n = 3;
15545 break;
15546
15547 case 1:
15548 case 2:
15549 /* The size and first two opcode bytes go in the first word. */
15550 data = ((0x80 + unwind.personality_index) << 8) | size;
15551 n = 2;
15552 break;
15553
15554 default:
15555 /* Should never happen. */
15556 abort ();
15557 }
15558
15559 /* Pack the opcodes into words (MSB first), reversing the list at the same
15560 time. */
15561 while (unwind.opcode_count > 0)
15562 {
15563 if (n == 0)
15564 {
15565 md_number_to_chars (ptr, data, 4);
15566 ptr += 4;
15567 n = 4;
15568 data = 0;
15569 }
15570 unwind.opcode_count--;
15571 n--;
15572 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15573 }
15574
15575 /* Finish off the last word. */
15576 if (n < 4)
15577 {
15578 /* Pad with "finish" opcodes. */
15579 while (n--)
15580 data = (data << 8) | 0xb0;
15581
15582 md_number_to_chars (ptr, data, 4);
15583 }
15584
15585 if (!have_data)
15586 {
15587 /* Add an empty descriptor if there is no user-specified data. */
15588 ptr = frag_more (4);
15589 md_number_to_chars (ptr, 0, 4);
15590 }
15591
15592 return 0;
15593 }
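/* An illustrative case of the inline packing above: a frame whose only
opcode is 0x01 ("vsp = vsp + 8"), with default personality routine 0 and
no extra data, produces the inline index word 0x8001b0b0, i.e. the 0x80
marker, the opcode, and two 0xb0 "finish" pads.  */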
15594
15595 /* Convert REGNAME to a DWARF-2 register number. */
15596
15597 int
15598 tc_arm_regname_to_dw2regnum (char *regname)
15599 {
15600 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
15601
15602 if (reg == FAIL)
15603 return -1;
15604
15605 return reg;
15606 }
15607
15608 /* Initialize the DWARF-2 unwind information for this procedure. */
15609
15610 void
15611 tc_arm_frame_initial_instructions (void)
15612 {
15613 cfi_add_CFA_def_cfa (REG_SP, 0);
15614 }
15615 #endif /* OBJ_ELF */
15616
15617
15618 /* MD interface: Symbol and relocation handling. */
15619
15620 /* Return the address within the segment that a PC-relative fixup is
15621 relative to. For ARM, PC-relative fixups applied to instructions
15622 are generally relative to the location of the fixup plus 8 bytes.
15623 Thumb branches are offset by 4, and Thumb loads relative to PC
15624 require special handling. */
15625
15626 long
15627 md_pcrel_from_section (fixS * fixP, segT seg)
15628 {
15629 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
15630
15631 /* If this is pc-relative and we are going to emit a relocation
15632 then we just want to put out any pipeline compensation that the linker
15633 will need. Otherwise we want to use the calculated base.
15634 For WinCE we skip the bias for externals as well, since this
15635 is how the MS ARM-CE assembler behaves and we want to be compatible. */
15636 if (fixP->fx_pcrel
15637 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
15638 || (arm_force_relocation (fixP)
15639 #ifdef TE_WINCE
15640 && !S_IS_EXTERNAL (fixP->fx_addsy)
15641 #endif
15642 )))
15643 base = 0;
15644
15645 switch (fixP->fx_r_type)
15646 {
15647 /* PC relative addressing on the Thumb is slightly odd as the
15648 bottom two bits of the PC are forced to zero for the
15649 calculation. This happens *after* application of the
15650 pipeline offset. However, Thumb adrl already adjusts for
15651 this, so we need not do it again. */
15652 case BFD_RELOC_ARM_THUMB_ADD:
15653 return base & ~3;
15654
15655 case BFD_RELOC_ARM_THUMB_OFFSET:
15656 case BFD_RELOC_ARM_T32_OFFSET_IMM:
15657 case BFD_RELOC_ARM_T32_ADD_PC12:
15658 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
15659 return (base + 4) & ~3;
15660
15661 /* Thumb branches are simply offset by +4. */
15662 case BFD_RELOC_THUMB_PCREL_BRANCH7:
15663 case BFD_RELOC_THUMB_PCREL_BRANCH9:
15664 case BFD_RELOC_THUMB_PCREL_BRANCH12:
15665 case BFD_RELOC_THUMB_PCREL_BRANCH20:
15666 case BFD_RELOC_THUMB_PCREL_BRANCH23:
15667 case BFD_RELOC_THUMB_PCREL_BRANCH25:
15668 case BFD_RELOC_THUMB_PCREL_BLX:
15669 return base + 4;
15670
15671 /* ARM mode branches are offset by +8. However, the Windows CE
15672 loader expects the relocation not to take this into account. */
15673 case BFD_RELOC_ARM_PCREL_BRANCH:
15674 case BFD_RELOC_ARM_PCREL_CALL:
15675 case BFD_RELOC_ARM_PCREL_JUMP:
15676 case BFD_RELOC_ARM_PCREL_BLX:
15677 case BFD_RELOC_ARM_PLT32:
15678 #ifdef TE_WINCE
15679 /* When handling fixups immediately (because we have already
15680 discovered the value of a symbol, or the address of the frag involved),
15681 we must account for the +8 offset ourselves, as the OS loader will never
15682 see the reloc; see fixup_segment() in write.c.
15683 The S_IS_EXTERNAL test handles the case of global symbols.
15684 Those need the calculated base, not just the pipeline compensation the linker will need. */
15685 if (fixP->fx_pcrel
15686 && fixP->fx_addsy != NULL
15687 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
15688 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
15689 return base + 8;
15690 return base;
15691 #else
15692 return base + 8;
15693 #endif
15694
15695 /* ARM mode loads relative to PC are also offset by +8. Unlike
15696 branches, the Windows CE loader *does* expect the relocation
15697 to take this into account. */
15698 case BFD_RELOC_ARM_OFFSET_IMM:
15699 case BFD_RELOC_ARM_OFFSET_IMM8:
15700 case BFD_RELOC_ARM_HWLITERAL:
15701 case BFD_RELOC_ARM_LITERAL:
15702 case BFD_RELOC_ARM_CP_OFF_IMM:
15703 return base + 8;
15704
15705
15706 /* Other PC-relative relocations are un-offset. */
15707 default:
15708 return base;
15709 }
15710 }
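/* For example (illustrative, outside the WinCE special case): a Thumb
PC-relative load fixup located at address 0x1002 returns
(0x1002 + 4) & ~3 == 0x1004, reflecting the rule that the Thumb PC value
is the aligned instruction address plus 4; an ARM branch fixup at 0x1000
simply returns 0x1008.  */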
15711
15712 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
15713 Otherwise we have no need to default values of symbols. */
15714
15715 symbolS *
15716 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
15717 {
15718 #ifdef OBJ_ELF
15719 if (name[0] == '_' && name[1] == 'G'
15720 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
15721 {
15722 if (!GOT_symbol)
15723 {
15724 if (symbol_find (name))
15725 as_bad (_("GOT already in the symbol table"));
15726
15727 GOT_symbol = symbol_new (name, undefined_section,
15728 (valueT) 0, & zero_address_frag);
15729 }
15730
15731 return GOT_symbol;
15732 }
15733 #endif
15734
15735 return 0;
15736 }
15737
15738 /* Subroutine of md_apply_fix. Check to see if an immediate can be
15739 computed as two separate immediate values, added together. We
15740 already know that this value cannot be computed by just one ARM
15741 instruction. */
15742
15743 static unsigned int
15744 validate_immediate_twopart (unsigned int val,
15745 unsigned int * highpart)
15746 {
15747 unsigned int a;
15748 unsigned int i;
15749
15750 for (i = 0; i < 32; i += 2)
15751 if (((a = rotate_left (val, i)) & 0xff) != 0)
15752 {
15753 if (a & 0xff00)
15754 {
15755 if (a & ~ 0xffff)
15756 continue;
15757 * highpart = (a >> 8) | ((i + 24) << 7);
15758 }
15759 else if (a & 0xff0000)
15760 {
15761 if (a & 0xff000000)
15762 continue;
15763 * highpart = (a >> 16) | ((i + 16) << 7);
15764 }
15765 else
15766 {
15767 assert (a & 0xff000000);
15768 * highpart = (a >> 24) | ((i + 8) << 7);
15769 }
15770
15771 return (a & 0xff) | (i << 7);
15772 }
15773
15774 return FAIL;
15775 }
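/* A worked example of the split performed above (illustrative only): the
value 0xfff cannot be encoded as a single rotated 8-bit immediate, but it
splits into a low part of 0xff (no rotation) plus a high part of 0xf00
(0x0f rotated), which is how ADRL expands into two ADD/SUB instructions.  */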
15776
15777 static int
15778 validate_offset_imm (unsigned int val, int hwse)
15779 {
15780 if ((hwse && val > 255) || val > 4095)
15781 return FAIL;
15782 return val;
15783 }
15784
15785 /* Subroutine of md_apply_fix. Do those data_ops which can take a
15786 negative immediate constant by altering the instruction. A bit of
15787 a hack really.
15788 MOV <-> MVN
15789 AND <-> BIC
15790 ADC <-> SBC
15791 by inverting the second operand, and
15792 ADD <-> SUB
15793 CMP <-> CMN
15794 by negating the second operand. */
15795
15796 static int
15797 negate_data_op (unsigned long * instruction,
15798 unsigned long value)
15799 {
15800 int op, new_inst;
15801 unsigned long negated, inverted;
15802
15803 negated = encode_arm_immediate (-value);
15804 inverted = encode_arm_immediate (~value);
15805
15806 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
15807 switch (op)
15808 {
15809 /* First negates. */
15810 case OPCODE_SUB: /* ADD <-> SUB */
15811 new_inst = OPCODE_ADD;
15812 value = negated;
15813 break;
15814
15815 case OPCODE_ADD:
15816 new_inst = OPCODE_SUB;
15817 value = negated;
15818 break;
15819
15820 case OPCODE_CMP: /* CMP <-> CMN */
15821 new_inst = OPCODE_CMN;
15822 value = negated;
15823 break;
15824
15825 case OPCODE_CMN:
15826 new_inst = OPCODE_CMP;
15827 value = negated;
15828 break;
15829
15830 /* Now Inverted ops. */
15831 case OPCODE_MOV: /* MOV <-> MVN */
15832 new_inst = OPCODE_MVN;
15833 value = inverted;
15834 break;
15835
15836 case OPCODE_MVN:
15837 new_inst = OPCODE_MOV;
15838 value = inverted;
15839 break;
15840
15841 case OPCODE_AND: /* AND <-> BIC */
15842 new_inst = OPCODE_BIC;
15843 value = inverted;
15844 break;
15845
15846 case OPCODE_BIC:
15847 new_inst = OPCODE_AND;
15848 value = inverted;
15849 break;
15850
15851 case OPCODE_ADC: /* ADC <-> SBC */
15852 new_inst = OPCODE_SBC;
15853 value = inverted;
15854 break;
15855
15856 case OPCODE_SBC:
15857 new_inst = OPCODE_ADC;
15858 value = inverted;
15859 break;
15860
15861 /* We cannot do anything. */
15862 default:
15863 return FAIL;
15864 }
15865
15866 if (value == (unsigned) FAIL)
15867 return FAIL;
15868
15869 *instruction &= OPCODE_MASK;
15870 *instruction |= new_inst << DATA_OP_SHIFT;
15871 return value;
15872 }
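/* For instance (illustrative): "mov rd, #0xffffffff" has no valid rotated
immediate, but the routine above rewrites it as "mvn rd, #0"; similarly
"add rd, rn, #-8" becomes "sub rd, rn, #8".  */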
15873
15874 /* Like negate_data_op, but for Thumb-2. */
15875
15876 static unsigned int
15877 thumb32_negate_data_op (offsetT *instruction, offsetT value)
15878 {
15879 int op, new_inst;
15880 int rd;
15881 offsetT negated, inverted;
15882
15883 negated = encode_thumb32_immediate (-value);
15884 inverted = encode_thumb32_immediate (~value);
15885
15886 rd = (*instruction >> 8) & 0xf;
15887 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
15888 switch (op)
15889 {
15890 /* ADD <-> SUB. Includes CMP <-> CMN. */
15891 case T2_OPCODE_SUB:
15892 new_inst = T2_OPCODE_ADD;
15893 value = negated;
15894 break;
15895
15896 case T2_OPCODE_ADD:
15897 new_inst = T2_OPCODE_SUB;
15898 value = negated;
15899 break;
15900
15901 /* ORR <-> ORN. Includes MOV <-> MVN. */
15902 case T2_OPCODE_ORR:
15903 new_inst = T2_OPCODE_ORN;
15904 value = inverted;
15905 break;
15906
15907 case T2_OPCODE_ORN:
15908 new_inst = T2_OPCODE_ORR;
15909 value = inverted;
15910 break;
15911
15912 /* AND <-> BIC. TST has no inverted equivalent. */
15913 case T2_OPCODE_AND:
15914 new_inst = T2_OPCODE_BIC;
15915 if (rd == 15)
15916 value = FAIL;
15917 else
15918 value = inverted;
15919 break;
15920
15921 case T2_OPCODE_BIC:
15922 new_inst = T2_OPCODE_AND;
15923 value = inverted;
15924 break;
15925
15926 /* ADC <-> SBC */
15927 case T2_OPCODE_ADC:
15928 new_inst = T2_OPCODE_SBC;
15929 value = inverted;
15930 break;
15931
15932 case T2_OPCODE_SBC:
15933 new_inst = T2_OPCODE_ADC;
15934 value = inverted;
15935 break;
15936
15937 /* We cannot do anything. */
15938 default:
15939 return FAIL;
15940 }
15941
15942 if (value == FAIL)
15943 return FAIL;
15944
15945 *instruction &= T2_OPCODE_MASK;
15946 *instruction |= new_inst << T2_DATA_OP_SHIFT;
15947 return value;
15948 }
15949
15950 /* Read a 32-bit thumb instruction from buf. */
15951 static unsigned long
15952 get_thumb32_insn (char * buf)
15953 {
15954 unsigned long insn;
15955 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
15956 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
15957
15958 return insn;
15959 }
15960
15961
15962 /* We usually want to set the low bit on the address of Thumb function
15963 symbols. In particular .word foo - . should have the low bit set.
15964 Generic code tries to fold the difference of two symbols to
15965 a constant. Prevent this, and force a relocation, when the first symbol
15966 is a Thumb function. */
15967 int
15968 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
15969 {
15970 if (op == O_subtract
15971 && l->X_op == O_symbol
15972 && r->X_op == O_symbol
15973 && THUMB_IS_FUNC (l->X_add_symbol))
15974 {
15975 l->X_op = O_subtract;
15976 l->X_op_symbol = r->X_add_symbol;
15977 l->X_add_number -= r->X_add_number;
15978 return 1;
15979 }
15980 /* Process as normal. */
15981 return 0;
15982 }
15983
15984 void
15985 md_apply_fix (fixS * fixP,
15986 valueT * valP,
15987 segT seg)
15988 {
15989 offsetT value = * valP;
15990 offsetT newval;
15991 unsigned int newimm;
15992 unsigned long temp;
15993 int sign;
15994 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
15995
15996 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
15997
15998 /* Note whether this will delete the relocation. */
15999 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
16000 fixP->fx_done = 1;
16001
16002 /* On a 64-bit host, silently truncate 'value' to 32 bits for
16003 consistency with the behavior on 32-bit hosts. Remember value
16004 for emit_reloc. */
16005 value &= 0xffffffff;
16006 value ^= 0x80000000;
16007 value -= 0x80000000;
16008
16009 *valP = value;
16010 fixP->fx_addnumber = value;
16011
16012 /* Same treatment for fixP->fx_offset. */
16013 fixP->fx_offset &= 0xffffffff;
16014 fixP->fx_offset ^= 0x80000000;
16015 fixP->fx_offset -= 0x80000000;
16016
16017 switch (fixP->fx_r_type)
16018 {
16019 case BFD_RELOC_NONE:
16020 /* This will need to go in the object file. */
16021 fixP->fx_done = 0;
16022 break;
16023
16024 case BFD_RELOC_ARM_IMMEDIATE:
16025 /* We claim that this fixup has been processed here,
16026 even if in fact we generate an error because we do
16027 not have a reloc for it, so tc_gen_reloc will reject it. */
16028 fixP->fx_done = 1;
16029
16030 if (fixP->fx_addsy
16031 && ! S_IS_DEFINED (fixP->fx_addsy))
16032 {
16033 as_bad_where (fixP->fx_file, fixP->fx_line,
16034 _("undefined symbol %s used as an immediate value"),
16035 S_GET_NAME (fixP->fx_addsy));
16036 break;
16037 }
16038
16039 newimm = encode_arm_immediate (value);
16040 temp = md_chars_to_number (buf, INSN_SIZE);
16041
16042 /* If the instruction will fail, see if we can fix things up by
16043 changing the opcode. */
16044 if (newimm == (unsigned int) FAIL
16045 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
16046 {
16047 as_bad_where (fixP->fx_file, fixP->fx_line,
16048 _("invalid constant (%lx) after fixup"),
16049 (unsigned long) value);
16050 break;
16051 }
16052
16053 newimm |= (temp & 0xfffff000);
16054 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
16055 break;
16056
16057 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
16058 {
16059 unsigned int highpart = 0;
16060 unsigned int newinsn = 0xe1a00000; /* nop. */
16061
16062 newimm = encode_arm_immediate (value);
16063 temp = md_chars_to_number (buf, INSN_SIZE);
16064
16065 /* If the instruction will fail, see if we can fix things up by
16066 changing the opcode. */
16067 if (newimm == (unsigned int) FAIL
16068 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
16069 {
16070 /* No ? OK - try using two ADD instructions to generate
16071 the value. */
16072 newimm = validate_immediate_twopart (value, & highpart);
16073
16074 /* Yes - then make sure that the second instruction is
16075 also an add. */
16076 if (newimm != (unsigned int) FAIL)
16077 newinsn = temp;
16078 /* Still No ? Try using a negated value. */
16079 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
16080 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
16081 /* Otherwise - give up. */
16082 else
16083 {
16084 as_bad_where (fixP->fx_file, fixP->fx_line,
16085 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
16086 (long) value);
16087 break;
16088 }
16089
16090 /* Replace the first operand in the 2nd instruction (which
16091 is the PC) with the destination register. We have
16092 already added in the PC in the first instruction and we
16093 do not want to do it again. */
16094 newinsn &= ~ 0xf0000;
16095 newinsn |= ((newinsn & 0x0f000) << 4);
16096 }
16097
16098 newimm |= (temp & 0xfffff000);
16099 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
16100
16101 highpart |= (newinsn & 0xfffff000);
16102 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
16103 }
16104 break;
16105
16106 case BFD_RELOC_ARM_OFFSET_IMM:
16107 if (!fixP->fx_done && seg->use_rela_p)
16108 value = 0;
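/* Fall through.  */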
16109
16110 case BFD_RELOC_ARM_LITERAL:
16111 sign = value >= 0;
16112
16113 if (value < 0)
16114 value = - value;
16115
16116 if (validate_offset_imm (value, 0) == FAIL)
16117 {
16118 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
16119 as_bad_where (fixP->fx_file, fixP->fx_line,
16120 _("invalid literal constant: pool needs to be closer"));
16121 else
16122 as_bad_where (fixP->fx_file, fixP->fx_line,
16123 _("bad immediate value for offset (%ld)"),
16124 (long) value);
16125 break;
16126 }
16127
16128 newval = md_chars_to_number (buf, INSN_SIZE);
16129 newval &= 0xff7ff000;
16130 newval |= value | (sign ? INDEX_UP : 0);
16131 md_number_to_chars (buf, newval, INSN_SIZE);
16132 break;
16133
16134 case BFD_RELOC_ARM_OFFSET_IMM8:
16135 case BFD_RELOC_ARM_HWLITERAL:
16136 sign = value >= 0;
16137
16138 if (value < 0)
16139 value = - value;
16140
16141 if (validate_offset_imm (value, 1) == FAIL)
16142 {
16143 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
16144 as_bad_where (fixP->fx_file, fixP->fx_line,
16145 _("invalid literal constant: pool needs to be closer"));
16146 else
16147 as_bad (_("bad immediate value for half-word offset (%ld)"),
16148 (long) value);
16149 break;
16150 }
16151
16152 newval = md_chars_to_number (buf, INSN_SIZE);
16153 newval &= 0xff7ff0f0;
16154 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
16155 md_number_to_chars (buf, newval, INSN_SIZE);
16156 break;
16157
16158 case BFD_RELOC_ARM_T32_OFFSET_U8:
16159 if (value < 0 || value > 1020 || value % 4 != 0)
16160 as_bad_where (fixP->fx_file, fixP->fx_line,
16161 _("bad immediate value for offset (%ld)"), (long) value);
16162 value /= 4;
16163
16164 newval = md_chars_to_number (buf+2, THUMB_SIZE);
16165 newval |= value;
16166 md_number_to_chars (buf+2, newval, THUMB_SIZE);
16167 break;
16168
16169 case BFD_RELOC_ARM_T32_OFFSET_IMM:
16170 /* This is a complicated relocation used for all varieties of Thumb32
16171 load/store instruction with immediate offset:
16172
16173 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
16174 *4, optional writeback(W)
16175 (doubleword load/store)
16176
16177 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
16178 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
16179 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
16180 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
16181 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
16182
16183 Uppercase letters indicate bits that are already encoded at
16184 this point. Lowercase letters are our problem. For the
16185 second block of instructions, the secondary opcode nybble
16186 (bits 8..11) is present, and bit 23 is zero, even if this is
16187 a PC-relative operation. */
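/* For example (illustrative only), "ldr.w r0, [r1, #4]" matches the
   positive 12-bit form above, with S=0, TT=10, L=1, NNNN=0001 and
   XXXX=0000; this fixup fills in iiii iiii iiii = 4.  */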
16188 newval = md_chars_to_number (buf, THUMB_SIZE);
16189 newval <<= 16;
16190 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
16191
16192 if ((newval & 0xf0000000) == 0xe0000000)
16193 {
16194 /* Doubleword load/store: 8-bit offset, scaled by 4. */
16195 if (value >= 0)
16196 newval |= (1 << 23);
16197 else
16198 value = -value;
16199 if (value % 4 != 0)
16200 {
16201 as_bad_where (fixP->fx_file, fixP->fx_line,
16202 _("offset not a multiple of 4"));
16203 break;
16204 }
16205 value /= 4;
16206 if (value > 0xff)
16207 {
16208 as_bad_where (fixP->fx_file, fixP->fx_line,
16209 _("offset out of range"));
16210 break;
16211 }
16212 newval &= ~0xff;
16213 }
16214 else if ((newval & 0x000f0000) == 0x000f0000)
16215 {
16216 /* PC-relative, 12-bit offset. */
16217 if (value >= 0)
16218 newval |= (1 << 23);
16219 else
16220 value = -value;
16221 if (value > 0xfff)
16222 {
16223 as_bad_where (fixP->fx_file, fixP->fx_line,
16224 _("offset out of range"));
16225 break;
16226 }
16227 newval &= ~0xfff;
16228 }
16229 else if ((newval & 0x00000100) == 0x00000100)
16230 {
16231 /* Writeback: 8-bit, +/- offset. */
16232 if (value >= 0)
16233 newval |= (1 << 9);
16234 else
16235 value = -value;
16236 if (value > 0xff)
16237 {
16238 as_bad_where (fixP->fx_file, fixP->fx_line,
16239 _("offset out of range"));
16240 break;
16241 }
16242 newval &= ~0xff;
16243 }
16244 else if ((newval & 0x00000f00) == 0x00000e00)
16245 {
16246 /* T-instruction: positive 8-bit offset. */
16247 if (value < 0 || value > 0xff)
16248 {
16249 as_bad_where (fixP->fx_file, fixP->fx_line,
16250 _("offset out of range"));
16251 break;
16252 }
16253 newval &= ~0xff;
16254 newval |= value;
16255 }
16256 else
16257 {
16258 /* Positive 12-bit or negative 8-bit offset. */
16259 int limit;
16260 if (value >= 0)
16261 {
16262 newval |= (1 << 23);
16263 limit = 0xfff;
16264 }
16265 else
16266 {
16267 value = -value;
16268 limit = 0xff;
16269 }
16270 if (value > limit)
16271 {
16272 as_bad_where (fixP->fx_file, fixP->fx_line,
16273 _("offset out of range"));
16274 break;
16275 }
16276 newval &= ~limit;
16277 }
16278
16279 newval |= value;
16280 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
16281 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
16282 break;
16283
16284 case BFD_RELOC_ARM_SHIFT_IMM:
16285 newval = md_chars_to_number (buf, INSN_SIZE);
16286 if (((unsigned long) value) > 32
16287 || (value == 32
16288 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
16289 {
16290 as_bad_where (fixP->fx_file, fixP->fx_line,
16291 _("shift expression is too large"));
16292 break;
16293 }
16294
16295 if (value == 0)
16296 /* Shifts of zero must be done as lsl. */
16297 newval &= ~0x60;
16298 else if (value == 32)
16299 value = 0;
16300 newval &= 0xfffff07f;
16301 newval |= (value & 0x1f) << 7;
16302 md_number_to_chars (buf, newval, INSN_SIZE);
16303 break;
16304
16305 case BFD_RELOC_ARM_T32_IMMEDIATE:
16306 case BFD_RELOC_ARM_T32_IMM12:
16307 case BFD_RELOC_ARM_T32_ADD_PC12:
16308 /* We claim that this fixup has been processed here,
16309 even if in fact we generate an error because we do
16310 not have a reloc for it, so tc_gen_reloc will reject it. */
16311 fixP->fx_done = 1;
16312
16313 if (fixP->fx_addsy
16314 && ! S_IS_DEFINED (fixP->fx_addsy))
16315 {
16316 as_bad_where (fixP->fx_file, fixP->fx_line,
16317 _("undefined symbol %s used as an immediate value"),
16318 S_GET_NAME (fixP->fx_addsy));
16319 break;
16320 }
16321
16322 newval = md_chars_to_number (buf, THUMB_SIZE);
16323 newval <<= 16;
16324 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
16325
16326 /* FUTURE: Implement analogue of negate_data_op for T32. */
16327 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE)
16328 {
16329 newimm = encode_thumb32_immediate (value);
16330 if (newimm == (unsigned int) FAIL)
16331 newimm = thumb32_negate_data_op (&newval, value);
16332 }
16333 else
16334 {
16335 /* 12 bit immediate for addw/subw. */
16336 if (value < 0)
16337 {
16338 value = -value;
16339 newval ^= 0x00a00000;
16340 }
16341 if (value > 0xfff)
16342 newimm = (unsigned int) FAIL;
16343 else
16344 newimm = value;
16345 }
16346
16347 if (newimm == (unsigned int)FAIL)
16348 {
16349 as_bad_where (fixP->fx_file, fixP->fx_line,
16350 _("invalid constant (%lx) after fixup"),
16351 (unsigned long) value);
16352 break;
16353 }
16354
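/* Scatter the 12-bit encoded immediate into the Thumb-2 i:imm3:imm8
   fields: bit 11 of newimm goes to bit 26 of the instruction (i),
   bits 8..10 go to bits 12..14 (imm3) and bits 0..7 stay put (imm8).  */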
16355 newval |= (newimm & 0x800) << 15;
16356 newval |= (newimm & 0x700) << 4;
16357 newval |= (newimm & 0x0ff);
16358
16359 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
16360 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
16361 break;
16362
16363 case BFD_RELOC_ARM_SMC:
16364 if (((unsigned long) value) > 0xffff)
16365 as_bad_where (fixP->fx_file, fixP->fx_line,
16366 _("invalid smc expression"));
16367 newval = md_chars_to_number (buf, INSN_SIZE);
16368 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
16369 md_number_to_chars (buf, newval, INSN_SIZE);
16370 break;
16371
16372 case BFD_RELOC_ARM_SWI:
16373 if (fixP->tc_fix_data != 0)
16374 {
16375 if (((unsigned long) value) > 0xff)
16376 as_bad_where (fixP->fx_file, fixP->fx_line,
16377 _("invalid swi expression"));
16378 newval = md_chars_to_number (buf, THUMB_SIZE);
16379 newval |= value;
16380 md_number_to_chars (buf, newval, THUMB_SIZE);
16381 }
16382 else
16383 {
16384 if (((unsigned long) value) > 0x00ffffff)
16385 as_bad_where (fixP->fx_file, fixP->fx_line,
16386 _("invalid swi expression"));
16387 newval = md_chars_to_number (buf, INSN_SIZE);
16388 newval |= value;
16389 md_number_to_chars (buf, newval, INSN_SIZE);
16390 }
16391 break;
16392
16393 case BFD_RELOC_ARM_MULTI:
16394 if (((unsigned long) value) > 0xffff)
16395 as_bad_where (fixP->fx_file, fixP->fx_line,
16396 _("invalid expression in load/store multiple"));
16397 newval = value | md_chars_to_number (buf, INSN_SIZE);
16398 md_number_to_chars (buf, newval, INSN_SIZE);
16399 break;
16400
16401 #ifdef OBJ_ELF
16402 case BFD_RELOC_ARM_PCREL_CALL:
16403 newval = md_chars_to_number (buf, INSN_SIZE);
16404 if ((newval & 0xf0000000) == 0xf0000000)
16405 temp = 1;
16406 else
16407 temp = 3;
16408 goto arm_branch_common;
16409
16410 case BFD_RELOC_ARM_PCREL_JUMP:
16411 case BFD_RELOC_ARM_PLT32:
16412 #endif
16413 case BFD_RELOC_ARM_PCREL_BRANCH:
16414 temp = 3;
16415 goto arm_branch_common;
16416
16417 case BFD_RELOC_ARM_PCREL_BLX:
16418 temp = 1;
16419 arm_branch_common:
16420 /* We are going to store value (shifted right by two) in the
16421 instruction, in a 24 bit, signed field. Bits 25 through 31 must
16422 therefore be either all clear or all set, and bit 0 must be clear.
16423 For B/BL bit 1 must also be clear. */
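/* For instance, a forward branch whose target is 64 bytes beyond the
   PC base would store 64 >> 2 = 16 in the low 24 bits of the opcode.  */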
16424 if (value & temp)
16425 as_bad_where (fixP->fx_file, fixP->fx_line,
16426 _("misaligned branch destination"));
16427 if ((value & (offsetT)0xfe000000) != (offsetT)0
16428 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
16429 as_bad_where (fixP->fx_file, fixP->fx_line,
16430 _("branch out of range"));
16431
16432 if (fixP->fx_done || !seg->use_rela_p)
16433 {
16434 newval = md_chars_to_number (buf, INSN_SIZE);
16435 newval |= (value >> 2) & 0x00ffffff;
16436 /* Set the H bit on BLX instructions. */
16437 if (temp == 1)
16438 {
16439 if (value & 2)
16440 newval |= 0x01000000;
16441 else
16442 newval &= ~0x01000000;
16443 }
16444 md_number_to_chars (buf, newval, INSN_SIZE);
16445 }
16446 break;
16447
16448 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
16449 /* CBZ can only branch forward. */
16450 if (value & ~0x7e)
16451 as_bad_where (fixP->fx_file, fixP->fx_line,
16452 _("branch out of range"));
16453
16454 if (fixP->fx_done || !seg->use_rela_p)
16455 {
16456 newval = md_chars_to_number (buf, THUMB_SIZE);
16457 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
16458 md_number_to_chars (buf, newval, THUMB_SIZE);
16459 }
16460 break;
16461
16462 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
16463 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
16464 as_bad_where (fixP->fx_file, fixP->fx_line,
16465 _("branch out of range"));
16466
16467 if (fixP->fx_done || !seg->use_rela_p)
16468 {
16469 newval = md_chars_to_number (buf, THUMB_SIZE);
16470 newval |= (value & 0x1ff) >> 1;
16471 md_number_to_chars (buf, newval, THUMB_SIZE);
16472 }
16473 break;
16474
16475 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
16476 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
16477 as_bad_where (fixP->fx_file, fixP->fx_line,
16478 _("branch out of range"));
16479
16480 if (fixP->fx_done || !seg->use_rela_p)
16481 {
16482 newval = md_chars_to_number (buf, THUMB_SIZE);
16483 newval |= (value & 0xfff) >> 1;
16484 md_number_to_chars (buf, newval, THUMB_SIZE);
16485 }
16486 break;
16487
16488 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16489 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
16490 as_bad_where (fixP->fx_file, fixP->fx_line,
16491 _("conditional branch out of range"));
16492
16493 if (fixP->fx_done || !seg->use_rela_p)
16494 {
16495 offsetT newval2;
16496 addressT S, J1, J2, lo, hi;
16497
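/* Split the offset into the S, J1, J2, imm6 and imm11 fields of the
   Thumb-2 conditional branch (B<cond>.W) encoding.  */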
16498 S = (value & 0x00100000) >> 20;
16499 J2 = (value & 0x00080000) >> 19;
16500 J1 = (value & 0x00040000) >> 18;
16501 hi = (value & 0x0003f000) >> 12;
16502 lo = (value & 0x00000ffe) >> 1;
16503
16504 newval = md_chars_to_number (buf, THUMB_SIZE);
16505 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16506 newval |= (S << 10) | hi;
16507 newval2 |= (J1 << 13) | (J2 << 11) | lo;
16508 md_number_to_chars (buf, newval, THUMB_SIZE);
16509 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16510 }
16511 break;
16512
16513 case BFD_RELOC_THUMB_PCREL_BLX:
16514 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16515 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
16516 as_bad_where (fixP->fx_file, fixP->fx_line,
16517 _("branch out of range"));
16518
16519 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
16520 /* For a BLX instruction, make sure that the relocation is rounded up
16521 to a word boundary. This follows the semantics of the instruction
16522 which specifies that bit 1 of the target address will come from bit
16523 1 of the base address. */
16524 value = (value + 1) & ~ 1;
16525
16526 if (fixP->fx_done || !seg->use_rela_p)
16527 {
16528 offsetT newval2;
16529
16530 newval = md_chars_to_number (buf, THUMB_SIZE);
16531 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16532 newval |= (value & 0x7fffff) >> 12;
16533 newval2 |= (value & 0xfff) >> 1;
16534 md_number_to_chars (buf, newval, THUMB_SIZE);
16535 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16536 }
16537 break;
16538
16539 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16540 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
16541 as_bad_where (fixP->fx_file, fixP->fx_line,
16542 _("branch out of range"));
16543
16544 if (fixP->fx_done || !seg->use_rela_p)
16545 {
16546 offsetT newval2;
16547 addressT S, I1, I2, lo, hi;
16548
16549 S = (value & 0x01000000) >> 24;
16550 I1 = (value & 0x00800000) >> 23;
16551 I2 = (value & 0x00400000) >> 22;
16552 hi = (value & 0x003ff000) >> 12;
16553 lo = (value & 0x00000ffe) >> 1;
16554
16555 I1 = !(I1 ^ S);
16556 I2 = !(I2 ^ S);
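/* For B.W/BL/BLX the stored J1/J2 bits are NOT(I1 EOR S) and
   NOT(I2 EOR S); that is what the two statements above compute.  */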
16557
16558 newval = md_chars_to_number (buf, THUMB_SIZE);
16559 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16560 newval |= (S << 10) | hi;
16561 newval2 |= (I1 << 13) | (I2 << 11) | lo;
16562 md_number_to_chars (buf, newval, THUMB_SIZE);
16563 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16564 }
16565 break;
16566
16567 case BFD_RELOC_8:
16568 if (fixP->fx_done || !seg->use_rela_p)
16569 md_number_to_chars (buf, value, 1);
16570 break;
16571
16572 case BFD_RELOC_16:
16573 if (fixP->fx_done || !seg->use_rela_p)
16574 md_number_to_chars (buf, value, 2);
16575 break;
16576
16577 #ifdef OBJ_ELF
16578 case BFD_RELOC_ARM_TLS_GD32:
16579 case BFD_RELOC_ARM_TLS_LE32:
16580 case BFD_RELOC_ARM_TLS_IE32:
16581 case BFD_RELOC_ARM_TLS_LDM32:
16582 case BFD_RELOC_ARM_TLS_LDO32:
16583 S_SET_THREAD_LOCAL (fixP->fx_addsy);
16584 /* fall through */
16585
16586 case BFD_RELOC_ARM_GOT32:
16587 case BFD_RELOC_ARM_GOTOFF:
16588 case BFD_RELOC_ARM_TARGET2:
16589 if (fixP->fx_done || !seg->use_rela_p)
16590 md_number_to_chars (buf, 0, 4);
16591 break;
16592 #endif
16593
16594 case BFD_RELOC_RVA:
16595 case BFD_RELOC_32:
16596 case BFD_RELOC_ARM_TARGET1:
16597 case BFD_RELOC_ARM_ROSEGREL32:
16598 case BFD_RELOC_ARM_SBREL32:
16599 case BFD_RELOC_32_PCREL:
16600 if (fixP->fx_done || !seg->use_rela_p)
16601 #ifdef TE_WINCE
16602 /* For WinCE we only do this for pcrel fixups. */
16603 if (fixP->fx_done || fixP->fx_pcrel)
16604 #endif
16605 md_number_to_chars (buf, value, 4);
16606 break;
16607
16608 #ifdef OBJ_ELF
16609 case BFD_RELOC_ARM_PREL31:
16610 if (fixP->fx_done || !seg->use_rela_p)
16611 {
16612 newval = md_chars_to_number (buf, 4) & 0x80000000;
16613 if ((value ^ (value >> 1)) & 0x40000000)
16614 {
16615 as_bad_where (fixP->fx_file, fixP->fx_line,
16616 _("rel31 relocation overflow"));
16617 }
16618 newval |= value & 0x7fffffff;
16619 md_number_to_chars (buf, newval, 4);
16620 }
16621 break;
16622 #endif
16623
16624 case BFD_RELOC_ARM_CP_OFF_IMM:
16625 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
16626 if (value < -1023 || value > 1023 || (value & 3))
16627 as_bad_where (fixP->fx_file, fixP->fx_line,
16628 _("co-processor offset out of range"));
16629 cp_off_common:
16630 sign = value >= 0;
16631 if (value < 0)
16632 value = -value;
16633 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16634 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16635 newval = md_chars_to_number (buf, INSN_SIZE);
16636 else
16637 newval = get_thumb32_insn (buf);
16638 newval &= 0xff7fff00;
16639 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
16640 if (value == 0)
16641 newval &= ~WRITE_BACK;
16642 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16643 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16644 md_number_to_chars (buf, newval, INSN_SIZE);
16645 else
16646 put_thumb32_insn (buf, newval);
16647 break;
16648
16649 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
16650 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
16651 if (value < -255 || value > 255)
16652 as_bad_where (fixP->fx_file, fixP->fx_line,
16653 _("co-processor offset out of range"));
16654 value *= 4;
16655 goto cp_off_common;
16656
16657 case BFD_RELOC_ARM_THUMB_OFFSET:
16658 newval = md_chars_to_number (buf, THUMB_SIZE);
16659 /* Exactly what ranges apply, and where the offset is inserted,
16660 depends on the type of instruction; we can establish this from
16661 the top 4 bits. */
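/* For instance (illustrative only), a Thumb "ldr rX, [pc, #imm]"
   encoding starts 0100 1..., so it is handled by case 4 below, while
   "str rX, [sp, #imm]" starts 1001 0... and falls under case 9.  */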
16662 switch (newval >> 12)
16663 {
16664 case 4: /* PC load. */
16665 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
16666 forced to zero for these loads; md_pcrel_from has already
16667 compensated for this. */
16668 if (value & 3)
16669 as_bad_where (fixP->fx_file, fixP->fx_line,
16670 _("invalid offset, target not word aligned (0x%08lX)"),
16671 (((unsigned long) fixP->fx_frag->fr_address
16672 + (unsigned long) fixP->fx_where) & ~3)
16673 + (unsigned long) value);
16674
16675 if (value & ~0x3fc)
16676 as_bad_where (fixP->fx_file, fixP->fx_line,
16677 _("invalid offset, value too big (0x%08lX)"),
16678 (long) value);
16679
16680 newval |= value >> 2;
16681 break;
16682
16683 case 9: /* SP load/store. */
16684 if (value & ~0x3fc)
16685 as_bad_where (fixP->fx_file, fixP->fx_line,
16686 _("invalid offset, value too big (0x%08lX)"),
16687 (long) value);
16688 newval |= value >> 2;
16689 break;
16690
16691 case 6: /* Word load/store. */
16692 if (value & ~0x7c)
16693 as_bad_where (fixP->fx_file, fixP->fx_line,
16694 _("invalid offset, value too big (0x%08lX)"),
16695 (long) value);
16696 newval |= value << 4; /* 6 - 2. */
16697 break;
16698
16699 case 7: /* Byte load/store. */
16700 if (value & ~0x1f)
16701 as_bad_where (fixP->fx_file, fixP->fx_line,
16702 _("invalid offset, value too big (0x%08lX)"),
16703 (long) value);
16704 newval |= value << 6;
16705 break;
16706
16707 case 8: /* Halfword load/store. */
16708 if (value & ~0x3e)
16709 as_bad_where (fixP->fx_file, fixP->fx_line,
16710 _("invalid offset, value too big (0x%08lX)"),
16711 (long) value);
16712 newval |= value << 5; /* 6 - 1. */
16713 break;
16714
16715 default:
16716 as_bad_where (fixP->fx_file, fixP->fx_line,
16717 "Unable to process relocation for thumb opcode: %lx",
16718 (unsigned long) newval);
16719 break;
16720 }
16721 md_number_to_chars (buf, newval, THUMB_SIZE);
16722 break;
16723
16724 case BFD_RELOC_ARM_THUMB_ADD:
16725 /* This is a complicated relocation, since we use it for all of
16726 the following immediate relocations:
16727
16728 3bit ADD/SUB
16729 8bit ADD/SUB
16730 9bit ADD/SUB SP word-aligned
16731 10bit ADD PC/SP word-aligned
16732
16733 The type of instruction being processed is encoded in the
16734 instruction field:
16735
16736 0x8000 SUB
16737 0x00F0 Rd
16738 0x000F Rs
16739 */
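/* For example (illustrative only), an ADD with Rd = r1 and Rs = r2
   arrives here with 0x0012 in the instruction field; since neither
   register is SP or PC and Rs != Rd, the 3-bit ADD/SUB form is
   selected further down.  */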
16740 newval = md_chars_to_number (buf, THUMB_SIZE);
16741 {
16742 int rd = (newval >> 4) & 0xf;
16743 int rs = newval & 0xf;
16744 int subtract = !!(newval & 0x8000);
16745
16746 /* Check for HI regs, only very restricted cases allowed:
16747 Adjusting SP, and using PC or SP to get an address. */
16748 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
16749 || (rs > 7 && rs != REG_SP && rs != REG_PC))
16750 as_bad_where (fixP->fx_file, fixP->fx_line,
16751 _("invalid Hi register with immediate"));
16752
16753 /* If value is negative, choose the opposite instruction. */
16754 if (value < 0)
16755 {
16756 value = -value;
16757 subtract = !subtract;
16758 if (value < 0)
16759 as_bad_where (fixP->fx_file, fixP->fx_line,
16760 _("immediate value out of range"));
16761 }
16762
16763 if (rd == REG_SP)
16764 {
16765 if (value & ~0x1fc)
16766 as_bad_where (fixP->fx_file, fixP->fx_line,
16767 _("invalid immediate for stack address calculation"));
16768 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
16769 newval |= value >> 2;
16770 }
16771 else if (rs == REG_PC || rs == REG_SP)
16772 {
16773 if (subtract || value & ~0x3fc)
16774 as_bad_where (fixP->fx_file, fixP->fx_line,
16775 _("invalid immediate for address calculation (value = 0x%08lX)"),
16776 (unsigned long) value);
16777 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
16778 newval |= rd << 8;
16779 newval |= value >> 2;
16780 }
16781 else if (rs == rd)
16782 {
16783 if (value & ~0xff)
16784 as_bad_where (fixP->fx_file, fixP->fx_line,
16785 _("immediate value out of range"));
16786 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
16787 newval |= (rd << 8) | value;
16788 }
16789 else
16790 {
16791 if (value & ~0x7)
16792 as_bad_where (fixP->fx_file, fixP->fx_line,
16793 _("immediate value out of range"));
16794 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
16795 newval |= rd | (rs << 3) | (value << 6);
16796 }
16797 }
16798 md_number_to_chars (buf, newval, THUMB_SIZE);
16799 break;
16800
16801 case BFD_RELOC_ARM_THUMB_IMM:
16802 newval = md_chars_to_number (buf, THUMB_SIZE);
16803 if (value < 0 || value > 255)
16804 as_bad_where (fixP->fx_file, fixP->fx_line,
16805 _("invalid immediate: %ld is too large"),
16806 (long) value);
16807 newval |= value;
16808 md_number_to_chars (buf, newval, THUMB_SIZE);
16809 break;
16810
16811 case BFD_RELOC_ARM_THUMB_SHIFT:
16812 /* 5bit shift value (0..32). LSL cannot take 32. */
16813 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
16814 temp = newval & 0xf800;
16815 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
16816 as_bad_where (fixP->fx_file, fixP->fx_line,
16817 _("invalid shift value: %ld"), (long) value);
16818 /* Shifts of zero must be encoded as LSL. */
16819 if (value == 0)
16820 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
16821 /* Shifts of 32 are encoded as zero. */
16822 else if (value == 32)
16823 value = 0;
16824 newval |= value << 6;
16825 md_number_to_chars (buf, newval, THUMB_SIZE);
16826 break;
16827
16828 case BFD_RELOC_VTABLE_INHERIT:
16829 case BFD_RELOC_VTABLE_ENTRY:
16830 fixP->fx_done = 0;
16831 return;
16832
16833 case BFD_RELOC_ARM_MOVW:
16834 case BFD_RELOC_ARM_MOVT:
16835 case BFD_RELOC_ARM_THUMB_MOVW:
16836 case BFD_RELOC_ARM_THUMB_MOVT:
16837 if (fixP->fx_done || !seg->use_rela_p)
16838 {
16839 /* REL format relocations are limited to a 16-bit addend. */
16840 if (!fixP->fx_done)
16841 {
16842 if (value < -0x1000 || value > 0xffff)
16843 as_bad_where (fixP->fx_file, fixP->fx_line,
16844 _("offset too big"));
16845 }
16846 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
16847 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
16848 {
16849 value >>= 16;
16850 }
16851
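/* The (possibly shifted) 16-bit value is now scattered into the
   instruction: for the Thumb encoding it goes into imm4 (bits 16..19),
   i (bit 26), imm3 (bits 12..14) and imm8 (bits 0..7); for the ARM
   encoding it goes into imm4 (bits 16..19) and imm12 (bits 0..11).  */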
16852 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
16853 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
16854 {
16855 newval = get_thumb32_insn (buf);
16856 newval &= 0xfbf08f00;
16857 newval |= (value & 0xf000) << 4;
16858 newval |= (value & 0x0800) << 15;
16859 newval |= (value & 0x0700) << 4;
16860 newval |= (value & 0x00ff);
16861 put_thumb32_insn (buf, newval);
16862 }
16863 else
16864 {
16865 newval = md_chars_to_number (buf, 4);
16866 newval &= 0xfff0f000;
16867 newval |= value & 0x0fff;
16868 newval |= (value & 0xf000) << 4;
16869 md_number_to_chars (buf, newval, 4);
16870 }
16871 }
16872 return;
16873
16874 case BFD_RELOC_UNUSED:
16875 default:
16876 as_bad_where (fixP->fx_file, fixP->fx_line,
16877 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
16878 }
16879 }
16880
16881 /* Translate internal representation of relocation info to BFD target
16882 format. */
16883
16884 arelent *
16885 tc_gen_reloc (asection *section, fixS *fixp)
16886 {
16887 arelent * reloc;
16888 bfd_reloc_code_real_type code;
16889
16890 reloc = xmalloc (sizeof (arelent));
16891
16892 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
16893 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
16894 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
16895
16896 if (fixp->fx_pcrel)
16897 {
16898 if (section->use_rela_p)
16899 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
16900 else
16901 fixp->fx_offset = reloc->address;
16902 }
16903 reloc->addend = fixp->fx_offset;
16904
16905 switch (fixp->fx_r_type)
16906 {
16907 case BFD_RELOC_8:
16908 if (fixp->fx_pcrel)
16909 {
16910 code = BFD_RELOC_8_PCREL;
16911 break;
16912 }
16913
16914 case BFD_RELOC_16:
16915 if (fixp->fx_pcrel)
16916 {
16917 code = BFD_RELOC_16_PCREL;
16918 break;
16919 }
16920
16921 case BFD_RELOC_32:
16922 if (fixp->fx_pcrel)
16923 {
16924 code = BFD_RELOC_32_PCREL;
16925 break;
16926 }
16927
16928 case BFD_RELOC_ARM_MOVW:
16929 if (fixp->fx_pcrel)
16930 {
16931 code = BFD_RELOC_ARM_MOVW_PCREL;
16932 break;
16933 }
16934
16935 case BFD_RELOC_ARM_MOVT:
16936 if (fixp->fx_pcrel)
16937 {
16938 code = BFD_RELOC_ARM_MOVT_PCREL;
16939 break;
16940 }
16941
16942 case BFD_RELOC_ARM_THUMB_MOVW:
16943 if (fixp->fx_pcrel)
16944 {
16945 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
16946 break;
16947 }
16948
16949 case BFD_RELOC_ARM_THUMB_MOVT:
16950 if (fixp->fx_pcrel)
16951 {
16952 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
16953 break;
16954 }
16955
16956 case BFD_RELOC_NONE:
16957 case BFD_RELOC_ARM_PCREL_BRANCH:
16958 case BFD_RELOC_ARM_PCREL_BLX:
16959 case BFD_RELOC_RVA:
16960 case BFD_RELOC_THUMB_PCREL_BRANCH7:
16961 case BFD_RELOC_THUMB_PCREL_BRANCH9:
16962 case BFD_RELOC_THUMB_PCREL_BRANCH12:
16963 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16964 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16965 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16966 case BFD_RELOC_THUMB_PCREL_BLX:
16967 case BFD_RELOC_VTABLE_ENTRY:
16968 case BFD_RELOC_VTABLE_INHERIT:
16969 code = fixp->fx_r_type;
16970 break;
16971
16972 case BFD_RELOC_ARM_LITERAL:
16973 case BFD_RELOC_ARM_HWLITERAL:
16974 /* If this is called then a literal has been
16975 referenced across a section boundary. */
16976 as_bad_where (fixp->fx_file, fixp->fx_line,
16977 _("literal referenced across section boundary"));
16978 return NULL;
16979
16980 #ifdef OBJ_ELF
16981 case BFD_RELOC_ARM_GOT32:
16982 case BFD_RELOC_ARM_GOTOFF:
16983 case BFD_RELOC_ARM_PLT32:
16984 case BFD_RELOC_ARM_TARGET1:
16985 case BFD_RELOC_ARM_ROSEGREL32:
16986 case BFD_RELOC_ARM_SBREL32:
16987 case BFD_RELOC_ARM_PREL31:
16988 case BFD_RELOC_ARM_TARGET2:
16989 case BFD_RELOC_ARM_TLS_LE32:
16990 case BFD_RELOC_ARM_TLS_LDO32:
16991 case BFD_RELOC_ARM_PCREL_CALL:
16992 case BFD_RELOC_ARM_PCREL_JUMP:
16993 code = fixp->fx_r_type;
16994 break;
16995
16996 case BFD_RELOC_ARM_TLS_GD32:
16997 case BFD_RELOC_ARM_TLS_IE32:
16998 case BFD_RELOC_ARM_TLS_LDM32:
16999 /* BFD will include the symbol's address in the addend.
17000 But we don't want that, so subtract it out again here. */
17001 if (!S_IS_COMMON (fixp->fx_addsy))
17002 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
17003 code = fixp->fx_r_type;
17004 break;
17005 #endif
17006
17007 case BFD_RELOC_ARM_IMMEDIATE:
17008 as_bad_where (fixp->fx_file, fixp->fx_line,
17009 _("internal relocation (type: IMMEDIATE) not fixed up"));
17010 return NULL;
17011
17012 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17013 as_bad_where (fixp->fx_file, fixp->fx_line,
17014 _("ADRL used for a symbol not defined in the same file"));
17015 return NULL;
17016
17017 case BFD_RELOC_ARM_OFFSET_IMM:
17018 if (section->use_rela_p)
17019 {
17020 code = fixp->fx_r_type;
17021 break;
17022 }
17023
17024 if (fixp->fx_addsy != NULL
17025 && !S_IS_DEFINED (fixp->fx_addsy)
17026 && S_IS_LOCAL (fixp->fx_addsy))
17027 {
17028 as_bad_where (fixp->fx_file, fixp->fx_line,
17029 _("undefined local label `%s'"),
17030 S_GET_NAME (fixp->fx_addsy));
17031 return NULL;
17032 }
17033
17034 as_bad_where (fixp->fx_file, fixp->fx_line,
17035 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
17036 return NULL;
17037
17038 default:
17039 {
17040 char * type;
17041
17042 switch (fixp->fx_r_type)
17043 {
17044 case BFD_RELOC_NONE: type = "NONE"; break;
17045 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
17046 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
17047 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
17048 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
17049 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
17050 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
17051 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
17052 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
17053 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
17054 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
17055 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
17056 default: type = _("<unknown>"); break;
17057 }
17058 as_bad_where (fixp->fx_file, fixp->fx_line,
17059 _("cannot represent %s relocation in this object file format"),
17060 type);
17061 return NULL;
17062 }
17063 }
17064
17065 #ifdef OBJ_ELF
17066 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
17067 && GOT_symbol
17068 && fixp->fx_addsy == GOT_symbol)
17069 {
17070 code = BFD_RELOC_ARM_GOTPC;
17071 reloc->addend = fixp->fx_offset = reloc->address;
17072 }
17073 #endif
17074
17075 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
17076
17077 if (reloc->howto == NULL)
17078 {
17079 as_bad_where (fixp->fx_file, fixp->fx_line,
17080 _("cannot represent %s relocation in this object file format"),
17081 bfd_get_reloc_code_name (code));
17082 return NULL;
17083 }
17084
17085 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
17086 vtable entry to be used in the relocation's section offset. */
17087 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
17088 reloc->address = fixp->fx_offset;
17089
17090 return reloc;
17091 }
17092
17093 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
17094
17095 void
17096 cons_fix_new_arm (fragS * frag,
17097 int where,
17098 int size,
17099 expressionS * exp)
17100 {
17101 bfd_reloc_code_real_type type;
17102 int pcrel = 0;
17103
17104 /* Pick a reloc.
17105 FIXME: @@ Should look at CPU word size. */
17106 switch (size)
17107 {
17108 case 1:
17109 type = BFD_RELOC_8;
17110 break;
17111 case 2:
17112 type = BFD_RELOC_16;
17113 break;
17114 case 4:
17115 default:
17116 type = BFD_RELOC_32;
17117 break;
17118 case 8:
17119 type = BFD_RELOC_64;
17120 break;
17121 }
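/* Thus the data directives .byte, .short, .word and .quad end up with
   BFD_RELOC_8, _16, _32 and _64 fixups respectively.  */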
17122
17123 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
17124 }
17125
17126 #if defined OBJ_COFF || defined OBJ_ELF
17127 void
17128 arm_validate_fix (fixS * fixP)
17129 {
17130 /* If the destination of the branch is a defined symbol which does not have
17131 the THUMB_FUNC attribute, then we must be calling a function which has
17132 the (interfacearm) attribute. We look for the Thumb entry point to that
17133 function and change the branch to refer to that entry point instead. */
17134 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
17135 && fixP->fx_addsy != NULL
17136 && S_IS_DEFINED (fixP->fx_addsy)
17137 && ! THUMB_IS_FUNC (fixP->fx_addsy))
17138 {
17139 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
17140 }
17141 }
17142 #endif
17143
17144 int
17145 arm_force_relocation (struct fix * fixp)
17146 {
17147 #if defined (OBJ_COFF) && defined (TE_PE)
17148 if (fixp->fx_r_type == BFD_RELOC_RVA)
17149 return 1;
17150 #endif
17151
17152 /* Resolve these relocations even if the symbol is extern or weak. */
17153 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
17154 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
17155 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
17156 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
17157 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
17158 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
17159 return 0;
17160
17161 return generic_force_reloc (fixp);
17162 }
17163
17164 #ifdef OBJ_COFF
17165 bfd_boolean
17166 arm_fix_adjustable (fixS * fixP)
17167 {
17168 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
17169 local labels from being added to the output symbol table when they
17170 are used with the ADRL pseudo op. The ADRL relocation should always
17171 be resolved before the binary is emitted, so it is safe to say that
17172 it is adjustable. */
17173 if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
17174 return 1;
17175
17176 /* This is a hack for the gas/all/redef2.s test. This test causes symbols
17177 to be cloned, and without this test relocs would still be generated
17178 against the original, pre-cloned symbol. Such symbols would not appear
17179 in the symbol table however, and so a valid reloc could not be
17180 generated. So check to see if the fixup is against a symbol which has
17181 been removed from the symbol chain, and if it is, then allow it to be
17182 adjusted into a reloc against a section symbol. */
17183 if (fixP->fx_addsy != NULL
17184 && ! S_IS_LOCAL (fixP->fx_addsy)
17185 && symbol_next (fixP->fx_addsy) == NULL
17186 && symbol_next (fixP->fx_addsy) == symbol_previous (fixP->fx_addsy))
17187 return 1;
17188
17189 return 0;
17190 }
17191 #endif
17192
17193 #ifdef OBJ_ELF
17194 /* Relocations against function names must be left unadjusted,
17195 so that the linker can use this information to generate interworking
17196 stubs. The MIPS version of this function
17197 also prevents relocations that are mips-16 specific, but I do not
17198 know why it does this.
17199
17200 FIXME:
17201 There is one other problem that ought to be addressed here, but
17202 which currently is not: Taking the address of a label (rather
17203 than a function) and then later jumping to that address. Such
17204 addresses also ought to have their bottom bit set (assuming that
17205 they reside in Thumb code), but at the moment they will not. */
17206
17207 bfd_boolean
17208 arm_fix_adjustable (fixS * fixP)
17209 {
17210 if (fixP->fx_addsy == NULL)
17211 return 1;
17212
17213 /* Preserve relocations against symbols with function type. */
17214 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
17215 return 0;
17216
17217 if (THUMB_IS_FUNC (fixP->fx_addsy)
17218 && fixP->fx_subsy == NULL)
17219 return 0;
17220
17221 /* We need the symbol name for the VTABLE entries. */
17222 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
17223 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
17224 return 0;
17225
17226 /* Don't allow symbols to be discarded on GOT related relocs. */
17227 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
17228 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
17229 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
17230 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
17231 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
17232 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
17233 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
17234 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
17235 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
17236 return 0;
17237
17238 return 1;
17239 }
17240
17241 const char *
17242 elf32_arm_target_format (void)
17243 {
17244 #ifdef TE_SYMBIAN
17245 return (target_big_endian
17246 ? "elf32-bigarm-symbian"
17247 : "elf32-littlearm-symbian");
17248 #elif defined (TE_VXWORKS)
17249 return (target_big_endian
17250 ? "elf32-bigarm-vxworks"
17251 : "elf32-littlearm-vxworks");
17252 #else
17253 if (target_big_endian)
17254 return "elf32-bigarm";
17255 else
17256 return "elf32-littlearm";
17257 #endif
17258 }
17259
17260 void
17261 armelf_frob_symbol (symbolS * symp,
17262 int * puntp)
17263 {
17264 elf_frob_symbol (symp, puntp);
17265 }
17266 #endif
17267
17268 /* MD interface: Finalization. */
17269
17270 /* This is a convenient place to do the following, although it was
17271 probably not intended for this kind of use: we need to dump the
17272 literal pools before references are made to a null symbol pointer. */
17273
17274 void
17275 arm_cleanup (void)
17276 {
17277 literal_pool * pool;
17278
17279 for (pool = list_of_pools; pool; pool = pool->next)
17280 {
17281 /* Put it at the end of the relevant section. */
17282 subseg_set (pool->section, pool->sub_section);
17283 #ifdef OBJ_ELF
17284 arm_elf_change_section ();
17285 #endif
17286 s_ltorg (0);
17287 }
17288 }
17289
17290 /* Adjust the symbol table. This marks Thumb symbols as distinct from
17291 ARM ones. */
17292
17293 void
17294 arm_adjust_symtab (void)
17295 {
17296 #ifdef OBJ_COFF
17297 symbolS * sym;
17298
17299 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17300 {
17301 if (ARM_IS_THUMB (sym))
17302 {
17303 if (THUMB_IS_FUNC (sym))
17304 {
17305 /* Mark the symbol as a Thumb function. */
17306 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
17307 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
17308 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
17309
17310 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
17311 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
17312 else
17313 as_bad (_("%s: unexpected function type: %d"),
17314 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
17315 }
17316 else switch (S_GET_STORAGE_CLASS (sym))
17317 {
17318 case C_EXT:
17319 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
17320 break;
17321 case C_STAT:
17322 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
17323 break;
17324 case C_LABEL:
17325 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
17326 break;
17327 default:
17328 /* Do nothing. */
17329 break;
17330 }
17331 }
17332
17333 if (ARM_IS_INTERWORK (sym))
17334 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
17335 }
17336 #endif
17337 #ifdef OBJ_ELF
17338 symbolS * sym;
17339 char bind;
17340
17341 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17342 {
17343 if (ARM_IS_THUMB (sym))
17344 {
17345 elf_symbol_type * elf_sym;
17346
17347 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
17348 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
17349
17350 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
17351 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
17352 {
17353 /* If it's a .thumb_func, declare it as so,
17354 otherwise tag label as .code 16. */
17355 if (THUMB_IS_FUNC (sym))
17356 elf_sym->internal_elf_sym.st_info =
17357 ELF_ST_INFO (bind, STT_ARM_TFUNC);
17358 else
17359 elf_sym->internal_elf_sym.st_info =
17360 ELF_ST_INFO (bind, STT_ARM_16BIT);
17361 }
17362 }
17363 }
17364 #endif
17365 }
17366
17367 /* MD interface: Initialization. */
17368
17369 static void
17370 set_constant_flonums (void)
17371 {
17372 int i;
17373
17374 for (i = 0; i < NUM_FLOAT_VALS; i++)
17375 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
17376 abort ();
17377 }
17378
17379 void
17380 md_begin (void)
17381 {
17382 unsigned mach;
17383 unsigned int i;
17384
17385 if ( (arm_ops_hsh = hash_new ()) == NULL
17386 || (arm_cond_hsh = hash_new ()) == NULL
17387 || (arm_shift_hsh = hash_new ()) == NULL
17388 || (arm_psr_hsh = hash_new ()) == NULL
17389 || (arm_v7m_psr_hsh = hash_new ()) == NULL
17390 || (arm_reg_hsh = hash_new ()) == NULL
17391 || (arm_reloc_hsh = hash_new ()) == NULL
17392 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
17393 as_fatal (_("virtual memory exhausted"));
17394
17395 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
17396 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
17397 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
17398 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
17399 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
17400 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
17401 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
17402 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
17403 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
17404 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
17405 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
17406 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
17407 for (i = 0;
17408 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
17409 i++)
17410 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
17411 (PTR) (barrier_opt_names + i));
17412 #ifdef OBJ_ELF
17413 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
17414 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
17415 #endif
17416
17417 set_constant_flonums ();
17418
17419 /* Set the cpu variant based on the command-line options. We prefer
17420 -mcpu= over -march= if both are set (as for GCC); and we prefer
17421 -mfpu= over any other way of setting the floating point unit.
17422 Use of legacy options together with new options is faulted. */
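/* For example, if both -mcpu=arm9e and -march=armv4t are given, the
   -mcpu setting wins below; mixing a legacy option such as -m7tdmi
   with either of them is reported as an error.  */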
17423 if (legacy_cpu)
17424 {
17425 if (mcpu_cpu_opt || march_cpu_opt)
17426 as_bad (_("use of old and new-style options to set CPU type"));
17427
17428 mcpu_cpu_opt = legacy_cpu;
17429 }
17430 else if (!mcpu_cpu_opt)
17431 mcpu_cpu_opt = march_cpu_opt;
17432
17433 if (legacy_fpu)
17434 {
17435 if (mfpu_opt)
17436 as_bad (_("use of old and new-style options to set FPU type"));
17437
17438 mfpu_opt = legacy_fpu;
17439 }
17440 else if (!mfpu_opt)
17441 {
17442 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
17443 /* Some environments specify a default FPU. If they don't, infer it
17444 from the processor. */
17445 if (mcpu_fpu_opt)
17446 mfpu_opt = mcpu_fpu_opt;
17447 else
17448 mfpu_opt = march_fpu_opt;
17449 #else
17450 mfpu_opt = &fpu_default;
17451 #endif
17452 }
17453
17454 if (!mfpu_opt)
17455 {
17456 if (!mcpu_cpu_opt)
17457 mfpu_opt = &fpu_default;
17458 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
17459 mfpu_opt = &fpu_arch_vfp_v2;
17460 else
17461 mfpu_opt = &fpu_arch_fpa;
17462 }
17463
17464 #ifdef CPU_DEFAULT
17465 if (!mcpu_cpu_opt)
17466 {
17467 mcpu_cpu_opt = &cpu_default;
17468 selected_cpu = cpu_default;
17469 }
17470 #else
17471 if (mcpu_cpu_opt)
17472 selected_cpu = *mcpu_cpu_opt;
17473 else
17474 mcpu_cpu_opt = &arm_arch_any;
17475 #endif
17476
17477 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
17478
17479 arm_arch_used = thumb_arch_used = arm_arch_none;
17480
17481 #if defined OBJ_COFF || defined OBJ_ELF
17482 {
17483 unsigned int flags = 0;
17484
17485 #if defined OBJ_ELF
17486 flags = meabi_flags;
17487
17488 switch (meabi_flags)
17489 {
17490 case EF_ARM_EABI_UNKNOWN:
17491 #endif
17492 /* Set the flags in the private structure. */
17493 if (uses_apcs_26) flags |= F_APCS26;
17494 if (support_interwork) flags |= F_INTERWORK;
17495 if (uses_apcs_float) flags |= F_APCS_FLOAT;
17496 if (pic_code) flags |= F_PIC;
17497 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
17498 flags |= F_SOFT_FLOAT;
17499
17500 switch (mfloat_abi_opt)
17501 {
17502 case ARM_FLOAT_ABI_SOFT:
17503 case ARM_FLOAT_ABI_SOFTFP:
17504 flags |= F_SOFT_FLOAT;
17505 break;
17506
17507 case ARM_FLOAT_ABI_HARD:
17508 if (flags & F_SOFT_FLOAT)
17509 as_bad (_("hard-float conflicts with specified fpu"));
17510 break;
17511 }
17512
17513 /* Using pure-endian doubles (even if soft-float). */
17514 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
17515 flags |= F_VFP_FLOAT;
17516
17517 #if defined OBJ_ELF
17518 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
17519 flags |= EF_ARM_MAVERICK_FLOAT;
17520 break;
17521
17522 case EF_ARM_EABI_VER4:
17523 case EF_ARM_EABI_VER5:
17524 /* No additional flags to set. */
17525 break;
17526
17527 default:
17528 abort ();
17529 }
17530 #endif
17531 bfd_set_private_flags (stdoutput, flags);
17532
17533 /* We have run out of flags in the COFF header to encode the
17534 status of ATPCS support, so instead we create a dummy,
17535 empty, debug section called .arm.atpcs. */
17536 if (atpcs)
17537 {
17538 asection * sec;
17539
17540 sec = bfd_make_section (stdoutput, ".arm.atpcs");
17541
17542 if (sec != NULL)
17543 {
17544 bfd_set_section_flags
17545 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
17546 bfd_set_section_size (stdoutput, sec, 0);
17547 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
17548 }
17549 }
17550 }
17551 #endif
17552
17553 /* Record the CPU type as well. */
17554 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
17555 mach = bfd_mach_arm_iWMMXt;
17556 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
17557 mach = bfd_mach_arm_XScale;
17558 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
17559 mach = bfd_mach_arm_ep9312;
17560 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
17561 mach = bfd_mach_arm_5TE;
17562 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
17563 {
17564 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17565 mach = bfd_mach_arm_5T;
17566 else
17567 mach = bfd_mach_arm_5;
17568 }
17569 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
17570 {
17571 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17572 mach = bfd_mach_arm_4T;
17573 else
17574 mach = bfd_mach_arm_4;
17575 }
17576 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
17577 mach = bfd_mach_arm_3M;
17578 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
17579 mach = bfd_mach_arm_3;
17580 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
17581 mach = bfd_mach_arm_2a;
17582 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
17583 mach = bfd_mach_arm_2;
17584 else
17585 mach = bfd_mach_arm_unknown;
17586
17587 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
17588 }
17589
17590 /* Command line processing. */
17591
17592 /* md_parse_option
17593 Invocation line includes a switch not recognized by the base assembler.
17594 See if it's a processor-specific option.
17595
17596 This routine is somewhat complicated by the need for backwards
17597 compatibility (since older releases of gcc can't be changed).
17598 The new options try to make the interface as compatible as
17599 possible with GCC.
17600
17601 New options (supported) are:
17602
17603 -mcpu=<cpu name> Assemble for selected processor
17604 -march=<architecture name> Assemble for selected architecture
17605 -mfpu=<fpu architecture> Assemble for selected FPU.
17606 -EB/-mbig-endian Big-endian
17607 -EL/-mlittle-endian Little-endian
17608 -k Generate PIC code
17609 -mthumb Start in Thumb mode
17610 -mthumb-interwork Code supports ARM/Thumb interworking
17611
17612 For now we will also provide support for:
17613
17614 -mapcs-32 32-bit Program counter
17615 -mapcs-26 26-bit Program counter
17616 -mapcs-float Floats passed in FP registers
17617 -mapcs-reentrant Reentrant code
17618 -matpcs
17619 (at some point these will probably be replaced with -mapcs=<list of options>
17620 and -matpcs=<list of options>)
17621
17622 The remaining options are only supported for backwards compatibility.
17623 Cpu variants, the arm part is optional:
17624 -m[arm]1 Currently not supported.
17625 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
17626 -m[arm]3 Arm 3 processor
17627 -m[arm]6[xx], Arm 6 processors
17628 -m[arm]7[xx][t][[d]m] Arm 7 processors
17629 -m[arm]8[10] Arm 8 processors
17630 -m[arm]9[20][tdmi] Arm 9 processors
17631 -mstrongarm[110[0]] StrongARM processors
17632 -mxscale XScale processors
17633 -m[arm]v[2345[t[e]]] Arm architectures
17634 -mall All (except the ARM1)
17635 FP variants:
17636 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
17637 -mfpe-old (No float load/store multiples)
17638 -mvfpxd VFP Single precision
17639 -mvfp All VFP
17640 -mno-fpu Disable all floating point instructions
17641
17642 The following CPU names are recognized:
17643 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
17644 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
17645 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
17646 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
17647 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
17648 arm10t arm10e, arm1020t, arm1020e, arm10200e,
17649 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
17650
17651 */
17652
17653 const char * md_shortopts = "m:k";
17654
17655 #ifdef ARM_BI_ENDIAN
17656 #define OPTION_EB (OPTION_MD_BASE + 0)
17657 #define OPTION_EL (OPTION_MD_BASE + 1)
17658 #else
17659 #if TARGET_BYTES_BIG_ENDIAN
17660 #define OPTION_EB (OPTION_MD_BASE + 0)
17661 #else
17662 #define OPTION_EL (OPTION_MD_BASE + 1)
17663 #endif
17664 #endif
17665
17666 struct option md_longopts[] =
17667 {
17668 #ifdef OPTION_EB
17669 {"EB", no_argument, NULL, OPTION_EB},
17670 #endif
17671 #ifdef OPTION_EL
17672 {"EL", no_argument, NULL, OPTION_EL},
17673 #endif
17674 {NULL, no_argument, NULL, 0}
17675 };
17676
17677 size_t md_longopts_size = sizeof (md_longopts);
17678
17679 struct arm_option_table
17680 {
17681 char *option; /* Option name to match. */
17682 char *help; /* Help information. */
17683 int *var; /* Variable to change. */
17684 int value; /* What to change it to. */
17685 char *deprecated; /* If non-null, print this message. */
17686 };
17687
17688 struct arm_option_table arm_opts[] =
17689 {
17690 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
17691 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
17692 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
17693 &support_interwork, 1, NULL},
17694 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
17695 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
17696 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
17697 1, NULL},
17698 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
17699 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
17700 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
17701 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
17702 NULL},
17703
17704 /* These are recognized by the assembler, but have no effect on code. */
17705 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
17706 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
17707 {NULL, NULL, NULL, 0, NULL}
17708 };
17709
17710 struct arm_legacy_option_table
17711 {
17712 char *option; /* Option name to match. */
17713 const arm_feature_set **var; /* Variable to change. */
17714 const arm_feature_set value; /* What to change it to. */
17715 char *deprecated; /* If non-null, print this message. */
17716 };
17717
17718 const struct arm_legacy_option_table arm_legacy_opts[] =
17719 {
17720 /* DON'T add any new processors to this list -- we want the whole list
17721 to go away... Add them to the processors table instead. */
17722 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17723 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17724 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17725 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17726 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17727 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17728 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17729 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17730 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17731 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17732 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17733 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17734 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17735 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17736 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17737 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17738 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17739 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17740 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17741 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17742 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17743 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17744 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17745 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17746 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17747 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17748 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17749 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17750 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17751 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17752 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17753 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17754 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17755 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17756 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17757 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17758 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17759 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17760 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17761 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17762 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17763 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17764 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17765 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17766 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17767 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17768 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17769 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17770 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17771 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17772 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17773 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17774 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17775 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17776 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17777 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17778 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17779 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17780 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17781 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17782 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17783 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17784 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17785 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17786 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17787 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17788 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17789 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17790 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
17791 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
17792 N_("use -mcpu=strongarm110")},
17793 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
17794 N_("use -mcpu=strongarm1100")},
17795 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
17796 N_("use -mcpu=strongarm1110")},
17797 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
17798 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
17799 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
17800
17801 /* Architecture variants -- don't add any more to this list either. */
17802 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17803 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17804 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17805 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17806 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17807 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17808 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17809 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17810 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17811 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17812 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17813 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17814 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17815 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17816 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17817 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17818 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17819 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17820
17821 /* Floating point variants -- don't add any more to this list either. */
17822 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
17823 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
17824 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
17825 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
17826 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
17827
17828 {NULL, NULL, ARM_ARCH_NONE, NULL}
17829 };
17830
17831 struct arm_cpu_option_table
17832 {
17833 char *name;
17834 const arm_feature_set value;
17835 /* For some CPUs we assume an FPU unless the user explicitly sets
17836 -mfpu=... */
17837 const arm_feature_set default_fpu;
17838 /* The canonical name of the CPU, or NULL to use NAME converted to upper
17839 case. */
17840 const char *canonical_name;
17841 };
17842
17843 /* This list should, at a minimum, contain all the cpu names
17844 recognized by GCC. */
17845 static const struct arm_cpu_option_table arm_cpus[] =
17846 {
17847 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
17848 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
17849 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
17850 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17851 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17852 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17853 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17854 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17855 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17856 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17857 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17858 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17859 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17860 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17861 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17862 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17863 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17864 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17865 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17866 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17867 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17868 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17869 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17870 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17871 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17872 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17873 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17874 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17875 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17876 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17877 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17878 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17879 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17880 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17881 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17882 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17883 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17884 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17885 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17886 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
17887 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17888 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17889 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17890 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17891 /* For V5 or later processors we default to using VFP; but the user
17892 should really set the FPU type explicitly. */
17893 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17894 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17895 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17896 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17897 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17898 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17899 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
17900 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17901 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17902 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
17903 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17904 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17905 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17906 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17907 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17908 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
17909 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17910 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17911 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17912 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
17913 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17914 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
17915 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
17916 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
17917 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
17918 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
17919 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
17920 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
17921 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
17922 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
17923 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
17924 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
17925 | FPU_NEON_EXT_V1),
17926 NULL},
17927 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
17928 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
17929 /* ??? XSCALE is really an architecture. */
17930 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17931 /* ??? iwmmxt is not a processor. */
17932 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
17933 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17934 /* Maverick */
17935 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
17936 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
17937 };
17938
17939 struct arm_arch_option_table
17940 {
17941 char *name;
17942 const arm_feature_set value;
17943 const arm_feature_set default_fpu;
17944 };
17945
17946 /* This list should, at a minimum, contain all the architecture names
17947 recognized by GCC. */
17948 static const struct arm_arch_option_table arm_archs[] =
17949 {
17950 {"all", ARM_ANY, FPU_ARCH_FPA},
17951 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
17952 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
17953 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
17954 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
17955 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
17956 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
17957 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
17958 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
17959 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
17960 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
17961 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
17962 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
17963 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
17964 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
17965 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
17966 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
17967 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
17968 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
17969 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
17970 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
17971 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
17972 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
17973 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
17974 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
17975 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
17976 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
17977 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
17978 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
17979 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
17980 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
17981 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
17982 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
17983 };
17984
17985 /* ISA extensions in the co-processor space. */
17986 struct arm_option_cpu_value_table
17987 {
17988 char *name;
17989 const arm_feature_set value;
17990 };
17991
17992 static const struct arm_option_cpu_value_table arm_extensions[] =
17993 {
17994 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
17995 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
17996 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
17997 {NULL, ARM_ARCH_NONE}
17998 };
17999
18000 /* This list should, at a minimum, contain all the fpu names
18001 recognized by GCC. */
18002 static const struct arm_option_cpu_value_table arm_fpus[] =
18003 {
18004 {"softfpa", FPU_NONE},
18005 {"fpe", FPU_ARCH_FPE},
18006 {"fpe2", FPU_ARCH_FPE},
18007 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
18008 {"fpa", FPU_ARCH_FPA},
18009 {"fpa10", FPU_ARCH_FPA},
18010 {"fpa11", FPU_ARCH_FPA},
18011 {"arm7500fe", FPU_ARCH_FPA},
18012 {"softvfp", FPU_ARCH_VFP},
18013 {"softvfp+vfp", FPU_ARCH_VFP_V2},
18014 {"vfp", FPU_ARCH_VFP_V2},
18015 {"vfp9", FPU_ARCH_VFP_V2},
18016 {"vfp3", FPU_ARCH_VFP_V3},
18017 {"vfp10", FPU_ARCH_VFP_V2},
18018 {"vfp10-r0", FPU_ARCH_VFP_V1},
18019 {"vfpxd", FPU_ARCH_VFP_V1xD},
18020 {"arm1020t", FPU_ARCH_VFP_V1},
18021 {"arm1020e", FPU_ARCH_VFP_V2},
18022 {"arm1136jfs", FPU_ARCH_VFP_V2},
18023 {"arm1136jf-s", FPU_ARCH_VFP_V2},
18024 {"maverick", FPU_ARCH_MAVERICK},
18025 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
18026 {NULL, ARM_ARCH_NONE}
18027 };
18028
18029 struct arm_option_value_table
18030 {
18031 char *name;
18032 long value;
18033 };
18034
18035 static const struct arm_option_value_table arm_float_abis[] =
18036 {
18037 {"hard", ARM_FLOAT_ABI_HARD},
18038 {"softfp", ARM_FLOAT_ABI_SOFTFP},
18039 {"soft", ARM_FLOAT_ABI_SOFT},
18040 {NULL, 0}
18041 };
18042
18043 #ifdef OBJ_ELF
18044 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
18045 static const struct arm_option_value_table arm_eabis[] =
18046 {
18047 {"gnu", EF_ARM_EABI_UNKNOWN},
18048 {"4", EF_ARM_EABI_VER4},
18049 {"5", EF_ARM_EABI_VER5},
18050 {NULL, 0}
18051 };
18052 #endif
18053
18054 struct arm_long_option_table
18055 {
18056 char * option; /* Substring to match. */
18057 char * help; /* Help information. */
18058 int (* func) (char * subopt); /* Function to decode sub-option. */
18059 char * deprecated; /* If non-null, print this message. */
18060 };
18061
18062 static int
18063 arm_parse_extension (char * str, const arm_feature_set **opt_p)
18064 {
18065 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
18066
18067 /* Copy the feature set, so that we can modify it. */
18068 *ext_set = **opt_p;
18069 *opt_p = ext_set;
18070
18071 while (str != NULL && *str != 0)
18072 {
18073 const struct arm_option_cpu_value_table * opt;
18074 char * ext;
18075 int optlen;
18076
18077 if (*str != '+')
18078 {
18079 as_bad (_("invalid architectural extension"));
18080 return 0;
18081 }
18082
18083 str++;
18084 ext = strchr (str, '+');
18085
18086 if (ext != NULL)
18087 optlen = ext - str;
18088 else
18089 optlen = strlen (str);
18090
18091 if (optlen == 0)
18092 {
18093 as_bad (_("missing architectural extension"));
18094 return 0;
18095 }
18096
18097 for (opt = arm_extensions; opt->name != NULL; opt++)
18098 if (strncmp (opt->name, str, optlen) == 0)
18099 {
18100 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
18101 break;
18102 }
18103
18104 if (opt->name == NULL)
18105 {
18106 	  as_bad (_("unknown architectural extension `%s'"), str);
18107 return 0;
18108 }
18109
18110 str = ext;
18111 	    }
18112
18113 return 1;
18114 }
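
/* Illustrative note: the extension syntax parsed above is a list of
   '+'-separated names appended to a CPU or architecture name, each name
   being looked up in arm_extensions.  For example

	-mcpu=xscale+iwmmxt

   merges the iWMMXt coprocessor feature (ARM_CEXT_IWMMXT) into the
   XScale feature set.  */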
18115
18116 static int
18117 arm_parse_cpu (char * str)
18118 {
18119 const struct arm_cpu_option_table * opt;
18120 char * ext = strchr (str, '+');
18121 int optlen;
18122
18123 if (ext != NULL)
18124 optlen = ext - str;
18125 else
18126 optlen = strlen (str);
18127
18128 if (optlen == 0)
18129 {
18130 as_bad (_("missing cpu name `%s'"), str);
18131 return 0;
18132 }
18133
18134 for (opt = arm_cpus; opt->name != NULL; opt++)
18135 if (strncmp (opt->name, str, optlen) == 0)
18136 {
18137 mcpu_cpu_opt = &opt->value;
18138 mcpu_fpu_opt = &opt->default_fpu;
18139 if (opt->canonical_name)
18140 strcpy(selected_cpu_name, opt->canonical_name);
18141 else
18142 {
18143 int i;
18144 for (i = 0; i < optlen; i++)
18145 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18146 selected_cpu_name[i] = 0;
18147 }
18148
18149 if (ext != NULL)
18150 return arm_parse_extension (ext, &mcpu_cpu_opt);
18151
18152 return 1;
18153 }
18154
18155 as_bad (_("unknown cpu `%s'"), str);
18156 return 0;
18157 }
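
/* For example, "-mcpu=arm920" selects ARM_ARCH_V4T with a default FPU of
   FPU_ARCH_FPA and records the canonical name "ARM920T", while
   "-mcpu=arm7tdmi" has no canonical name in arm_cpus, so selected_cpu_name
   becomes the option name upper-cased, "ARM7TDMI".  */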
18158
18159 static int
18160 arm_parse_arch (char * str)
18161 {
18162 const struct arm_arch_option_table *opt;
18163 char *ext = strchr (str, '+');
18164 int optlen;
18165
18166 if (ext != NULL)
18167 optlen = ext - str;
18168 else
18169 optlen = strlen (str);
18170
18171 if (optlen == 0)
18172 {
18173 as_bad (_("missing architecture name `%s'"), str);
18174 return 0;
18175 }
18176
18177 for (opt = arm_archs; opt->name != NULL; opt++)
18178 	    if (strncmp (opt->name, str, optlen) == 0)
18179 {
18180 march_cpu_opt = &opt->value;
18181 march_fpu_opt = &opt->default_fpu;
18182 strcpy(selected_cpu_name, opt->name);
18183
18184 if (ext != NULL)
18185 return arm_parse_extension (ext, &march_cpu_opt);
18186
18187 return 1;
18188 }
18189
18190 as_bad (_("unknown architecture `%s'\n"), str);
18191 return 0;
18192 }
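
/* For example, "-march=armv5te" selects ARM_ARCH_V5TE with a default FPU
   of FPU_ARCH_VFP; a trailing "+extension" list is passed on to
   arm_parse_extension, as for -mcpu.  */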
18193
18194 static int
18195 arm_parse_fpu (char * str)
18196 {
18197 const struct arm_option_cpu_value_table * opt;
18198
18199 for (opt = arm_fpus; opt->name != NULL; opt++)
18200 if (streq (opt->name, str))
18201 {
18202 mfpu_opt = &opt->value;
18203 return 1;
18204 }
18205
18206 as_bad (_("unknown floating point format `%s'\n"), str);
18207 return 0;
18208 }
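
/* For example, "-mfpu=vfp" and "-mfpu=vfp9" both select FPU_ARCH_VFP_V2,
   while "-mfpu=neon" selects FPU_ARCH_VFP_V3_PLUS_NEON_V1.  */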
18209
18210 static int
18211 arm_parse_float_abi (char * str)
18212 {
18213 const struct arm_option_value_table * opt;
18214
18215 for (opt = arm_float_abis; opt->name != NULL; opt++)
18216 if (streq (opt->name, str))
18217 {
18218 mfloat_abi_opt = opt->value;
18219 return 1;
18220 }
18221
18222 as_bad (_("unknown floating point abi `%s'\n"), str);
18223 return 0;
18224 }
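
/* For example, "-mfloat-abi=softfp" selects ARM_FLOAT_ABI_SOFTFP, i.e.
   hardware floating-point instructions with soft-float calling
   conventions.  */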
18225
18226 #ifdef OBJ_ELF
18227 static int
18228 arm_parse_eabi (char * str)
18229 {
18230 const struct arm_option_value_table *opt;
18231
18232 for (opt = arm_eabis; opt->name != NULL; opt++)
18233 if (streq (opt->name, str))
18234 {
18235 meabi_flags = opt->value;
18236 return 1;
18237 }
18238 as_bad (_("unknown EABI `%s'\n"), str);
18239 return 0;
18240 }
18241 #endif
18242
18243 struct arm_long_option_table arm_long_opts[] =
18244 {
18245 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
18246 arm_parse_cpu, NULL},
18247 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
18248 arm_parse_arch, NULL},
18249 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
18250 arm_parse_fpu, NULL},
18251 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
18252 arm_parse_float_abi, NULL},
18253 #ifdef OBJ_ELF
18254 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
18255 arm_parse_eabi, NULL},
18256 #endif
18257 {NULL, NULL, 0, NULL}
18258 };
18259
18260 int
18261 md_parse_option (int c, char * arg)
18262 {
18263 struct arm_option_table *opt;
18264 const struct arm_legacy_option_table *fopt;
18265 struct arm_long_option_table *lopt;
18266
18267 switch (c)
18268 {
18269 #ifdef OPTION_EB
18270 case OPTION_EB:
18271 target_big_endian = 1;
18272 break;
18273 #endif
18274
18275 #ifdef OPTION_EL
18276 case OPTION_EL:
18277 target_big_endian = 0;
18278 break;
18279 #endif
18280
18281 case 'a':
18282 	      /* Listing option.  Just ignore these; we don't support additional
18283 ones. */
18284 return 0;
18285
18286 default:
18287 for (opt = arm_opts; opt->option != NULL; opt++)
18288 {
18289 if (c == opt->option[0]
18290 && ((arg == NULL && opt->option[1] == 0)
18291 || streq (arg, opt->option + 1)))
18292 {
18293 #if WARN_DEPRECATED
18294 /* If the option is deprecated, tell the user. */
18295 if (opt->deprecated != NULL)
18296 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18297 arg ? arg : "", _(opt->deprecated));
18298 #endif
18299
18300 if (opt->var != NULL)
18301 *opt->var = opt->value;
18302
18303 return 1;
18304 }
18305 }
18306
18307 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
18308 {
18309 if (c == fopt->option[0]
18310 && ((arg == NULL && fopt->option[1] == 0)
18311 || streq (arg, fopt->option + 1)))
18312 {
18313 #if WARN_DEPRECATED
18314 /* If the option is deprecated, tell the user. */
18315 if (fopt->deprecated != NULL)
18316 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18317 arg ? arg : "", _(fopt->deprecated));
18318 #endif
18319
18320 if (fopt->var != NULL)
18321 *fopt->var = &fopt->value;
18322
18323 return 1;
18324 }
18325 }
18326
18327 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18328 {
18329 /* These options are expected to have an argument. */
18330 if (c == lopt->option[0]
18331 && arg != NULL
18332 && strncmp (arg, lopt->option + 1,
18333 strlen (lopt->option + 1)) == 0)
18334 {
18335 #if WARN_DEPRECATED
18336 /* If the option is deprecated, tell the user. */
18337 if (lopt->deprecated != NULL)
18338 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
18339 _(lopt->deprecated));
18340 #endif
18341
18342 	      /* Call the sub-option parser.  */
18343 return lopt->func (arg + strlen (lopt->option) - 1);
18344 }
18345 }
18346
18347 return 0;
18348 }
18349
18350 return 1;
18351 }
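
/* Worked example of the dispatch above: "-mcpu=arm7tdmi" arrives here with
   c == 'm' and arg == "cpu=arm7tdmi".  The arm_long_opts entry "mcpu="
   matches by prefix, so arm_parse_cpu is called with
   arg + strlen ("mcpu=") - 1, i.e. "arm7tdmi".  */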
18352
18353 void
18354 md_show_usage (FILE * fp)
18355 {
18356 struct arm_option_table *opt;
18357 struct arm_long_option_table *lopt;
18358
18359 fprintf (fp, _(" ARM-specific assembler options:\n"));
18360
18361 for (opt = arm_opts; opt->option != NULL; opt++)
18362 if (opt->help != NULL)
18363 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
18364
18365 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18366 if (lopt->help != NULL)
18367 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
18368
18369 #ifdef OPTION_EB
18370 fprintf (fp, _("\
18371 -EB assemble code for a big-endian cpu\n"));
18372 #endif
18373
18374 #ifdef OPTION_EL
18375 fprintf (fp, _("\
18376 -EL assemble code for a little-endian cpu\n"));
18377 #endif
18378 }
18379
18380
18381 #ifdef OBJ_ELF
18382 typedef struct
18383 {
18384 int val;
18385 arm_feature_set flags;
18386 } cpu_arch_ver_table;
18387
18388 /* Mapping from CPU features to EABI CPU arch values.  The table must be
18389    sorted in order of increasing feature set (fewest features first).  */
18390 static const cpu_arch_ver_table cpu_arch_ver[] =
18391 {
18392 {1, ARM_ARCH_V4},
18393 {2, ARM_ARCH_V4T},
18394 {3, ARM_ARCH_V5},
18395 {4, ARM_ARCH_V5TE},
18396 {5, ARM_ARCH_V5TEJ},
18397 {6, ARM_ARCH_V6},
18398 {7, ARM_ARCH_V6Z},
18399 {8, ARM_ARCH_V6K},
18400 {9, ARM_ARCH_V6T2},
18401 {10, ARM_ARCH_V7A},
18402 {10, ARM_ARCH_V7R},
18403 {10, ARM_ARCH_V7M},
18404 {0, ARM_ARCH_NONE}
18405 };
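
/* The table is consumed by aeabi_set_public_attributes below: every entry
   whose features overlap the remaining feature set updates the arch value
   and has its features cleared.  For example, a plain ARMv6 feature set
   matches entries up to and including {6, ARM_ARCH_V6} and so yields a
   Tag_CPU_arch value of 6; an ARMv7-A feature set continues on to
   {10, ARM_ARCH_V7A} and yields 10.  */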
18406
18407 /* Set the public EABI object attributes. */
18408 static void
18409 aeabi_set_public_attributes (void)
18410 {
18411 int arch;
18412 arm_feature_set flags;
18413 arm_feature_set tmp;
18414 const cpu_arch_ver_table *p;
18415
18416 /* Choose the architecture based on the capabilities of the requested cpu
18417 (if any) and/or the instructions actually used. */
18418 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
18419 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
18420 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
18421
18422 tmp = flags;
18423 arch = 0;
18424 for (p = cpu_arch_ver; p->val; p++)
18425 {
18426 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
18427 {
18428 arch = p->val;
18429 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
18430 }
18431 }
18432
18433 /* Tag_CPU_name. */
18434 if (selected_cpu_name[0])
18435 {
18436 char *p;
18437
18438 p = selected_cpu_name;
18439 if (strncmp(p, "armv", 4) == 0)
18440 {
18441 int i;
18442
18443 p += 4;
18444 for (i = 0; p[i]; i++)
18445 p[i] = TOUPPER (p[i]);
18446 }
18447 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
18448 }
18449 /* Tag_CPU_arch. */
18450 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
18451 /* Tag_CPU_arch_profile. */
18452 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
18453 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
18454 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
18455 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
18456 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
18457 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
18458 /* Tag_ARM_ISA_use. */
18459 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
18460 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
18461 /* Tag_THUMB_ISA_use. */
18462 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
18463 elf32_arm_add_eabi_attr_int (stdoutput, 9,
18464 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
18465 /* Tag_VFP_arch. */
18466 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
18467 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
18468 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
18469 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
18470 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
18471 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
18472 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
18473 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
18474 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
18475 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
18476 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
18477 /* Tag_WMMX_arch. */
18478 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
18479 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
18480 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
18481 /* Tag_NEON_arch. */
18482 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
18483 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
18484 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
18485 }
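
/* Rough illustration (the tags emitted depend on the instructions actually
   assembled): a file built with -mcpu=arm926ej-s containing both ARM and
   Thumb code would typically record Tag_CPU_name "ARM926EJ-S",
   Tag_CPU_arch 5 (ARMv5TEJ), Tag_ARM_ISA_use 1 and Tag_THUMB_ISA_use 1;
   Tag_VFP_arch 2 is added only if VFPv2 instructions are present.  */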
18486
18487 /* Add the .ARM.attributes section. */
18488 void
18489 arm_md_end (void)
18490 {
18491 segT s;
18492 char *p;
18493 addressT addr;
18494 offsetT size;
18495
18496 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
18497 return;
18498
18499 aeabi_set_public_attributes ();
18500 size = elf32_arm_eabi_attr_size (stdoutput);
18501 s = subseg_new (".ARM.attributes", 0);
18502 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
18503 addr = frag_now_fix ();
18504 p = frag_more (size);
18505 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
18506 }
18507 #endif /* OBJ_ELF */
18508
18509
18510 /* Parse a .cpu directive. */
18511
18512 static void
18513 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
18514 {
18515 const struct arm_cpu_option_table *opt;
18516 char *name;
18517 char saved_char;
18518
18519 name = input_line_pointer;
18520 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18521 input_line_pointer++;
18522 saved_char = *input_line_pointer;
18523 *input_line_pointer = 0;
18524
18525 /* Skip the first "all" entry. */
18526 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
18527 if (streq (opt->name, name))
18528 {
18529 mcpu_cpu_opt = &opt->value;
18530 selected_cpu = opt->value;
18531 if (opt->canonical_name)
18532 strcpy(selected_cpu_name, opt->canonical_name);
18533 else
18534 {
18535 int i;
18536 for (i = 0; opt->name[i]; i++)
18537 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18538 selected_cpu_name[i] = 0;
18539 }
18540 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18541 *input_line_pointer = saved_char;
18542 demand_empty_rest_of_line ();
18543 return;
18544 }
18545 as_bad (_("unknown cpu `%s'"), name);
18546 *input_line_pointer = saved_char;
18547 ignore_rest_of_line ();
18548 }
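
/* Example use of the directive handled above, in an assembly source:

	.cpu arm926ej-s

   This selects that CPU's feature set for the rest of the file, much as
   -mcpu= does on the command line, except that no "+extension" suffix is
   accepted and the FPU selection is left unchanged.  */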
18549
18550
18551 /* Parse a .arch directive. */
18552
18553 static void
18554 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
18555 {
18556 const struct arm_arch_option_table *opt;
18557 char saved_char;
18558 char *name;
18559
18560 name = input_line_pointer;
18561 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18562 input_line_pointer++;
18563 saved_char = *input_line_pointer;
18564 *input_line_pointer = 0;
18565
18566 /* Skip the first "all" entry. */
18567 for (opt = arm_archs + 1; opt->name != NULL; opt++)
18568 if (streq (opt->name, name))
18569 {
18570 mcpu_cpu_opt = &opt->value;
18571 selected_cpu = opt->value;
18572 strcpy(selected_cpu_name, opt->name);
18573 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18574 *input_line_pointer = saved_char;
18575 demand_empty_rest_of_line ();
18576 return;
18577 }
18578
18579 as_bad (_("unknown architecture `%s'\n"), name);
18580 *input_line_pointer = saved_char;
18581 ignore_rest_of_line ();
18582 }
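
/* Example use in an assembly source:

	.arch armv5te

   This switches the permitted instruction set to ARMv5TE for the rest of
   the file; as with .cpu, no "+extension" suffix is accepted.  */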
18583
18584
18585 /* Parse a .fpu directive. */
18586
18587 static void
18588 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
18589 {
18590 const struct arm_option_cpu_value_table *opt;
18591 char saved_char;
18592 char *name;
18593
18594 name = input_line_pointer;
18595 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18596 input_line_pointer++;
18597 saved_char = *input_line_pointer;
18598 *input_line_pointer = 0;
18599
18600 for (opt = arm_fpus; opt->name != NULL; opt++)
18601 if (streq (opt->name, name))
18602 {
18603 mfpu_opt = &opt->value;
18604 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18605 *input_line_pointer = saved_char;
18606 demand_empty_rest_of_line ();
18607 return;
18608 }
18609
18610 as_bad (_("unknown floating point format `%s'\n"), name);
18611 *input_line_pointer = saved_char;
18612 ignore_rest_of_line ();
18613 }
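
/* Example use in an assembly source:

	.fpu vfp

   This selects FPU_ARCH_VFP_V2 (see arm_fpus above) and re-merges it into
   cpu_variant for the rest of the file.  */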
18614