1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <limits.h>
29 #include <stdarg.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dwarf2dbg.h"
41 #include "dw2gencfi.h"
42 #endif
43
44 /* XXX Set this to 1 after the next binutils release. */
45 #define WARN_DEPRECATED 0
46
47 #ifdef OBJ_ELF
48 /* Must be at least the size of the largest unwind opcode (currently two). */
49 #define ARM_OPCODE_CHUNK_SIZE 8
50
51 /* This structure holds the unwinding state. */
52
53 static struct
54 {
55 symbolS * proc_start;
56 symbolS * table_entry;
57 symbolS * personality_routine;
58 int personality_index;
59 /* The segment containing the function. */
60 segT saved_seg;
61 subsegT saved_subseg;
62 /* Opcodes generated from this function. */
63 unsigned char * opcodes;
64 int opcode_count;
65 int opcode_alloc;
66 /* The number of bytes pushed to the stack. */
67 offsetT frame_size;
68 /* We don't add stack adjustment opcodes immediately so that we can merge
69 multiple adjustments. We can also omit the final adjustment
70 when using a frame pointer. */
71 offsetT pending_offset;
72 /* These two fields are set by both unwind_movsp and unwind_setfp. They
73 hold the reg+offset to use when restoring sp from a frame pointer. */
74 offsetT fp_offset;
75 int fp_reg;
76 /* Nonzero if an unwind_setfp directive has been seen. */
77 unsigned fp_used:1;
78 /* Nonzero if the last opcode restores sp from fp_reg. */
79 unsigned sp_restored:1;
80 } unwind;
81
 82 /* If bit N is set, an R_ARM_NONE relocation has already been output for
 83    __aeabi_unwind_cpp_prN.  This lets each dependency be emitted only once
 84    per section, to save unnecessary bloat. */
85 static unsigned int marked_pr_dependency = 0;
86
87 #endif /* OBJ_ELF */
88
89 enum arm_float_abi
90 {
91 ARM_FLOAT_ABI_HARD,
92 ARM_FLOAT_ABI_SOFTFP,
93 ARM_FLOAT_ABI_SOFT
94 };
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 #if defined __XSCALE__
99 #define CPU_DEFAULT ARM_ARCH_XSCALE
100 #else
101 #if defined __thumb__
102 #define CPU_DEFAULT ARM_ARCH_V5T
103 #endif
104 #endif
105 #endif
106
107 #ifndef FPU_DEFAULT
108 # ifdef TE_LINUX
109 # define FPU_DEFAULT FPU_ARCH_FPA
110 # elif defined (TE_NetBSD)
111 # ifdef OBJ_ELF
112 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 # else
114 /* Legacy a.out format. */
115 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # endif
117 # elif defined (TE_VXWORKS)
118 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 # else
120 /* For backwards compatibility, default to FPA. */
121 # define FPU_DEFAULT FPU_ARCH_FPA
122 # endif
123 #endif /* ifndef FPU_DEFAULT */
124
125 #define streq(a, b) (strcmp (a, b) == 0)
126
127 static arm_feature_set cpu_variant;
128 static arm_feature_set arm_arch_used;
129 static arm_feature_set thumb_arch_used;
130
131 /* Flags stored in private area of BFD structure. */
132 static int uses_apcs_26 = FALSE;
133 static int atpcs = FALSE;
134 static int support_interwork = FALSE;
135 static int uses_apcs_float = FALSE;
136 static int pic_code = FALSE;
137
138 /* Variables that we set while parsing command-line options. Once all
139 options have been read we re-process these values to set the real
140 assembly flags. */
141 static const arm_feature_set *legacy_cpu = NULL;
142 static const arm_feature_set *legacy_fpu = NULL;
143
144 static const arm_feature_set *mcpu_cpu_opt = NULL;
145 static const arm_feature_set *mcpu_fpu_opt = NULL;
146 static const arm_feature_set *march_cpu_opt = NULL;
147 static const arm_feature_set *march_fpu_opt = NULL;
148 static const arm_feature_set *mfpu_opt = NULL;
149
150 /* Constants for known architecture features. */
151 static const arm_feature_set fpu_default = FPU_DEFAULT;
152 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
153 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
154 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
155 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
156 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
157 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
158 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
159 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
160
161 #ifdef CPU_DEFAULT
162 static const arm_feature_set cpu_default = CPU_DEFAULT;
163 #endif
164
165 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
 166 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
167 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
168 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
169 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
170 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
171 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
172 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
173 static const arm_feature_set arm_ext_v4t_5 =
174 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
175 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
176 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
177 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
178 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
179 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
180 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
181 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
182 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
183 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
184 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
185 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
186 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
187 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
188 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
189
190 static const arm_feature_set arm_arch_any = ARM_ANY;
191 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
192 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
193 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
194
195 static const arm_feature_set arm_cext_iwmmxt =
196 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
197 static const arm_feature_set arm_cext_xscale =
198 ARM_FEATURE (0, ARM_CEXT_XSCALE);
199 static const arm_feature_set arm_cext_maverick =
200 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
201 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
202 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
203 static const arm_feature_set fpu_vfp_ext_v1xd =
204 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
205 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
206 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
207 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
208 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
209 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
210 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
211
212 static int mfloat_abi_opt = -1;
213 /* Record user cpu selection for object attributes. */
214 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
215 /* Must be long enough to hold any of the names in arm_cpus. */
216 static char selected_cpu_name[16];
217 #ifdef OBJ_ELF
218 # ifdef EABI_DEFAULT
219 static int meabi_flags = EABI_DEFAULT;
220 # else
221 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
222 # endif
223 #endif
224
225 #ifdef OBJ_ELF
226 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
227 symbolS * GOT_symbol;
228 #endif
229
230 /* 0: assemble for ARM,
231 1: assemble for Thumb,
232 2: assemble for Thumb even though target CPU does not support thumb
233 instructions. */
234 static int thumb_mode = 0;
235
236 /* If unified_syntax is true, we are processing the new unified
237 ARM/Thumb syntax. Important differences from the old ARM mode:
238
239 - Immediate operands do not require a # prefix.
240 - Conditional affixes always appear at the end of the
241 instruction. (For backward compatibility, those instructions
242 that formerly had them in the middle, continue to accept them
243 there.)
244 - The IT instruction may appear, and if it does is validated
245 against subsequent conditional affixes. It does not generate
246 machine code.
247
248 Important differences from the old Thumb mode:
249
250 - Immediate operands do not require a # prefix.
251 - Most of the V6T2 instructions are only available in unified mode.
252 - The .N and .W suffixes are recognized and honored (it is an error
253 if they cannot be honored).
254 - All instructions set the flags if and only if they have an 's' affix.
255 - Conditional affixes may be used. They are validated against
256 preceding IT instructions. Unlike ARM mode, you cannot use a
257 conditional affix except in the scope of an IT instruction. */
258
259 static bfd_boolean unified_syntax = FALSE;
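      /* For example, with ".syntax unified" in effect both of the following
         are accepted and assemble to the same conditional, flag-setting ADD
         (illustrative fragment only):

             addseq r0, r1, 1      @ UAL order: 's' before the condition, '#' optional
             addeqs r0, r1, #1     @ older ordering, still accepted for compatibility  */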
260
261 enum neon_el_type
262 {
263 NT_invtype,
264 NT_untyped,
265 NT_integer,
266 NT_float,
267 NT_poly,
268 NT_signed,
269 NT_unsigned
270 };
271
272 struct neon_type_el
273 {
274 enum neon_el_type type;
275 unsigned size;
276 };
277
278 #define NEON_MAX_TYPE_ELS 4
279
280 struct neon_type
281 {
282 struct neon_type_el el[NEON_MAX_TYPE_ELS];
283 unsigned elems;
284 };
285
286 struct arm_it
287 {
288 const char * error;
289 unsigned long instruction;
290 int size;
291 int size_req;
292 int cond;
293 /* "uncond_value" is set to the value in place of the conditional field in
294 unconditional versions of the instruction, or -1 if nothing is
295 appropriate. */
296 int uncond_value;
297 struct neon_type vectype;
298 /* Set to the opcode if the instruction needs relaxation.
299 Zero if the instruction is not relaxed. */
300 unsigned long relax;
301 struct
302 {
303 bfd_reloc_code_real_type type;
304 expressionS exp;
305 int pc_rel;
306 } reloc;
307
308 struct
309 {
310 unsigned reg;
311 signed int imm;
312 struct neon_type_el vectype;
313 unsigned present : 1; /* Operand present. */
314 unsigned isreg : 1; /* Operand was a register. */
315 unsigned immisreg : 1; /* .imm field is a second register. */
316 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
317 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
318 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
319 instructions. This allows us to disambiguate ARM <-> vector insns. */
320 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
321 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
322 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
323 unsigned issingle : 1; /* Operand is VFP single-precision register. */
324 unsigned hasreloc : 1; /* Operand has relocation suffix. */
325 unsigned writeback : 1; /* Operand has trailing ! */
326 unsigned preind : 1; /* Preindexed address. */
327 unsigned postind : 1; /* Postindexed address. */
328 unsigned negative : 1; /* Index register was negated. */
329 unsigned shifted : 1; /* Shift applied to operation. */
330 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
331 } operands[6];
332 };
333
334 static struct arm_it inst;
335
336 #define NUM_FLOAT_VALS 8
337
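      /* These are the eight floating-point constants that FPA instructions
         can encode directly in their immediate field, hence NUM_FLOAT_VALS
         above.  */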
338 const char * fp_const[] =
339 {
340 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
341 };
342
343 /* Number of littlenums required to hold an extended precision number. */
344 #define MAX_LITTLENUMS 6
345
346 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
347
348 #define FAIL (-1)
349 #define SUCCESS (0)
350
351 #define SUFF_S 1
352 #define SUFF_D 2
353 #define SUFF_E 3
354 #define SUFF_P 4
355
356 #define CP_T_X 0x00008000
357 #define CP_T_Y 0x00400000
358
359 #define CONDS_BIT 0x00100000
360 #define LOAD_BIT 0x00100000
361
362 #define DOUBLE_LOAD_FLAG 0x00000001
363
364 struct asm_cond
365 {
366 const char * template;
367 unsigned long value;
368 };
369
370 #define COND_ALWAYS 0xE
371
372 struct asm_psr
373 {
374 const char *template;
375 unsigned long field;
376 };
377
378 struct asm_barrier_opt
379 {
380 const char *template;
381 unsigned long value;
382 };
383
384 /* The bit that distinguishes CPSR and SPSR. */
385 #define SPSR_BIT (1 << 22)
386
387 /* The individual PSR flag bits. */
388 #define PSR_c (1 << 16)
389 #define PSR_x (1 << 17)
390 #define PSR_s (1 << 18)
391 #define PSR_f (1 << 19)
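      /* For example, "msr CPSR_fc, r0" selects the flags and control fields,
         i.e. PSR_f | PSR_c in the instruction's field mask.  */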
392
393 struct reloc_entry
394 {
395 char *name;
396 bfd_reloc_code_real_type reloc;
397 };
398
399 enum vfp_reg_pos
400 {
401 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
402 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
403 };
404
405 enum vfp_ldstm_type
406 {
407 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
408 };
409
410 /* Bits for DEFINED field in neon_typed_alias. */
411 #define NTA_HASTYPE 1
412 #define NTA_HASINDEX 2
413
414 struct neon_typed_alias
415 {
416 unsigned char defined;
417 unsigned char index;
418 struct neon_type_el eltype;
419 };
420
421 /* ARM register categories. This includes coprocessor numbers and various
422 architecture extensions' registers. */
423 enum arm_reg_type
424 {
425 REG_TYPE_RN,
426 REG_TYPE_CP,
427 REG_TYPE_CN,
428 REG_TYPE_FN,
429 REG_TYPE_VFS,
430 REG_TYPE_VFD,
431 REG_TYPE_NQ,
432 REG_TYPE_VFSD,
433 REG_TYPE_NDQ,
434 REG_TYPE_NSDQ,
435 REG_TYPE_VFC,
436 REG_TYPE_MVF,
437 REG_TYPE_MVD,
438 REG_TYPE_MVFX,
439 REG_TYPE_MVDX,
440 REG_TYPE_MVAX,
441 REG_TYPE_DSPSC,
442 REG_TYPE_MMXWR,
443 REG_TYPE_MMXWC,
444 REG_TYPE_MMXWCG,
445 REG_TYPE_XSCALE,
446 };
447
448 /* Structure for a hash table entry for a register.
449 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
450 information which states whether a vector type or index is specified (for a
451 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
452 struct reg_entry
453 {
454 const char *name;
455 unsigned char number;
456 unsigned char type;
457 unsigned char builtin;
458 struct neon_typed_alias *neon;
459 };
460
461 /* Diagnostics used when we don't get a register of the expected type. */
462 const char *const reg_expected_msgs[] =
463 {
464 N_("ARM register expected"),
465 N_("bad or missing co-processor number"),
466 N_("co-processor register expected"),
467 N_("FPA register expected"),
468 N_("VFP single precision register expected"),
469 N_("VFP/Neon double precision register expected"),
470 N_("Neon quad precision register expected"),
471 N_("VFP single or double precision register expected"),
472 N_("Neon double or quad precision register expected"),
473 N_("VFP single, double or Neon quad precision register expected"),
474 N_("VFP system register expected"),
475 N_("Maverick MVF register expected"),
476 N_("Maverick MVD register expected"),
477 N_("Maverick MVFX register expected"),
478 N_("Maverick MVDX register expected"),
479 N_("Maverick MVAX register expected"),
480 N_("Maverick DSPSC register expected"),
481 N_("iWMMXt data register expected"),
482 N_("iWMMXt control register expected"),
483 N_("iWMMXt scalar register expected"),
484 N_("XScale accumulator register expected"),
485 };
486
487 /* Some well known registers that we refer to directly elsewhere. */
488 #define REG_SP 13
489 #define REG_LR 14
490 #define REG_PC 15
491
 492 /* ARM instructions take 4 bytes in the object file, Thumb instructions
 493    take 2: */
494 #define INSN_SIZE 4
495
496 struct asm_opcode
497 {
498 /* Basic string to match. */
499 const char *template;
500
501 /* Parameters to instruction. */
502 unsigned char operands[8];
503
504 /* Conditional tag - see opcode_lookup. */
505 unsigned int tag : 4;
506
507 /* Basic instruction code. */
508 unsigned int avalue : 28;
509
510 /* Thumb-format instruction code. */
511 unsigned int tvalue;
512
513 /* Which architecture variant provides this instruction. */
514 const arm_feature_set *avariant;
515 const arm_feature_set *tvariant;
516
517 /* Function to call to encode instruction in ARM format. */
518 void (* aencode) (void);
519
520 /* Function to call to encode instruction in Thumb format. */
521 void (* tencode) (void);
522 };
523
524 /* Defines for various bits that we will want to toggle. */
525 #define INST_IMMEDIATE 0x02000000
526 #define OFFSET_REG 0x02000000
527 #define HWOFFSET_IMM 0x00400000
528 #define SHIFT_BY_REG 0x00000010
529 #define PRE_INDEX 0x01000000
530 #define INDEX_UP 0x00800000
531 #define WRITE_BACK 0x00200000
532 #define LDM_TYPE_2_OR_3 0x00400000
533
534 #define LITERAL_MASK 0xf000f000
535 #define OPCODE_MASK 0xfe1fffff
536 #define V4_STR_BIT 0x00000020
537
538 #define DATA_OP_SHIFT 21
539
540 #define T2_OPCODE_MASK 0xfe1fffff
541 #define T2_DATA_OP_SHIFT 21
542
543 /* Codes to distinguish the arithmetic instructions. */
544 #define OPCODE_AND 0
545 #define OPCODE_EOR 1
546 #define OPCODE_SUB 2
547 #define OPCODE_RSB 3
548 #define OPCODE_ADD 4
549 #define OPCODE_ADC 5
550 #define OPCODE_SBC 6
551 #define OPCODE_RSC 7
552 #define OPCODE_TST 8
553 #define OPCODE_TEQ 9
554 #define OPCODE_CMP 10
555 #define OPCODE_CMN 11
556 #define OPCODE_ORR 12
557 #define OPCODE_MOV 13
558 #define OPCODE_BIC 14
559 #define OPCODE_MVN 15
560
561 #define T2_OPCODE_AND 0
562 #define T2_OPCODE_BIC 1
563 #define T2_OPCODE_ORR 2
564 #define T2_OPCODE_ORN 3
565 #define T2_OPCODE_EOR 4
566 #define T2_OPCODE_ADD 8
567 #define T2_OPCODE_ADC 10
568 #define T2_OPCODE_SBC 11
569 #define T2_OPCODE_SUB 13
570 #define T2_OPCODE_RSB 14
571
572 #define T_OPCODE_MUL 0x4340
573 #define T_OPCODE_TST 0x4200
574 #define T_OPCODE_CMN 0x42c0
575 #define T_OPCODE_NEG 0x4240
576 #define T_OPCODE_MVN 0x43c0
577
578 #define T_OPCODE_ADD_R3 0x1800
579 #define T_OPCODE_SUB_R3 0x1a00
580 #define T_OPCODE_ADD_HI 0x4400
581 #define T_OPCODE_ADD_ST 0xb000
582 #define T_OPCODE_SUB_ST 0xb080
583 #define T_OPCODE_ADD_SP 0xa800
584 #define T_OPCODE_ADD_PC 0xa000
585 #define T_OPCODE_ADD_I8 0x3000
586 #define T_OPCODE_SUB_I8 0x3800
587 #define T_OPCODE_ADD_I3 0x1c00
588 #define T_OPCODE_SUB_I3 0x1e00
589
590 #define T_OPCODE_ASR_R 0x4100
591 #define T_OPCODE_LSL_R 0x4080
592 #define T_OPCODE_LSR_R 0x40c0
593 #define T_OPCODE_ROR_R 0x41c0
594 #define T_OPCODE_ASR_I 0x1000
595 #define T_OPCODE_LSL_I 0x0000
596 #define T_OPCODE_LSR_I 0x0800
597
598 #define T_OPCODE_MOV_I8 0x2000
599 #define T_OPCODE_CMP_I8 0x2800
600 #define T_OPCODE_CMP_LR 0x4280
601 #define T_OPCODE_MOV_HR 0x4600
602 #define T_OPCODE_CMP_HR 0x4500
603
604 #define T_OPCODE_LDR_PC 0x4800
605 #define T_OPCODE_LDR_SP 0x9800
606 #define T_OPCODE_STR_SP 0x9000
607 #define T_OPCODE_LDR_IW 0x6800
608 #define T_OPCODE_STR_IW 0x6000
609 #define T_OPCODE_LDR_IH 0x8800
610 #define T_OPCODE_STR_IH 0x8000
611 #define T_OPCODE_LDR_IB 0x7800
612 #define T_OPCODE_STR_IB 0x7000
613 #define T_OPCODE_LDR_RW 0x5800
614 #define T_OPCODE_STR_RW 0x5000
615 #define T_OPCODE_LDR_RH 0x5a00
616 #define T_OPCODE_STR_RH 0x5200
617 #define T_OPCODE_LDR_RB 0x5c00
618 #define T_OPCODE_STR_RB 0x5400
619
620 #define T_OPCODE_PUSH 0xb400
621 #define T_OPCODE_POP 0xbc00
622
623 #define T_OPCODE_BRANCH 0xe000
624
625 #define THUMB_SIZE 2 /* Size of thumb instruction. */
626 #define THUMB_PP_PC_LR 0x0100
627 #define THUMB_LOAD_BIT 0x0800
628 #define THUMB2_LOAD_BIT 0x00100000
629
630 #define BAD_ARGS _("bad arguments to instruction")
631 #define BAD_PC _("r15 not allowed here")
632 #define BAD_COND _("instruction cannot be conditional")
633 #define BAD_OVERLAP _("registers may not be the same")
634 #define BAD_HIREG _("lo register required")
635 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
 636 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
637 #define BAD_BRANCH _("branch must be last instruction in IT block")
638 #define BAD_NOT_IT _("instruction not allowed in IT block")
639 #define BAD_FPU _("selected FPU does not support instruction")
640
641 static struct hash_control *arm_ops_hsh;
642 static struct hash_control *arm_cond_hsh;
643 static struct hash_control *arm_shift_hsh;
644 static struct hash_control *arm_psr_hsh;
645 static struct hash_control *arm_v7m_psr_hsh;
646 static struct hash_control *arm_reg_hsh;
647 static struct hash_control *arm_reloc_hsh;
648 static struct hash_control *arm_barrier_opt_hsh;
649
650 /* Stuff needed to resolve the label ambiguity
651 As:
652 ...
653 label: <insn>
654 may differ from:
655 ...
656 label:
657 <insn>
658 */
659
660 symbolS * last_label_seen;
661 static int label_is_thumb_function_name = FALSE;
662 \f
663 /* Literal pool structure. Held on a per-section
664 and per-sub-section basis. */
665
666 #define MAX_LITERAL_POOL_SIZE 1024
667 typedef struct literal_pool
668 {
669 expressionS literals [MAX_LITERAL_POOL_SIZE];
670 unsigned int next_free_entry;
671 unsigned int id;
672 symbolS * symbol;
673 segT section;
674 subsegT sub_section;
675 struct literal_pool * next;
676 } literal_pool;
677
678 /* Pointer to a linked list of literal pools. */
679 literal_pool * list_of_pools = NULL;
680
681 /* State variables for IT block handling. */
 682 static int current_it_mask = 0;
683 static int current_cc;
684
685 \f
686 /* Pure syntax. */
687
688 /* This array holds the chars that always start a comment. If the
689 pre-processor is disabled, these aren't very useful. */
690 const char comment_chars[] = "@";
691
692 /* This array holds the chars that only start a comment at the beginning of
693 a line. If the line seems to have the form '# 123 filename'
694 .line and .file directives will appear in the pre-processed output. */
695 /* Note that input_file.c hand checks for '#' at the beginning of the
696 first line of the input file. This is because the compiler outputs
697 #NO_APP at the beginning of its output. */
698 /* Also note that comments like this one will always work. */
699 const char line_comment_chars[] = "#";
700
701 const char line_separator_chars[] = ";";
702
703 /* Chars that can be used to separate mant
704 from exp in floating point numbers. */
705 const char EXP_CHARS[] = "eE";
706
707 /* Chars that mean this number is a floating point constant. */
708 /* As in 0f12.456 */
709 /* or 0d1.2345e12 */
710
711 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
712
713 /* Prefix characters that indicate the start of an immediate
714 value. */
715 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
716
717 /* Separator character handling. */
718
719 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
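      /* A single test suffices here because the input scrubber has already
         collapsed any run of whitespace down to one space (see the note in
         create_register_alias below).  */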
720
721 static inline int
722 skip_past_char (char ** str, char c)
723 {
724 if (**str == c)
725 {
726 (*str)++;
727 return SUCCESS;
728 }
729 else
730 return FAIL;
731 }
732 #define skip_past_comma(str) skip_past_char (str, ',')
733
734 /* Arithmetic expressions (possibly involving symbols). */
735
736 /* Return TRUE if anything in the expression is a bignum. */
737
738 static int
739 walk_no_bignums (symbolS * sp)
740 {
741 if (symbol_get_value_expression (sp)->X_op == O_big)
742 return 1;
743
744 if (symbol_get_value_expression (sp)->X_add_symbol)
745 {
746 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
747 || (symbol_get_value_expression (sp)->X_op_symbol
748 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
749 }
750
751 return 0;
752 }
753
754 static int in_my_get_expression = 0;
755
756 /* Third argument to my_get_expression. */
757 #define GE_NO_PREFIX 0
758 #define GE_IMM_PREFIX 1
759 #define GE_OPT_PREFIX 2
760 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
761 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
762 #define GE_OPT_PREFIX_BIG 3
763
764 static int
765 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
766 {
767 char * save_in;
768 segT seg;
769
770 /* In unified syntax, all prefixes are optional. */
771 if (unified_syntax)
772 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
773 : GE_OPT_PREFIX;
774
775 switch (prefix_mode)
776 {
777 case GE_NO_PREFIX: break;
778 case GE_IMM_PREFIX:
779 if (!is_immediate_prefix (**str))
780 {
781 inst.error = _("immediate expression requires a # prefix");
782 return FAIL;
783 }
784 (*str)++;
785 break;
786 case GE_OPT_PREFIX:
787 case GE_OPT_PREFIX_BIG:
788 if (is_immediate_prefix (**str))
789 (*str)++;
790 break;
791 default: abort ();
792 }
793
794 memset (ep, 0, sizeof (expressionS));
795
796 save_in = input_line_pointer;
797 input_line_pointer = *str;
798 in_my_get_expression = 1;
799 seg = expression (ep);
800 in_my_get_expression = 0;
801
802 if (ep->X_op == O_illegal)
803 {
804 /* We found a bad expression in md_operand(). */
805 *str = input_line_pointer;
806 input_line_pointer = save_in;
807 if (inst.error == NULL)
808 inst.error = _("bad expression");
809 return 1;
810 }
811
812 #ifdef OBJ_AOUT
813 if (seg != absolute_section
814 && seg != text_section
815 && seg != data_section
816 && seg != bss_section
817 && seg != undefined_section)
818 {
819 inst.error = _("bad segment");
820 *str = input_line_pointer;
821 input_line_pointer = save_in;
822 return 1;
823 }
824 #endif
825
826 /* Get rid of any bignums now, so that we don't generate an error for which
827 we can't establish a line number later on. Big numbers are never valid
828 in instructions, which is where this routine is always called. */
829 if (prefix_mode != GE_OPT_PREFIX_BIG
830 && (ep->X_op == O_big
831 || (ep->X_add_symbol
832 && (walk_no_bignums (ep->X_add_symbol)
833 || (ep->X_op_symbol
834 && walk_no_bignums (ep->X_op_symbol))))))
835 {
836 inst.error = _("invalid constant");
837 *str = input_line_pointer;
838 input_line_pointer = save_in;
839 return 1;
840 }
841
842 *str = input_line_pointer;
843 input_line_pointer = save_in;
844 return 0;
845 }
846
847 /* Turn a string in input_line_pointer into a floating point constant
848 of type TYPE, and store the appropriate bytes in *LITP. The number
849 of LITTLENUMS emitted is stored in *SIZEP. An error message is
850 returned, or NULL on OK.
851
 852 Note that fp constants aren't represented in the normal way on the ARM.
853 In big endian mode, things are as expected. However, in little endian
854 mode fp constants are big-endian word-wise, and little-endian byte-wise
855 within the words. For example, (double) 1.1 in big endian mode is
856 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
857 the byte sequence 99 99 f1 3f 9a 99 99 99.
858
859 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
860
861 char *
862 md_atof (int type, char * litP, int * sizeP)
863 {
864 int prec;
865 LITTLENUM_TYPE words[MAX_LITTLENUMS];
866 char *t;
867 int i;
868
869 switch (type)
870 {
871 case 'f':
872 case 'F':
873 case 's':
874 case 'S':
875 prec = 2;
876 break;
877
878 case 'd':
879 case 'D':
880 case 'r':
881 case 'R':
882 prec = 4;
883 break;
884
885 case 'x':
886 case 'X':
887 prec = 6;
888 break;
889
890 case 'p':
891 case 'P':
892 prec = 6;
893 break;
894
895 default:
896 *sizeP = 0;
897 return _("bad call to MD_ATOF()");
898 }
899
900 t = atof_ieee (input_line_pointer, type, words);
901 if (t)
902 input_line_pointer = t;
903 *sizeP = prec * 2;
904
905 if (target_big_endian)
906 {
907 for (i = 0; i < prec; i++)
908 {
909 md_number_to_chars (litP, (valueT) words[i], 2);
910 litP += 2;
911 }
912 }
913 else
914 {
915 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
916 for (i = prec - 1; i >= 0; i--)
917 {
918 md_number_to_chars (litP, (valueT) words[i], 2);
919 litP += 2;
920 }
921 else
922 /* For a 4 byte float the order of elements in `words' is 1 0.
923 For an 8 byte float the order is 1 0 3 2. */
924 for (i = 0; i < prec; i += 2)
925 {
926 md_number_to_chars (litP, (valueT) words[i + 1], 2);
927 md_number_to_chars (litP + 2, (valueT) words[i], 2);
928 litP += 4;
929 }
930 }
931
932 return 0;
933 }
934
935 /* We handle all bad expressions here, so that we can report the faulty
936 instruction in the error message. */
937 void
938 md_operand (expressionS * expr)
939 {
940 if (in_my_get_expression)
941 expr->X_op = O_illegal;
942 }
943
944 /* Immediate values. */
945
946 /* Generic immediate-value read function for use in directives.
947 Accepts anything that 'expression' can fold to a constant.
948 *val receives the number. */
949 #ifdef OBJ_ELF
950 static int
951 immediate_for_directive (int *val)
952 {
953 expressionS exp;
954 exp.X_op = O_illegal;
955
956 if (is_immediate_prefix (*input_line_pointer))
957 {
958 input_line_pointer++;
959 expression (&exp);
960 }
961
962 if (exp.X_op != O_constant)
963 {
964 as_bad (_("expected #constant"));
965 ignore_rest_of_line ();
966 return FAIL;
967 }
968 *val = exp.X_add_number;
969 return SUCCESS;
970 }
971 #endif
972
973 /* Register parsing. */
974
975 /* Generic register parser. CCP points to what should be the
976 beginning of a register name. If it is indeed a valid register
977 name, advance CCP over it and return the reg_entry structure;
978 otherwise return NULL. Does not issue diagnostics. */
979
980 static struct reg_entry *
981 arm_reg_parse_multi (char **ccp)
982 {
983 char *start = *ccp;
984 char *p;
985 struct reg_entry *reg;
986
987 #ifdef REGISTER_PREFIX
988 if (*start != REGISTER_PREFIX)
989 return NULL;
990 start++;
991 #endif
992 #ifdef OPTIONAL_REGISTER_PREFIX
993 if (*start == OPTIONAL_REGISTER_PREFIX)
994 start++;
995 #endif
996
997 p = start;
998 if (!ISALPHA (*p) || !is_name_beginner (*p))
999 return NULL;
1000
1001 do
1002 p++;
1003 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1004
1005 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1006
1007 if (!reg)
1008 return NULL;
1009
1010 *ccp = p;
1011 return reg;
1012 }
1013
1014 static int
1015 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1016 enum arm_reg_type type)
1017 {
1018 /* Alternative syntaxes are accepted for a few register classes. */
1019 switch (type)
1020 {
1021 case REG_TYPE_MVF:
1022 case REG_TYPE_MVD:
1023 case REG_TYPE_MVFX:
1024 case REG_TYPE_MVDX:
1025 /* Generic coprocessor register names are allowed for these. */
1026 if (reg && reg->type == REG_TYPE_CN)
1027 return reg->number;
1028 break;
1029
1030 case REG_TYPE_CP:
1031 /* For backward compatibility, a bare number is valid here. */
1032 {
1033 unsigned long processor = strtoul (start, ccp, 10);
1034 if (*ccp != start && processor <= 15)
1035 return processor;
1036 }
1037
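	/* Fall through.  */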
1038 case REG_TYPE_MMXWC:
1039 /* WC includes WCG. ??? I'm not sure this is true for all
1040 instructions that take WC registers. */
1041 if (reg && reg->type == REG_TYPE_MMXWCG)
1042 return reg->number;
1043 break;
1044
1045 default:
1046 break;
1047 }
1048
1049 return FAIL;
1050 }
1051
1052 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1053 return value is the register number or FAIL. */
1054
1055 static int
1056 arm_reg_parse (char **ccp, enum arm_reg_type type)
1057 {
1058 char *start = *ccp;
1059 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1060 int ret;
1061
1062 /* Do not allow a scalar (reg+index) to parse as a register. */
1063 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1064 return FAIL;
1065
1066 if (reg && reg->type == type)
1067 return reg->number;
1068
1069 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1070 return ret;
1071
1072 *ccp = start;
1073 return FAIL;
1074 }
1075
1076 /* Parse a Neon type specifier. *STR should point at the leading '.'
1077 character. Does no verification at this stage that the type fits the opcode
1078 properly. E.g.,
1079
1080 .i32.i32.s16
1081 .s32.f32
1082 .u16
1083
1084 Can all be legally parsed by this function.
1085
1086 Fills in neon_type struct pointer with parsed information, and updates STR
1087 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1088 type, FAIL if not. */
1089
1090 static int
1091 parse_neon_type (struct neon_type *type, char **str)
1092 {
1093 char *ptr = *str;
1094
1095 if (type)
1096 type->elems = 0;
1097
1098 while (type->elems < NEON_MAX_TYPE_ELS)
1099 {
1100 enum neon_el_type thistype = NT_untyped;
1101 unsigned thissize = -1u;
1102
1103 if (*ptr != '.')
1104 break;
1105
1106 ptr++;
1107
1108 /* Just a size without an explicit type. */
1109 if (ISDIGIT (*ptr))
1110 goto parsesize;
1111
1112 switch (TOLOWER (*ptr))
1113 {
1114 case 'i': thistype = NT_integer; break;
1115 case 'f': thistype = NT_float; break;
1116 case 'p': thistype = NT_poly; break;
1117 case 's': thistype = NT_signed; break;
1118 case 'u': thistype = NT_unsigned; break;
1119 case 'd':
1120 thistype = NT_float;
1121 thissize = 64;
1122 ptr++;
1123 goto done;
1124 default:
1125 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1126 return FAIL;
1127 }
1128
1129 ptr++;
1130
1131 /* .f is an abbreviation for .f32. */
1132 if (thistype == NT_float && !ISDIGIT (*ptr))
1133 thissize = 32;
1134 else
1135 {
1136 parsesize:
1137 thissize = strtoul (ptr, &ptr, 10);
1138
1139 if (thissize != 8 && thissize != 16 && thissize != 32
1140 && thissize != 64)
1141 {
1142 as_bad (_("bad size %d in type specifier"), thissize);
1143 return FAIL;
1144 }
1145 }
1146
1147 done:
1148 if (type)
1149 {
1150 type->el[type->elems].type = thistype;
1151 type->el[type->elems].size = thissize;
1152 type->elems++;
1153 }
1154 }
1155
1156 /* Empty/missing type is not a successful parse. */
1157 if (type->elems == 0)
1158 return FAIL;
1159
1160 *str = ptr;
1161
1162 return SUCCESS;
1163 }
1164
1165 /* Errors may be set multiple times during parsing or bit encoding
1166 (particularly in the Neon bits), but usually the earliest error which is set
1167 will be the most meaningful. Avoid overwriting it with later (cascading)
1168 errors by calling this function. */
1169
1170 static void
1171 first_error (const char *err)
1172 {
1173 if (!inst.error)
1174 inst.error = err;
1175 }
1176
1177 /* Parse a single type, e.g. ".s32", leading period included. */
1178 static int
1179 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1180 {
1181 char *str = *ccp;
1182 struct neon_type optype;
1183
1184 if (*str == '.')
1185 {
1186 if (parse_neon_type (&optype, &str) == SUCCESS)
1187 {
1188 if (optype.elems == 1)
1189 *vectype = optype.el[0];
1190 else
1191 {
1192 first_error (_("only one type should be specified for operand"));
1193 return FAIL;
1194 }
1195 }
1196 else
1197 {
1198 first_error (_("vector type expected"));
1199 return FAIL;
1200 }
1201 }
1202 else
1203 return FAIL;
1204
1205 *ccp = str;
1206
1207 return SUCCESS;
1208 }
1209
1210 /* Special meanings for indices (real indices range from 0-7); these values
1211    still fit into a 4-bit integer. */
1212
1213 #define NEON_ALL_LANES 15
1214 #define NEON_INTERLEAVE_LANES 14
1215
1216 /* Parse either a register or a scalar, with an optional type. Return the
1217 register number, and optionally fill in the actual type of the register
1218 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1219 type/index information in *TYPEINFO. */
1220
1221 static int
1222 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1223 enum arm_reg_type *rtype,
1224 struct neon_typed_alias *typeinfo)
1225 {
1226 char *str = *ccp;
1227 struct reg_entry *reg = arm_reg_parse_multi (&str);
1228 struct neon_typed_alias atype;
1229 struct neon_type_el parsetype;
1230
1231 atype.defined = 0;
1232 atype.index = -1;
1233 atype.eltype.type = NT_invtype;
1234 atype.eltype.size = -1;
1235
1236 /* Try alternate syntax for some types of register. Note these are mutually
1237 exclusive with the Neon syntax extensions. */
1238 if (reg == NULL)
1239 {
1240 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1241 if (altreg != FAIL)
1242 *ccp = str;
1243 if (typeinfo)
1244 *typeinfo = atype;
1245 return altreg;
1246 }
1247
1248 /* Undo polymorphism when a set of register types may be accepted. */
1249 if ((type == REG_TYPE_NDQ
1250 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1251 || (type == REG_TYPE_VFSD
1252 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1253 || (type == REG_TYPE_NSDQ
1254 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1255 || reg->type == REG_TYPE_NQ)))
1256 type = reg->type;
1257
1258 if (type != reg->type)
1259 return FAIL;
1260
1261 if (reg->neon)
1262 atype = *reg->neon;
1263
1264 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1265 {
1266 if ((atype.defined & NTA_HASTYPE) != 0)
1267 {
1268 first_error (_("can't redefine type for operand"));
1269 return FAIL;
1270 }
1271 atype.defined |= NTA_HASTYPE;
1272 atype.eltype = parsetype;
1273 }
1274
1275 if (skip_past_char (&str, '[') == SUCCESS)
1276 {
1277 if (type != REG_TYPE_VFD)
1278 {
1279 first_error (_("only D registers may be indexed"));
1280 return FAIL;
1281 }
1282
1283 if ((atype.defined & NTA_HASINDEX) != 0)
1284 {
1285 first_error (_("can't change index for operand"));
1286 return FAIL;
1287 }
1288
1289 atype.defined |= NTA_HASINDEX;
1290
1291 if (skip_past_char (&str, ']') == SUCCESS)
1292 atype.index = NEON_ALL_LANES;
1293 else
1294 {
1295 expressionS exp;
1296
1297 my_get_expression (&exp, &str, GE_NO_PREFIX);
1298
1299 if (exp.X_op != O_constant)
1300 {
1301 first_error (_("constant expression required"));
1302 return FAIL;
1303 }
1304
1305 if (skip_past_char (&str, ']') == FAIL)
1306 return FAIL;
1307
1308 atype.index = exp.X_add_number;
1309 }
1310 }
1311
1312 if (typeinfo)
1313 *typeinfo = atype;
1314
1315 if (rtype)
1316 *rtype = type;
1317
1318 *ccp = str;
1319
1320 return reg->number;
1321 }
1322
1323 /* Like arm_reg_parse, but allow the following extra features:
1324 - If RTYPE is non-zero, return the (possibly restricted) type of the
1325 register (e.g. Neon double or quad reg when either has been requested).
1326 - If this is a Neon vector type with additional type information, fill
1327 in the struct pointed to by VECTYPE (if non-NULL).
1328 This function will fault on encountering a scalar.
1329 */
1330
1331 static int
1332 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1333 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1334 {
1335 struct neon_typed_alias atype;
1336 char *str = *ccp;
1337 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1338
1339 if (reg == FAIL)
1340 return FAIL;
1341
1342 /* Do not allow a scalar (reg+index) to parse as a register. */
1343 if ((atype.defined & NTA_HASINDEX) != 0)
1344 {
1345 first_error (_("register operand expected, but got scalar"));
1346 return FAIL;
1347 }
1348
1349 if (vectype)
1350 *vectype = atype.eltype;
1351
1352 *ccp = str;
1353
1354 return reg;
1355 }
1356
1357 #define NEON_SCALAR_REG(X) ((X) >> 4)
1358 #define NEON_SCALAR_INDEX(X) ((X) & 15)
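      /* A parsed scalar is packed as (register << 4) | lane index (see
         parse_scalar below); these two macros unpack it again.  */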
1359
1360 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1361 have enough information to be able to do a good job bounds-checking. So, we
1362 just do easy checks here, and do further checks later. */
1363
1364 static int
1365 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1366 {
1367 int reg;
1368 char *str = *ccp;
1369 struct neon_typed_alias atype;
1370
1371 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1372
1373 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1374 return FAIL;
1375
1376 if (atype.index == NEON_ALL_LANES)
1377 {
1378 first_error (_("scalar must have an index"));
1379 return FAIL;
1380 }
1381 else if (atype.index >= 64 / elsize)
1382 {
1383 first_error (_("scalar index out of range"));
1384 return FAIL;
1385 }
1386
1387 if (type)
1388 *type = atype.eltype;
1389
1390 *ccp = str;
1391
1392 return reg * 16 + atype.index;
1393 }
1394
1395 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
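      /* For example, "{r0-r3,r5,lr}" yields the mask 0x402f.  A constant
         expression is also accepted and used directly as the mask.  */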
1396 static long
1397 parse_reg_list (char ** strp)
1398 {
1399 char * str = * strp;
1400 long range = 0;
1401 int another_range;
1402
1403 /* We come back here if we get ranges concatenated by '+' or '|'. */
1404 do
1405 {
1406 another_range = 0;
1407
1408 if (*str == '{')
1409 {
1410 int in_range = 0;
1411 int cur_reg = -1;
1412
1413 str++;
1414 do
1415 {
1416 int reg;
1417
1418 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1419 {
1420 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1421 return FAIL;
1422 }
1423
1424 if (in_range)
1425 {
1426 int i;
1427
1428 if (reg <= cur_reg)
1429 {
1430 first_error (_("bad range in register list"));
1431 return FAIL;
1432 }
1433
1434 for (i = cur_reg + 1; i < reg; i++)
1435 {
1436 if (range & (1 << i))
1437 as_tsktsk
1438 (_("Warning: duplicated register (r%d) in register list"),
1439 i);
1440 else
1441 range |= 1 << i;
1442 }
1443 in_range = 0;
1444 }
1445
1446 if (range & (1 << reg))
1447 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1448 reg);
1449 else if (reg <= cur_reg)
1450 as_tsktsk (_("Warning: register range not in ascending order"));
1451
1452 range |= 1 << reg;
1453 cur_reg = reg;
1454 }
1455 while (skip_past_comma (&str) != FAIL
1456 || (in_range = 1, *str++ == '-'));
1457 str--;
1458
1459 if (*str++ != '}')
1460 {
1461 first_error (_("missing `}'"));
1462 return FAIL;
1463 }
1464 }
1465 else
1466 {
1467 expressionS expr;
1468
1469 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1470 return FAIL;
1471
1472 if (expr.X_op == O_constant)
1473 {
1474 if (expr.X_add_number
1475 != (expr.X_add_number & 0x0000ffff))
1476 {
1477 inst.error = _("invalid register mask");
1478 return FAIL;
1479 }
1480
1481 if ((range & expr.X_add_number) != 0)
1482 {
1483 int regno = range & expr.X_add_number;
1484
1485 regno &= -regno;
1486 regno = (1 << regno) - 1;
1487 as_tsktsk
1488 (_("Warning: duplicated register (r%d) in register list"),
1489 regno);
1490 }
1491
1492 range |= expr.X_add_number;
1493 }
1494 else
1495 {
1496 if (inst.reloc.type != 0)
1497 {
1498 inst.error = _("expression too complex");
1499 return FAIL;
1500 }
1501
1502 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1503 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1504 inst.reloc.pc_rel = 0;
1505 }
1506 }
1507
1508 if (*str == '|' || *str == '+')
1509 {
1510 str++;
1511 another_range = 1;
1512 }
1513 }
1514 while (another_range);
1515
1516 *strp = str;
1517 return range;
1518 }
1519
1520 /* Types of registers in a list. */
1521
1522 enum reg_list_els
1523 {
1524 REGLIST_VFP_S,
1525 REGLIST_VFP_D,
1526 REGLIST_NEON_D
1527 };
1528
1529 /* Parse a VFP register list. If the string is invalid return FAIL.
1530 Otherwise return the number of registers, and set PBASE to the first
1531 register. Parses registers of type ETYPE.
1532 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1533 - Q registers can be used to specify pairs of D registers
1534 - { } can be omitted from around a singleton register list
1535 FIXME: This is not implemented, as it would require backtracking in
1536 some cases, e.g.:
1537 vtbl.8 d3,d4,d5
1538 This could be done (the meaning isn't really ambiguous), but doesn't
1539 fit in well with the current parsing framework.
1540 - 32 D registers may be used (also true for VFPv3).
1541 FIXME: Types are ignored in these register lists, which is probably a
1542 bug. */
1543
1544 static int
1545 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1546 {
1547 char *str = *ccp;
1548 int base_reg;
1549 int new_base;
1550 enum arm_reg_type regtype = 0;
1551 int max_regs = 0;
1552 int count = 0;
1553 int warned = 0;
1554 unsigned long mask = 0;
1555 int i;
1556
1557 if (*str != '{')
1558 {
1559 inst.error = _("expecting {");
1560 return FAIL;
1561 }
1562
1563 str++;
1564
1565 switch (etype)
1566 {
1567 case REGLIST_VFP_S:
1568 regtype = REG_TYPE_VFS;
1569 max_regs = 32;
1570 break;
1571
1572 case REGLIST_VFP_D:
1573 regtype = REG_TYPE_VFD;
1574 break;
1575
1576 case REGLIST_NEON_D:
1577 regtype = REG_TYPE_NDQ;
1578 break;
1579 }
1580
1581 if (etype != REGLIST_VFP_S)
1582 {
1583 /* VFPv3 allows 32 D registers. */
1584 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1585 {
1586 max_regs = 32;
1587 if (thumb_mode)
1588 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1589 fpu_vfp_ext_v3);
1590 else
1591 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1592 fpu_vfp_ext_v3);
1593 }
1594 else
1595 max_regs = 16;
1596 }
1597
1598 base_reg = max_regs;
1599
1600 do
1601 {
1602 int setmask = 1, addregs = 1;
1603
1604 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1605
1606 if (new_base == FAIL)
1607 {
1608 first_error (_(reg_expected_msgs[regtype]));
1609 return FAIL;
1610 }
1611
1612 if (new_base >= max_regs)
1613 {
1614 first_error (_("register out of range in list"));
1615 return FAIL;
1616 }
1617
1618 /* Note: a value of 2 * n is returned for the register Q<n>. */
1619 if (regtype == REG_TYPE_NQ)
1620 {
1621 setmask = 3;
1622 addregs = 2;
1623 }
1624
1625 if (new_base < base_reg)
1626 base_reg = new_base;
1627
1628 if (mask & (setmask << new_base))
1629 {
1630 first_error (_("invalid register list"));
1631 return FAIL;
1632 }
1633
1634 if ((mask >> new_base) != 0 && ! warned)
1635 {
1636 as_tsktsk (_("register list not in ascending order"));
1637 warned = 1;
1638 }
1639
1640 mask |= setmask << new_base;
1641 count += addregs;
1642
1643 if (*str == '-') /* We have the start of a range expression */
1644 {
1645 int high_range;
1646
1647 str++;
1648
1649 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1650 == FAIL)
1651 {
1652 inst.error = gettext (reg_expected_msgs[regtype]);
1653 return FAIL;
1654 }
1655
1656 if (high_range >= max_regs)
1657 {
1658 first_error (_("register out of range in list"));
1659 return FAIL;
1660 }
1661
1662 if (regtype == REG_TYPE_NQ)
1663 high_range = high_range + 1;
1664
1665 if (high_range <= new_base)
1666 {
1667 inst.error = _("register range not in ascending order");
1668 return FAIL;
1669 }
1670
1671 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1672 {
1673 if (mask & (setmask << new_base))
1674 {
1675 inst.error = _("invalid register list");
1676 return FAIL;
1677 }
1678
1679 mask |= setmask << new_base;
1680 count += addregs;
1681 }
1682 }
1683 }
1684 while (skip_past_comma (&str) != FAIL);
1685
1686 str++;
1687
1688 /* Sanity check -- should have raised a parse error above. */
1689 if (count == 0 || count > max_regs)
1690 abort ();
1691
1692 *pbase = base_reg;
1693
1694 /* Final test -- the registers must be consecutive. */
1695 mask >>= base_reg;
1696 for (i = 0; i < count; i++)
1697 {
1698 if ((mask & (1u << i)) == 0)
1699 {
1700 inst.error = _("non-contiguous register range");
1701 return FAIL;
1702 }
1703 }
1704
1705 *ccp = str;
1706
1707 return count;
1708 }
1709
1710 /* True if two alias types are the same. */
1711
1712 static int
1713 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1714 {
1715 if (!a && !b)
1716 return 1;
1717
1718 if (!a || !b)
1719 return 0;
1720
1721 if (a->defined != b->defined)
1722 return 0;
1723
1724 if ((a->defined & NTA_HASTYPE) != 0
1725 && (a->eltype.type != b->eltype.type
1726 || a->eltype.size != b->eltype.size))
1727 return 0;
1728
1729 if ((a->defined & NTA_HASINDEX) != 0
1730 && (a->index != b->index))
1731 return 0;
1732
1733 return 1;
1734 }
1735
1736 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1737 The base register is put in *PBASE.
1738 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1739 the return value.
1740 The register stride (minus one) is put in bit 4 of the return value.
1741 Bits [6:5] encode the list length (minus one).
1742 The type of the list elements is put in *ELTYPE, if non-NULL. */
1743
1744 #define NEON_LANE(X) ((X) & 0xf)
1745 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1746 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
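      /* For example, "{d0[2],d2[2]}" has base register 0, lane 2, stride 2
         and length 2, so the function below stores 0 in *PBASE and returns
         2 | (1 << 4) | (1 << 5) = 0x32.  */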
1747
1748 static int
1749 parse_neon_el_struct_list (char **str, unsigned *pbase,
1750 struct neon_type_el *eltype)
1751 {
1752 char *ptr = *str;
1753 int base_reg = -1;
1754 int reg_incr = -1;
1755 int count = 0;
1756 int lane = -1;
1757 int leading_brace = 0;
1758 enum arm_reg_type rtype = REG_TYPE_NDQ;
1759 int addregs = 1;
1760 const char *const incr_error = "register stride must be 1 or 2";
1761 const char *const type_error = "mismatched element/structure types in list";
1762 struct neon_typed_alias firsttype;
1763
1764 if (skip_past_char (&ptr, '{') == SUCCESS)
1765 leading_brace = 1;
1766
1767 do
1768 {
1769 struct neon_typed_alias atype;
1770 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1771
1772 if (getreg == FAIL)
1773 {
1774 first_error (_(reg_expected_msgs[rtype]));
1775 return FAIL;
1776 }
1777
1778 if (base_reg == -1)
1779 {
1780 base_reg = getreg;
1781 if (rtype == REG_TYPE_NQ)
1782 {
1783 reg_incr = 1;
1784 addregs = 2;
1785 }
1786 firsttype = atype;
1787 }
1788 else if (reg_incr == -1)
1789 {
1790 reg_incr = getreg - base_reg;
1791 if (reg_incr < 1 || reg_incr > 2)
1792 {
1793 first_error (_(incr_error));
1794 return FAIL;
1795 }
1796 }
1797 else if (getreg != base_reg + reg_incr * count)
1798 {
1799 first_error (_(incr_error));
1800 return FAIL;
1801 }
1802
1803 if (!neon_alias_types_same (&atype, &firsttype))
1804 {
1805 first_error (_(type_error));
1806 return FAIL;
1807 }
1808
1809 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1810 modes. */
1811 if (ptr[0] == '-')
1812 {
1813 struct neon_typed_alias htype;
1814 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1815 if (lane == -1)
1816 lane = NEON_INTERLEAVE_LANES;
1817 else if (lane != NEON_INTERLEAVE_LANES)
1818 {
1819 first_error (_(type_error));
1820 return FAIL;
1821 }
1822 if (reg_incr == -1)
1823 reg_incr = 1;
1824 else if (reg_incr != 1)
1825 {
1826 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1827 return FAIL;
1828 }
1829 ptr++;
1830 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1831 if (hireg == FAIL)
1832 {
1833 first_error (_(reg_expected_msgs[rtype]));
1834 return FAIL;
1835 }
1836 if (!neon_alias_types_same (&htype, &firsttype))
1837 {
1838 first_error (_(type_error));
1839 return FAIL;
1840 }
1841 count += hireg + dregs - getreg;
1842 continue;
1843 }
1844
1845 /* If we're using Q registers, we can't use [] or [n] syntax. */
1846 if (rtype == REG_TYPE_NQ)
1847 {
1848 count += 2;
1849 continue;
1850 }
1851
1852 if ((atype.defined & NTA_HASINDEX) != 0)
1853 {
1854 if (lane == -1)
1855 lane = atype.index;
1856 else if (lane != atype.index)
1857 {
1858 first_error (_(type_error));
1859 return FAIL;
1860 }
1861 }
1862 else if (lane == -1)
1863 lane = NEON_INTERLEAVE_LANES;
1864 else if (lane != NEON_INTERLEAVE_LANES)
1865 {
1866 first_error (_(type_error));
1867 return FAIL;
1868 }
1869 count++;
1870 }
1871 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1872
1873 /* No lane set by [x]. We must be interleaving structures. */
1874 if (lane == -1)
1875 lane = NEON_INTERLEAVE_LANES;
1876
1877 /* Sanity check. */
1878 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1879 || (count > 1 && reg_incr == -1))
1880 {
1881 first_error (_("error parsing element/structure list"));
1882 return FAIL;
1883 }
1884
1885 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1886 {
1887 first_error (_("expected }"));
1888 return FAIL;
1889 }
1890
1891 if (reg_incr == -1)
1892 reg_incr = 1;
1893
1894 if (eltype)
1895 *eltype = firsttype.eltype;
1896
1897 *pbase = base_reg;
1898 *str = ptr;
1899
1900 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1901 }
1902
1903 /* Parse an explicit relocation suffix on an expression. This is
1904 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1905 arm_reloc_hsh contains no entries, so this function can only
1906 succeed if there is no () after the word. Returns -1 on error,
1907 BFD_RELOC_UNUSED if there wasn't any suffix. */
1908 static int
1909 parse_reloc (char **str)
1910 {
1911 struct reloc_entry *r;
1912 char *p, *q;
1913
1914 if (**str != '(')
1915 return BFD_RELOC_UNUSED;
1916
1917 p = *str + 1;
1918 q = p;
1919
1920 while (*q && *q != ')' && *q != ',')
1921 q++;
1922 if (*q != ')')
1923 return -1;
1924
1925 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1926 return -1;
1927
1928 *str = q + 1;
1929 return r->reloc;
1930 }
1931
1932 /* Directives: register aliases. */
1933
1934 static struct reg_entry *
1935 insert_reg_alias (char *str, int number, int type)
1936 {
1937 struct reg_entry *new;
1938 const char *name;
1939
1940 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1941 {
1942 if (new->builtin)
1943 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1944
1945 /* Only warn about a redefinition if it's not defined as the
1946 same register. */
1947 else if (new->number != number || new->type != type)
1948 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1949
1950 return 0;
1951 }
1952
1953 name = xstrdup (str);
1954 new = xmalloc (sizeof (struct reg_entry));
1955
1956 new->name = name;
1957 new->number = number;
1958 new->type = type;
1959 new->builtin = FALSE;
1960 new->neon = NULL;
1961
1962 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1963 abort ();
1964
1965 return new;
1966 }
1967
1968 static void
1969 insert_neon_reg_alias (char *str, int number, int type,
1970 struct neon_typed_alias *atype)
1971 {
1972 struct reg_entry *reg = insert_reg_alias (str, number, type);
1973
1974 if (!reg)
1975 {
1976 first_error (_("attempt to redefine typed alias"));
1977 return;
1978 }
1979
1980 if (atype)
1981 {
1982 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
1983 *reg->neon = *atype;
1984 }
1985 }
1986
1987 /* Look for the .req directive. This is of the form:
1988
1989 new_register_name .req existing_register_name
1990
1991 If we find one, or if it looks sufficiently like one that we want to
1992 handle any error here, return non-zero. Otherwise return zero. */
1993
1994 static int
1995 create_register_alias (char * newname, char *p)
1996 {
1997 struct reg_entry *old;
1998 char *oldname, *nbuf;
1999 size_t nlen;
2000
2001 /* The input scrubber ensures that whitespace after the mnemonic is
2002 collapsed to single spaces. */
2003 oldname = p;
2004 if (strncmp (oldname, " .req ", 6) != 0)
2005 return 0;
2006
2007 oldname += 6;
2008 if (*oldname == '\0')
2009 return 0;
2010
2011 old = hash_find (arm_reg_hsh, oldname);
2012 if (!old)
2013 {
2014 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2015 return 1;
2016 }
2017
2018 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2019 the desired alias name, and p points to its end. If not, then
2020 the desired alias name is in the global original_case_string. */
2021 #ifdef TC_CASE_SENSITIVE
2022 nlen = p - newname;
2023 #else
2024 newname = original_case_string;
2025 nlen = strlen (newname);
2026 #endif
2027
2028 nbuf = alloca (nlen + 1);
2029 memcpy (nbuf, newname, nlen);
2030 nbuf[nlen] = '\0';
2031
2032 /* Create aliases under the new name as stated; an all-lowercase
2033 version of the new name; and an all-uppercase version of the new
2034 name. */
2035 insert_reg_alias (nbuf, old->number, old->type);
2036
2037 for (p = nbuf; *p; p++)
2038 *p = TOUPPER (*p);
2039
2040 if (strncmp (nbuf, newname, nlen))
2041 insert_reg_alias (nbuf, old->number, old->type);
2042
2043 for (p = nbuf; *p; p++)
2044 *p = TOLOWER (*p);
2045
2046 if (strncmp (nbuf, newname, nlen))
2047 insert_reg_alias (nbuf, old->number, old->type);
2048
2049 return 1;
2050 }
2051
2052 /* Create a Neon typed/indexed register alias using directives, e.g.:
2053 X .dn d5.s32[1]
2054 Y .qn 6.s16
2055 Z .dn d7
2056 T .dn Z[0]
2057 These typed registers can be used instead of the types specified after the
2058 Neon mnemonic, so long as all operands given have types. Types can also be
2059 specified directly, e.g.:
2060 vadd d0.s32, d1.s32, d2.s32
2061 */
2062
2063 static int
2064 create_neon_reg_alias (char *newname, char *p)
2065 {
2066 enum arm_reg_type basetype;
2067 struct reg_entry *basereg;
2068 struct reg_entry mybasereg;
2069 struct neon_type ntype;
2070 struct neon_typed_alias typeinfo;
2071 char *namebuf, *nameend;
2072 int namelen;
2073
2074 typeinfo.defined = 0;
2075 typeinfo.eltype.type = NT_invtype;
2076 typeinfo.eltype.size = -1;
2077 typeinfo.index = -1;
2078
2079 nameend = p;
2080
2081 if (strncmp (p, " .dn ", 5) == 0)
2082 basetype = REG_TYPE_VFD;
2083 else if (strncmp (p, " .qn ", 5) == 0)
2084 basetype = REG_TYPE_NQ;
2085 else
2086 return 0;
2087
2088 p += 5;
2089
2090 if (*p == '\0')
2091 return 0;
2092
2093 basereg = arm_reg_parse_multi (&p);
2094
2095 if (basereg && basereg->type != basetype)
2096 {
2097 as_bad (_("bad type for register"));
2098 return 0;
2099 }
2100
2101 if (basereg == NULL)
2102 {
2103 expressionS exp;
2104 /* Try parsing as an integer. */
2105 my_get_expression (&exp, &p, GE_NO_PREFIX);
2106 if (exp.X_op != O_constant)
2107 {
2108 as_bad (_("expression must be constant"));
2109 return 0;
2110 }
2111 basereg = &mybasereg;
2112 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2113 : exp.X_add_number;
2114 basereg->neon = 0;
2115 }
2116
2117 if (basereg->neon)
2118 typeinfo = *basereg->neon;
2119
2120 if (parse_neon_type (&ntype, &p) == SUCCESS)
2121 {
2122 /* We got a type. */
2123 if (typeinfo.defined & NTA_HASTYPE)
2124 {
2125 as_bad (_("can't redefine the type of a register alias"));
2126 return 0;
2127 }
2128
2129 typeinfo.defined |= NTA_HASTYPE;
2130 if (ntype.elems != 1)
2131 {
2132 as_bad (_("you must specify a single type only"));
2133 return 0;
2134 }
2135 typeinfo.eltype = ntype.el[0];
2136 }
2137
2138 if (skip_past_char (&p, '[') == SUCCESS)
2139 {
2140 expressionS exp;
2141 /* We got a scalar index. */
2142
2143 if (typeinfo.defined & NTA_HASINDEX)
2144 {
2145 as_bad (_("can't redefine the index of a scalar alias"));
2146 return 0;
2147 }
2148
2149 my_get_expression (&exp, &p, GE_NO_PREFIX);
2150
2151 if (exp.X_op != O_constant)
2152 {
2153 as_bad (_("scalar index must be constant"));
2154 return 0;
2155 }
2156
2157 typeinfo.defined |= NTA_HASINDEX;
2158 typeinfo.index = exp.X_add_number;
2159
2160 if (skip_past_char (&p, ']') == FAIL)
2161 {
2162 as_bad (_("expecting ]"));
2163 return 0;
2164 }
2165 }
2166
2167 namelen = nameend - newname;
2168 namebuf = alloca (namelen + 1);
2169 strncpy (namebuf, newname, namelen);
2170 namebuf[namelen] = '\0';
2171
2172 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2173 typeinfo.defined != 0 ? &typeinfo : NULL);
2174
2175 /* Insert name in all uppercase. */
2176 for (p = namebuf; *p; p++)
2177 *p = TOUPPER (*p);
2178
2179 if (strncmp (namebuf, newname, namelen))
2180 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2181 typeinfo.defined != 0 ? &typeinfo : NULL);
2182
2183 /* Insert name in all lowercase. */
2184 for (p = namebuf; *p; p++)
2185 *p = TOLOWER (*p);
2186
2187 if (strncmp (namebuf, newname, namelen))
2188 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2189 typeinfo.defined != 0 ? &typeinfo : NULL);
2190
2191 return 1;
2192 }
2193
2194 /* Should never be called, as .req goes between the alias and the
2195 register name, not at the beginning of the line. */
2196 static void
2197 s_req (int a ATTRIBUTE_UNUSED)
2198 {
2199 as_bad (_("invalid syntax for .req directive"));
2200 }
2201
2202 static void
2203 s_dn (int a ATTRIBUTE_UNUSED)
2204 {
2205 as_bad (_("invalid syntax for .dn directive"));
2206 }
2207
2208 static void
2209 s_qn (int a ATTRIBUTE_UNUSED)
2210 {
2211 as_bad (_("invalid syntax for .qn directive"));
2212 }
2213
2214 /* The .unreq directive deletes an alias which was previously defined
2215 by .req. For example:
2216
2217 my_alias .req r11
2218 .unreq my_alias */
2219
2220 static void
2221 s_unreq (int a ATTRIBUTE_UNUSED)
2222 {
2223 char * name;
2224 char saved_char;
2225
2226 name = input_line_pointer;
2227
2228 while (*input_line_pointer != 0
2229 && *input_line_pointer != ' '
2230 && *input_line_pointer != '\n')
2231 ++input_line_pointer;
2232
2233 saved_char = *input_line_pointer;
2234 *input_line_pointer = 0;
2235
2236 if (!*name)
2237 as_bad (_("invalid syntax for .unreq directive"));
2238 else
2239 {
2240 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2241
2242 if (!reg)
2243 as_bad (_("unknown register alias '%s'"), name);
2244 else if (reg->builtin)
2245 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2246 name);
2247 else
2248 {
2249 hash_delete (arm_reg_hsh, name);
2250 free ((char *) reg->name);
2251 if (reg->neon)
2252 free (reg->neon);
2253 free (reg);
2254 }
2255 }
2256
2257 *input_line_pointer = saved_char;
2258 demand_empty_rest_of_line ();
2259 }
2260
2261 /* Directives: Instruction set selection. */
2262
2263 #ifdef OBJ_ELF
2264 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2265 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2266 Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
2267 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
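/* As an illustration, a fragment such as:

     .arm                   @ mapping_state (MAP_ARM)   -> "$a" emitted here
     add   r0, r0, #1
     .word 0x12345678       @ mapping_state (MAP_DATA)  -> "$d" emitted here
     .thumb                 @ mapping_state (MAP_THUMB) -> "$t" emitted here
     adds  r0, #1

   produces one mapping symbol at each transition, which is what lets
   disassemblers tell ARM code, Thumb code and data apart.  */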
2268
2269 static enum mstate mapstate = MAP_UNDEFINED;
2270
2271 static void
2272 mapping_state (enum mstate state)
2273 {
2274 symbolS * symbolP;
2275 const char * symname;
2276 int type;
2277
2278 if (mapstate == state)
2279 /* The mapping symbol has already been emitted.
2280 There is nothing else to do. */
2281 return;
2282
2283 mapstate = state;
2284
2285 switch (state)
2286 {
2287 case MAP_DATA:
2288 symname = "$d";
2289 type = BSF_NO_FLAGS;
2290 break;
2291 case MAP_ARM:
2292 symname = "$a";
2293 type = BSF_NO_FLAGS;
2294 break;
2295 case MAP_THUMB:
2296 symname = "$t";
2297 type = BSF_NO_FLAGS;
2298 break;
2299 case MAP_UNDEFINED:
2300 return;
2301 default:
2302 abort ();
2303 }
2304
2305 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2306
2307 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2308 symbol_table_insert (symbolP);
2309 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2310
2311 switch (state)
2312 {
2313 case MAP_ARM:
2314 THUMB_SET_FUNC (symbolP, 0);
2315 ARM_SET_THUMB (symbolP, 0);
2316 ARM_SET_INTERWORK (symbolP, support_interwork);
2317 break;
2318
2319 case MAP_THUMB:
2320 THUMB_SET_FUNC (symbolP, 1);
2321 ARM_SET_THUMB (symbolP, 1);
2322 ARM_SET_INTERWORK (symbolP, support_interwork);
2323 break;
2324
2325 case MAP_DATA:
2326 default:
2327 return;
2328 }
2329 }
2330 #else
2331 #define mapping_state(x) /* nothing */
2332 #endif
2333
2334 /* Find the real, Thumb encoded start of a Thumb function. */
2335
2336 static symbolS *
2337 find_real_start (symbolS * symbolP)
2338 {
2339 char * real_start;
2340 const char * name = S_GET_NAME (symbolP);
2341 symbolS * new_target;
2342
2343 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2344 #define STUB_NAME ".real_start_of"
2345
2346 if (name == NULL)
2347 abort ();
2348
2349 /* The compiler may generate BL instructions to local labels because
2350 it needs to perform a branch to a far away location. These labels
2351 do not have a corresponding ".real_start_of" label. We check
2352 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2353 the ".real_start_of" convention for nonlocal branches. */
2354 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2355 return symbolP;
2356
2357 real_start = ACONCAT ((STUB_NAME, name, NULL));
2358 new_target = symbol_find (real_start);
2359
2360 if (new_target == NULL)
2361 {
2362 as_warn ("Failed to find real start of function: %s\n", name);
2363 new_target = symbolP;
2364 }
2365
2366 return new_target;
2367 }
2368
2369 static void
2370 opcode_select (int width)
2371 {
2372 switch (width)
2373 {
2374 case 16:
2375 if (! thumb_mode)
2376 {
2377 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2378 as_bad (_("selected processor does not support THUMB opcodes"));
2379
2380 thumb_mode = 1;
2381 /* No need to force the alignment, since we will have been
2382 coming from ARM mode, which is word-aligned. */
2383 record_alignment (now_seg, 1);
2384 }
2385 mapping_state (MAP_THUMB);
2386 break;
2387
2388 case 32:
2389 if (thumb_mode)
2390 {
2391 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2392 as_bad (_("selected processor does not support ARM opcodes"));
2393
2394 thumb_mode = 0;
2395
2396 if (!need_pass_2)
2397 frag_align (2, 0, 0);
2398
2399 record_alignment (now_seg, 1);
2400 }
2401 mapping_state (MAP_ARM);
2402 break;
2403
2404 default:
2405 as_bad (_("invalid instruction size selected (%d)"), width);
2406 }
2407 }
2408
2409 static void
2410 s_arm (int ignore ATTRIBUTE_UNUSED)
2411 {
2412 opcode_select (32);
2413 demand_empty_rest_of_line ();
2414 }
2415
2416 static void
2417 s_thumb (int ignore ATTRIBUTE_UNUSED)
2418 {
2419 opcode_select (16);
2420 demand_empty_rest_of_line ();
2421 }
2422
2423 static void
2424 s_code (int unused ATTRIBUTE_UNUSED)
2425 {
2426 int temp;
2427
2428 temp = get_absolute_expression ();
2429 switch (temp)
2430 {
2431 case 16:
2432 case 32:
2433 opcode_select (temp);
2434 break;
2435
2436 default:
2437 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2438 }
2439 }
2440
2441 static void
2442 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2443 {
2444 /* If we are not already in thumb mode go into it, EVEN if
2445 the target processor does not support thumb instructions.
2446 This is used by gcc/config/arm/lib1funcs.asm for example
2447 to compile interworking support functions even if the
2448 target processor does not support interworking. */
2449 if (! thumb_mode)
2450 {
2451 thumb_mode = 2;
2452 record_alignment (now_seg, 1);
2453 }
2454
2455 demand_empty_rest_of_line ();
2456 }
2457
2458 static void
2459 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2460 {
2461 s_thumb (0);
2462
2463 /* The following label is the name/address of the start of a Thumb function.
2464 We need to know this for the interworking support. */
2465 label_is_thumb_function_name = TRUE;
2466 }
2467
2468 /* Perform a .set directive, but also mark the alias as
2469 being a thumb function. */
2470
2471 static void
2472 s_thumb_set (int equiv)
2473 {
2474 /* XXX the following is a duplicate of the code for s_set() in read.c
2475 We cannot just call that code as we need to get at the symbol that
2476 is created. */
2477 char * name;
2478 char delim;
2479 char * end_name;
2480 symbolS * symbolP;
2481
2482 /* Especial apologies for the random logic:
2483 This just grew, and could be parsed much more simply!
2484 Dean - in haste. */
2485 name = input_line_pointer;
2486 delim = get_symbol_end ();
2487 end_name = input_line_pointer;
2488 *end_name = delim;
2489
2490 if (*input_line_pointer != ',')
2491 {
2492 *end_name = 0;
2493 as_bad (_("expected comma after name \"%s\""), name);
2494 *end_name = delim;
2495 ignore_rest_of_line ();
2496 return;
2497 }
2498
2499 input_line_pointer++;
2500 *end_name = 0;
2501
2502 if (name[0] == '.' && name[1] == '\0')
2503 {
2504 /* XXX - this should not happen to .thumb_set. */
2505 abort ();
2506 }
2507
2508 if ((symbolP = symbol_find (name)) == NULL
2509 && (symbolP = md_undefined_symbol (name)) == NULL)
2510 {
2511 #ifndef NO_LISTING
2512 /* When doing symbol listings, play games with dummy fragments living
2513 outside the normal fragment chain to record the file and line info
2514 for this symbol. */
2515 if (listing & LISTING_SYMBOLS)
2516 {
2517 extern struct list_info_struct * listing_tail;
2518 fragS * dummy_frag = xmalloc (sizeof (fragS));
2519
2520 memset (dummy_frag, 0, sizeof (fragS));
2521 dummy_frag->fr_type = rs_fill;
2522 dummy_frag->line = listing_tail;
2523 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2524 dummy_frag->fr_symbol = symbolP;
2525 }
2526 else
2527 #endif
2528 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2529
2530 #ifdef OBJ_COFF
2531 /* "set" symbols are local unless otherwise specified. */
2532 SF_SET_LOCAL (symbolP);
2533 #endif /* OBJ_COFF */
2534 } /* Make a new symbol. */
2535
2536 symbol_table_insert (symbolP);
2537
2538 * end_name = delim;
2539
2540 if (equiv
2541 && S_IS_DEFINED (symbolP)
2542 && S_GET_SEGMENT (symbolP) != reg_section)
2543 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2544
2545 pseudo_set (symbolP);
2546
2547 demand_empty_rest_of_line ();
2548
2549 /* XXX Now we come to the Thumb specific bit of code. */
2550
2551 THUMB_SET_FUNC (symbolP, 1);
2552 ARM_SET_THUMB (symbolP, 1);
2553 #if defined OBJ_ELF || defined OBJ_COFF
2554 ARM_SET_INTERWORK (symbolP, support_interwork);
2555 #endif
2556 }
2557
2558 /* Directives: Mode selection. */
2559
2560 /* .syntax [unified|divided] - choose the new unified syntax
2561 (same for Arm and Thumb encoding, modulo slight differences in what
2562 can be represented) or the old divergent syntax for each mode. */
2563 static void
2564 s_syntax (int unused ATTRIBUTE_UNUSED)
2565 {
2566 char *name, delim;
2567
2568 name = input_line_pointer;
2569 delim = get_symbol_end ();
2570
2571 if (!strcasecmp (name, "unified"))
2572 unified_syntax = TRUE;
2573 else if (!strcasecmp (name, "divided"))
2574 unified_syntax = FALSE;
2575 else
2576 {
2577 as_bad (_("unrecognized syntax mode \"%s\""), name);
2578 return;
2579 }
2580 *input_line_pointer = delim;
2581 demand_empty_rest_of_line ();
2582 }
2583
2584 /* Directives: sectioning and alignment. */
2585
2586 /* Same as s_align_ptwo but align 0 => align 2. */
2587
2588 static void
2589 s_align (int unused ATTRIBUTE_UNUSED)
2590 {
2591 int temp;
2592 long temp_fill;
2593 long max_alignment = 15;
2594
2595 temp = get_absolute_expression ();
2596 if (temp > max_alignment)
2597 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2598 else if (temp < 0)
2599 {
2600 as_bad (_("alignment negative. 0 assumed."));
2601 temp = 0;
2602 }
2603
2604 if (*input_line_pointer == ',')
2605 {
2606 input_line_pointer++;
2607 temp_fill = get_absolute_expression ();
2608 }
2609 else
2610 temp_fill = 0;
2611
2612 if (!temp)
2613 temp = 2;
2614
2615 /* Only make a frag if we HAVE to. */
2616 if (temp && !need_pass_2)
2617 frag_align (temp, (int) temp_fill, 0);
2618 demand_empty_rest_of_line ();
2619
2620 record_alignment (now_seg, temp);
2621 }
2622
2623 static void
2624 s_bss (int ignore ATTRIBUTE_UNUSED)
2625 {
2626 /* We don't support putting frags in the BSS segment; we fake it by
2627 marking in_bss, then looking at s_skip for clues. */
2628 subseg_set (bss_section, 0);
2629 demand_empty_rest_of_line ();
2630 mapping_state (MAP_DATA);
2631 }
2632
2633 static void
2634 s_even (int ignore ATTRIBUTE_UNUSED)
2635 {
2636 /* Never make a frag if we expect an extra pass. */
2637 if (!need_pass_2)
2638 frag_align (1, 0, 0);
2639
2640 record_alignment (now_seg, 1);
2641
2642 demand_empty_rest_of_line ();
2643 }
2644
2645 /* Directives: Literal pools. */
2646
2647 static literal_pool *
2648 find_literal_pool (void)
2649 {
2650 literal_pool * pool;
2651
2652 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2653 {
2654 if (pool->section == now_seg
2655 && pool->sub_section == now_subseg)
2656 break;
2657 }
2658
2659 return pool;
2660 }
2661
2662 static literal_pool *
2663 find_or_make_literal_pool (void)
2664 {
2665 /* Next literal pool ID number. */
2666 static unsigned int latest_pool_num = 1;
2667 literal_pool * pool;
2668
2669 pool = find_literal_pool ();
2670
2671 if (pool == NULL)
2672 {
2673 /* Create a new pool. */
2674 pool = xmalloc (sizeof (* pool));
2675 if (! pool)
2676 return NULL;
2677
2678 pool->next_free_entry = 0;
2679 pool->section = now_seg;
2680 pool->sub_section = now_subseg;
2681 pool->next = list_of_pools;
2682 pool->symbol = NULL;
2683
2684 /* Add it to the list. */
2685 list_of_pools = pool;
2686 }
2687
2688 /* New pools, and emptied pools, will have a NULL symbol. */
2689 if (pool->symbol == NULL)
2690 {
2691 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2692 (valueT) 0, &zero_address_frag);
2693 pool->id = latest_pool_num ++;
2694 }
2695
2696 /* Done. */
2697 return pool;
2698 }
2699
2700 /* Add the literal in the global 'inst'
2701 structure to the relevant literal pool. */
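/* For example, a constant that cannot be encoded as an immediate in

     ldr r0, =0x12345678

   ends up here: the value is added to (or found in) the pool for the
   current section/subsection, and the instruction becomes a PC-relative
   load from the pool symbol, which s_ltorg below emits when a .ltorg or
   .pool directive is reached.  */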
2702
2703 static int
2704 add_to_lit_pool (void)
2705 {
2706 literal_pool * pool;
2707 unsigned int entry;
2708
2709 pool = find_or_make_literal_pool ();
2710
2711 /* Check if this literal value is already in the pool. */
2712 for (entry = 0; entry < pool->next_free_entry; entry ++)
2713 {
2714 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2715 && (inst.reloc.exp.X_op == O_constant)
2716 && (pool->literals[entry].X_add_number
2717 == inst.reloc.exp.X_add_number)
2718 && (pool->literals[entry].X_unsigned
2719 == inst.reloc.exp.X_unsigned))
2720 break;
2721
2722 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2723 && (inst.reloc.exp.X_op == O_symbol)
2724 && (pool->literals[entry].X_add_number
2725 == inst.reloc.exp.X_add_number)
2726 && (pool->literals[entry].X_add_symbol
2727 == inst.reloc.exp.X_add_symbol)
2728 && (pool->literals[entry].X_op_symbol
2729 == inst.reloc.exp.X_op_symbol))
2730 break;
2731 }
2732
2733 /* Do we need to create a new entry? */
2734 if (entry == pool->next_free_entry)
2735 {
2736 if (entry >= MAX_LITERAL_POOL_SIZE)
2737 {
2738 inst.error = _("literal pool overflow");
2739 return FAIL;
2740 }
2741
2742 pool->literals[entry] = inst.reloc.exp;
2743 pool->next_free_entry += 1;
2744 }
2745
2746 inst.reloc.exp.X_op = O_symbol;
2747 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2748 inst.reloc.exp.X_add_symbol = pool->symbol;
2749
2750 return SUCCESS;
2751 }
2752
2753 /* Can't use symbol_new here, so have to create a symbol and then at
2754 a later date assign it a value. That's what these functions do. */
2755
2756 static void
2757 symbol_locate (symbolS * symbolP,
2758 const char * name, /* It is copied, the caller can modify. */
2759 segT segment, /* Segment identifier (SEG_<something>). */
2760 valueT valu, /* Symbol value. */
2761 fragS * frag) /* Associated fragment. */
2762 {
2763 unsigned int name_length;
2764 char * preserved_copy_of_name;
2765
2766 name_length = strlen (name) + 1; /* +1 for \0. */
2767 obstack_grow (&notes, name, name_length);
2768 preserved_copy_of_name = obstack_finish (&notes);
2769
2770 #ifdef tc_canonicalize_symbol_name
2771 preserved_copy_of_name =
2772 tc_canonicalize_symbol_name (preserved_copy_of_name);
2773 #endif
2774
2775 S_SET_NAME (symbolP, preserved_copy_of_name);
2776
2777 S_SET_SEGMENT (symbolP, segment);
2778 S_SET_VALUE (symbolP, valu);
2779 symbol_clear_list_pointers (symbolP);
2780
2781 symbol_set_frag (symbolP, frag);
2782
2783 /* Link to end of symbol chain. */
2784 {
2785 extern int symbol_table_frozen;
2786
2787 if (symbol_table_frozen)
2788 abort ();
2789 }
2790
2791 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2792
2793 obj_symbol_new_hook (symbolP);
2794
2795 #ifdef tc_symbol_new_hook
2796 tc_symbol_new_hook (symbolP);
2797 #endif
2798
2799 #ifdef DEBUG_SYMS
2800 verify_symbol_chain (symbol_rootP, symbol_lastP);
2801 #endif /* DEBUG_SYMS */
2802 }
2803
2804
2805 static void
2806 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2807 {
2808 unsigned int entry;
2809 literal_pool * pool;
2810 char sym_name[20];
2811
2812 pool = find_literal_pool ();
2813 if (pool == NULL
2814 || pool->symbol == NULL
2815 || pool->next_free_entry == 0)
2816 return;
2817
2818 mapping_state (MAP_DATA);
2819
2820 /* Align the pool, since we have word accesses.
2821 Only make a frag if we have to. */
2822 if (!need_pass_2)
2823 frag_align (2, 0, 0);
2824
2825 record_alignment (now_seg, 2);
2826
2827 sprintf (sym_name, "$$lit_\002%x", pool->id);
2828
2829 symbol_locate (pool->symbol, sym_name, now_seg,
2830 (valueT) frag_now_fix (), frag_now);
2831 symbol_table_insert (pool->symbol);
2832
2833 ARM_SET_THUMB (pool->symbol, thumb_mode);
2834
2835 #if defined OBJ_COFF || defined OBJ_ELF
2836 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2837 #endif
2838
2839 for (entry = 0; entry < pool->next_free_entry; entry ++)
2840 /* First output the expression in the instruction to the pool. */
2841 emit_expr (&(pool->literals[entry]), 4); /* .word */
2842
2843 /* Mark the pool as empty. */
2844 pool->next_free_entry = 0;
2845 pool->symbol = NULL;
2846 }
2847
2848 #ifdef OBJ_ELF
2849 /* Forward declarations for functions below, in the MD interface
2850 section. */
2851 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2852 static valueT create_unwind_entry (int);
2853 static void start_unwind_section (const segT, int);
2854 static void add_unwind_opcode (valueT, int);
2855 static void flush_pending_unwind (void);
2856
2857 /* Directives: Data. */
2858
2859 static void
2860 s_arm_elf_cons (int nbytes)
2861 {
2862 expressionS exp;
2863
2864 #ifdef md_flush_pending_output
2865 md_flush_pending_output ();
2866 #endif
2867
2868 if (is_it_end_of_statement ())
2869 {
2870 demand_empty_rest_of_line ();
2871 return;
2872 }
2873
2874 #ifdef md_cons_align
2875 md_cons_align (nbytes);
2876 #endif
2877
2878 mapping_state (MAP_DATA);
2879 do
2880 {
2881 int reloc;
2882 char *base = input_line_pointer;
2883
2884 expression (& exp);
2885
2886 if (exp.X_op != O_symbol)
2887 emit_expr (&exp, (unsigned int) nbytes);
2888 else
2889 {
2890 char *before_reloc = input_line_pointer;
2891 reloc = parse_reloc (&input_line_pointer);
2892 if (reloc == -1)
2893 {
2894 as_bad (_("unrecognized relocation suffix"));
2895 ignore_rest_of_line ();
2896 return;
2897 }
2898 else if (reloc == BFD_RELOC_UNUSED)
2899 emit_expr (&exp, (unsigned int) nbytes);
2900 else
2901 {
2902 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2903 int size = bfd_get_reloc_size (howto);
2904
2905 if (reloc == BFD_RELOC_ARM_PLT32)
2906 {
2907 as_bad (_("(plt) is only valid on branch targets"));
2908 reloc = BFD_RELOC_UNUSED;
2909 size = 0;
2910 }
2911
2912 if (size > nbytes)
2913 as_bad (_("%s relocations do not fit in %d bytes"),
2914 howto->name, nbytes);
2915 else
2916 {
2917 /* We've parsed an expression stopping at O_symbol.
2918 But there may be more expression left now that we
2919 have parsed the relocation marker. Parse it again.
2920 XXX Surely there is a cleaner way to do this. */
2921 char *p = input_line_pointer;
2922 int offset;
2923 char *save_buf = alloca (input_line_pointer - base);
2924 memcpy (save_buf, base, input_line_pointer - base);
2925 memmove (base + (input_line_pointer - before_reloc),
2926 base, before_reloc - base);
2927
2928 input_line_pointer = base + (input_line_pointer - before_reloc);
2929 expression (&exp);
2930 memcpy (base, save_buf, p - base);
2931
2932 offset = nbytes - size;
2933 p = frag_more ((int) nbytes);
2934 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2935 size, &exp, 0, reloc);
2936 }
2937 }
2938 }
2939 }
2940 while (*input_line_pointer++ == ',');
2941
2942 /* Put terminator back into stream. */
2943 input_line_pointer --;
2944 demand_empty_rest_of_line ();
2945 }
2946
2947
2948 /* Parse a .rel31 directive. */
2949
2950 static void
2951 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2952 {
2953 expressionS exp;
2954 char *p;
2955 valueT highbit;
2956
2957 highbit = 0;
2958 if (*input_line_pointer == '1')
2959 highbit = 0x80000000;
2960 else if (*input_line_pointer != '0')
2961 as_bad (_("expected 0 or 1"));
2962
2963 input_line_pointer++;
2964 if (*input_line_pointer != ',')
2965 as_bad (_("missing comma"));
2966 input_line_pointer++;
2967
2968 #ifdef md_flush_pending_output
2969 md_flush_pending_output ();
2970 #endif
2971
2972 #ifdef md_cons_align
2973 md_cons_align (4);
2974 #endif
2975
2976 mapping_state (MAP_DATA);
2977
2978 expression (&exp);
2979
2980 p = frag_more (4);
2981 md_number_to_chars (p, highbit, 4);
2982 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
2983 BFD_RELOC_ARM_PREL31);
2984
2985 demand_empty_rest_of_line ();
2986 }
2987
2988 /* Directives: AEABI stack-unwind tables. */
2989
2990 /* Parse an unwind_fnstart directive. Simply records the current location. */
2991
2992 static void
2993 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
2994 {
2995 demand_empty_rest_of_line ();
2996 /* Mark the start of the function. */
2997 unwind.proc_start = expr_build_dot ();
2998
2999 /* Reset the rest of the unwind info. */
3000 unwind.opcode_count = 0;
3001 unwind.table_entry = NULL;
3002 unwind.personality_routine = NULL;
3003 unwind.personality_index = -1;
3004 unwind.frame_size = 0;
3005 unwind.fp_offset = 0;
3006 unwind.fp_reg = 13;
3007 unwind.fp_used = 0;
3008 unwind.sp_restored = 0;
3009 }
3010
3011
3012 /* Parse a handlerdata directive. Creates the exception handling table entry
3013 for the function. */
3014
3015 static void
3016 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3017 {
3018 demand_empty_rest_of_line ();
3019 if (unwind.table_entry)
3020 as_bad (_("dupicate .handlerdata directive"));
3021
3022 create_unwind_entry (1);
3023 }
3024
3025 /* Parse an unwind_fnend directive. Generates the index table entry. */
3026
3027 static void
3028 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3029 {
3030 long where;
3031 char *ptr;
3032 valueT val;
3033
3034 demand_empty_rest_of_line ();
3035
3036 /* Add eh table entry. */
3037 if (unwind.table_entry == NULL)
3038 val = create_unwind_entry (0);
3039 else
3040 val = 0;
3041
3042 /* Add index table entry. This is two words. */
3043 start_unwind_section (unwind.saved_seg, 1);
3044 frag_align (2, 0, 0);
3045 record_alignment (now_seg, 2);
3046
3047 ptr = frag_more (8);
3048 where = frag_now_fix () - 8;
3049
3050 /* Self relative offset of the function start. */
3051 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3052 BFD_RELOC_ARM_PREL31);
3053
3054 /* Indicate dependency on EHABI-defined personality routines to the
3055 linker, if it hasn't been done already. */
3056 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3057 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3058 {
3059 static const char *const name[] = {
3060 "__aeabi_unwind_cpp_pr0",
3061 "__aeabi_unwind_cpp_pr1",
3062 "__aeabi_unwind_cpp_pr2"
3063 };
3064 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3065 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3066 marked_pr_dependency |= 1 << unwind.personality_index;
3067 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3068 = marked_pr_dependency;
3069 }
3070
3071 if (val)
3072 /* Inline exception table entry. */
3073 md_number_to_chars (ptr + 4, val, 4);
3074 else
3075 /* Self relative offset of the table entry. */
3076 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3077 BFD_RELOC_ARM_PREL31);
3078
3079 /* Restore the original section. */
3080 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3081 }
3082
3083
3084 /* Parse an unwind_cantunwind directive. */
3085
3086 static void
3087 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3088 {
3089 demand_empty_rest_of_line ();
3090 if (unwind.personality_routine || unwind.personality_index != -1)
3091 as_bad (_("personality routine specified for cantunwind frame"));
3092
3093 unwind.personality_index = -2;
3094 }
3095
3096
3097 /* Parse a personalityindex directive. */
3098
3099 static void
3100 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3101 {
3102 expressionS exp;
3103
3104 if (unwind.personality_routine || unwind.personality_index != -1)
3105 as_bad (_("duplicate .personalityindex directive"));
3106
3107 expression (&exp);
3108
3109 if (exp.X_op != O_constant
3110 || exp.X_add_number < 0 || exp.X_add_number > 15)
3111 {
3112 as_bad (_("bad personality routine number"));
3113 ignore_rest_of_line ();
3114 return;
3115 }
3116
3117 unwind.personality_index = exp.X_add_number;
3118
3119 demand_empty_rest_of_line ();
3120 }
3121
3122
3123 /* Parse a personality directive. */
3124
3125 static void
3126 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3127 {
3128 char *name, *p, c;
3129
3130 if (unwind.personality_routine || unwind.personality_index != -1)
3131 as_bad (_("duplicate .personality directive"));
3132
3133 name = input_line_pointer;
3134 c = get_symbol_end ();
3135 p = input_line_pointer;
3136 unwind.personality_routine = symbol_find_or_make (name);
3137 *p = c;
3138 demand_empty_rest_of_line ();
3139 }
3140
3141
3142 /* Parse a directive saving core registers. */
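/* For example (illustrative):

     .save {r4-r7, lr}

   is a run of four registers starting at r4 plus r14, so the short form
   below applies: op = 0xa8 | (4 - 1) = 0xab, a single opcode byte.  A
   non-contiguous list such as {r4, r6} needs the two-byte long form
   (0x8000 | mask of r4-r15).  */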
3143
3144 static void
3145 s_arm_unwind_save_core (void)
3146 {
3147 valueT op;
3148 long range;
3149 int n;
3150
3151 range = parse_reg_list (&input_line_pointer);
3152 if (range == FAIL)
3153 {
3154 as_bad (_("expected register list"));
3155 ignore_rest_of_line ();
3156 return;
3157 }
3158
3159 demand_empty_rest_of_line ();
3160
3161 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3162 into .unwind_save {..., sp...}. We aren't bothered about the value of
3163 ip because it is clobbered by calls. */
3164 if (unwind.sp_restored && unwind.fp_reg == 12
3165 && (range & 0x3000) == 0x1000)
3166 {
3167 unwind.opcode_count--;
3168 unwind.sp_restored = 0;
3169 range = (range | 0x2000) & ~0x1000;
3170 unwind.pending_offset = 0;
3171 }
3172
3173 /* Pop r4-r15. */
3174 if (range & 0xfff0)
3175 {
3176 /* See if we can use the short opcodes. These pop a block of up to 8
3177 registers starting with r4, plus maybe r14. */
3178 for (n = 0; n < 8; n++)
3179 {
3180 /* Break at the first non-saved register. */
3181 if ((range & (1 << (n + 4))) == 0)
3182 break;
3183 }
3184 /* See if there are any other bits set. */
3185 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3186 {
3187 /* Use the long form. */
3188 op = 0x8000 | ((range >> 4) & 0xfff);
3189 add_unwind_opcode (op, 2);
3190 }
3191 else
3192 {
3193 /* Use the short form. */
3194 if (range & 0x4000)
3195 op = 0xa8; /* Pop r14. */
3196 else
3197 op = 0xa0; /* Do not pop r14. */
3198 op |= (n - 1);
3199 add_unwind_opcode (op, 1);
3200 }
3201 }
3202
3203 /* Pop r0-r3. */
3204 if (range & 0xf)
3205 {
3206 op = 0xb100 | (range & 0xf);
3207 add_unwind_opcode (op, 2);
3208 }
3209
3210 /* Record the number of bytes pushed. */
3211 for (n = 0; n < 16; n++)
3212 {
3213 if (range & (1 << n))
3214 unwind.frame_size += 4;
3215 }
3216 }
3217
3218
3219 /* Parse a directive saving FPA registers. */
3220
3221 static void
3222 s_arm_unwind_save_fpa (int reg)
3223 {
3224 expressionS exp;
3225 int num_regs;
3226 valueT op;
3227
3228 /* Get the number of registers to transfer. */
3229 if (skip_past_comma (&input_line_pointer) != FAIL)
3230 expression (&exp);
3231 else
3232 exp.X_op = O_illegal;
3233
3234 if (exp.X_op != O_constant)
3235 {
3236 as_bad (_("expected , <constant>"));
3237 ignore_rest_of_line ();
3238 return;
3239 }
3240
3241 num_regs = exp.X_add_number;
3242
3243 if (num_regs < 1 || num_regs > 4)
3244 {
3245 as_bad (_("number of registers must be in the range [1:4]"));
3246 ignore_rest_of_line ();
3247 return;
3248 }
3249
3250 demand_empty_rest_of_line ();
3251
3252 if (reg == 4)
3253 {
3254 /* Short form. */
3255 op = 0xb4 | (num_regs - 1);
3256 add_unwind_opcode (op, 1);
3257 }
3258 else
3259 {
3260 /* Long form. */
3261 op = 0xc800 | (reg << 4) | (num_regs - 1);
3262 add_unwind_opcode (op, 2);
3263 }
3264 unwind.frame_size += num_regs * 12;
3265 }
3266
3267
3268 /* Parse a directive saving VFP registers. */
3269
3270 static void
3271 s_arm_unwind_save_vfp (void)
3272 {
3273 int count;
3274 unsigned int reg;
3275 valueT op;
3276
3277 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3278 if (count == FAIL)
3279 {
3280 as_bad (_("expected register list"));
3281 ignore_rest_of_line ();
3282 return;
3283 }
3284
3285 demand_empty_rest_of_line ();
3286
3287 if (reg == 8)
3288 {
3289 /* Short form. */
3290 op = 0xb8 | (count - 1);
3291 add_unwind_opcode (op, 1);
3292 }
3293 else
3294 {
3295 /* Long form. */
3296 op = 0xb300 | (reg << 4) | (count - 1);
3297 add_unwind_opcode (op, 2);
3298 }
3299 unwind.frame_size += count * 8 + 4;
3300 }
3301
3302
3303 /* Parse a directive saving iWMMXt data registers. */
3304
3305 static void
3306 s_arm_unwind_save_mmxwr (void)
3307 {
3308 int reg;
3309 int hi_reg;
3310 int i;
3311 unsigned mask = 0;
3312 valueT op;
3313
3314 if (*input_line_pointer == '{')
3315 input_line_pointer++;
3316
3317 do
3318 {
3319 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3320
3321 if (reg == FAIL)
3322 {
3323 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3324 goto error;
3325 }
3326
3327 if (mask >> reg)
3328 as_tsktsk (_("register list not in ascending order"));
3329 mask |= 1 << reg;
3330
3331 if (*input_line_pointer == '-')
3332 {
3333 input_line_pointer++;
3334 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3335 if (hi_reg == FAIL)
3336 {
3337 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3338 goto error;
3339 }
3340 else if (reg >= hi_reg)
3341 {
3342 as_bad (_("bad register range"));
3343 goto error;
3344 }
3345 for (; reg <= hi_reg; reg++)
3346 mask |= 1 << reg;
3347 }
3348 }
3349 while (skip_past_comma (&input_line_pointer) != FAIL);
3350
3351 if (*input_line_pointer == '}')
3352 input_line_pointer++;
3353
3354 demand_empty_rest_of_line ();
3355
3356 /* Generate any deferred opcodes because we're going to be looking at
3357 the list. */
3358 flush_pending_unwind ();
3359
3360 for (i = 0; i < 16; i++)
3361 {
3362 if (mask & (1 << i))
3363 unwind.frame_size += 8;
3364 }
3365
3366 /* Attempt to combine with a previous opcode. We do this because gcc
3367 likes to output separate unwind directives for a single block of
3368 registers. */
3369 if (unwind.opcode_count > 0)
3370 {
3371 i = unwind.opcodes[unwind.opcode_count - 1];
3372 if ((i & 0xf8) == 0xc0)
3373 {
3374 i &= 7;
3375 /* Only merge if the blocks are contiguous. */
3376 if (i < 6)
3377 {
3378 if ((mask & 0xfe00) == (1 << 9))
3379 {
3380 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3381 unwind.opcode_count--;
3382 }
3383 }
3384 else if (i == 6 && unwind.opcode_count >= 2)
3385 {
3386 i = unwind.opcodes[unwind.opcode_count - 2];
3387 reg = i >> 4;
3388 i &= 0xf;
3389
3390 op = 0xffff << (reg - 1);
3391 if (reg > 0
3392 && ((mask & op) == (1u << (reg - 1))))
3393 {
3394 op = (1 << (reg + i + 1)) - 1;
3395 op &= ~((1 << reg) - 1);
3396 mask |= op;
3397 unwind.opcode_count -= 2;
3398 }
3399 }
3400 }
3401 }
3402
3403 hi_reg = 15;
3404 /* We want to generate opcodes in the order the registers have been
3405 saved, i.e. descending order. */
3406 for (reg = 15; reg >= -1; reg--)
3407 {
3408 /* Save registers in blocks. */
3409 if (reg < 0
3410 || !(mask & (1 << reg)))
3411 {
3412 /* We found an unsaved reg. Generate opcodes to save the
3413 preceding block. */
3414 if (reg != hi_reg)
3415 {
3416 if (reg == 9)
3417 {
3418 /* Short form. */
3419 op = 0xc0 | (hi_reg - 10);
3420 add_unwind_opcode (op, 1);
3421 }
3422 else
3423 {
3424 /* Long form. */
3425 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3426 add_unwind_opcode (op, 2);
3427 }
3428 }
3429 hi_reg = reg - 1;
3430 }
3431 }
3432
3433 return;
3434 error:
3435 ignore_rest_of_line ();
3436 }
3437
3438 static void
3439 s_arm_unwind_save_mmxwcg (void)
3440 {
3441 int reg;
3442 int hi_reg;
3443 unsigned mask = 0;
3444 valueT op;
3445
3446 if (*input_line_pointer == '{')
3447 input_line_pointer++;
3448
3449 do
3450 {
3451 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3452
3453 if (reg == FAIL)
3454 {
3455 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3456 goto error;
3457 }
3458
3459 reg -= 8;
3460 if (mask >> reg)
3461 as_tsktsk (_("register list not in ascending order"));
3462 mask |= 1 << reg;
3463
3464 if (*input_line_pointer == '-')
3465 {
3466 input_line_pointer++;
3467 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3468 if (hi_reg == FAIL)
3469 {
3470 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3471 goto error;
3472 }
3473 else if (reg >= hi_reg - 8)
3474 {
3475 as_bad (_("bad register range"));
3476 goto error;
3477 }
3478 for (; reg <= hi_reg - 8; reg++)
3479 mask |= 1 << reg;
3480 }
3481 }
3482 while (skip_past_comma (&input_line_pointer) != FAIL);
3483
3484 if (*input_line_pointer == '}')
3485 input_line_pointer++;
3486
3487 demand_empty_rest_of_line ();
3488
3489 /* Generate any deferred opcodes because we're going to be looking at
3490 the list. */
3491 flush_pending_unwind ();
3492
3493 for (reg = 0; reg < 16; reg++)
3494 {
3495 if (mask & (1 << reg))
3496 unwind.frame_size += 4;
3497 }
3498 op = 0xc700 | mask;
3499 add_unwind_opcode (op, 2);
3500 return;
3501 error:
3502 ignore_rest_of_line ();
3503 }
3504
3505
3506 /* Parse an unwind_save directive. */
3507
3508 static void
3509 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED)
3510 {
3511 char *peek;
3512 struct reg_entry *reg;
3513 bfd_boolean had_brace = FALSE;
3514
3515 /* Figure out what sort of save we have. */
3516 peek = input_line_pointer;
3517
3518 if (*peek == '{')
3519 {
3520 had_brace = TRUE;
3521 peek++;
3522 }
3523
3524 reg = arm_reg_parse_multi (&peek);
3525
3526 if (!reg)
3527 {
3528 as_bad (_("register expected"));
3529 ignore_rest_of_line ();
3530 return;
3531 }
3532
3533 switch (reg->type)
3534 {
3535 case REG_TYPE_FN:
3536 if (had_brace)
3537 {
3538 as_bad (_("FPA .unwind_save does not take a register list"));
3539 ignore_rest_of_line ();
3540 return;
3541 }
3542 s_arm_unwind_save_fpa (reg->number);
3543 return;
3544
3545 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3546 case REG_TYPE_VFD: s_arm_unwind_save_vfp (); return;
3547 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3548 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3549
3550 default:
3551 as_bad (_(".unwind_save does not support this kind of register"));
3552 ignore_rest_of_line ();
3553 }
3554 }
3555
3556
3557 /* Parse an unwind_movsp directive. */
3558
3559 static void
3560 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3561 {
3562 int reg;
3563 valueT op;
3564
3565 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3566 if (reg == FAIL)
3567 {
3568 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3569 ignore_rest_of_line ();
3570 return;
3571 }
3572 demand_empty_rest_of_line ();
3573
3574 if (reg == REG_SP || reg == REG_PC)
3575 {
3576 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3577 return;
3578 }
3579
3580 if (unwind.fp_reg != REG_SP)
3581 as_bad (_("unexpected .unwind_movsp directive"));
3582
3583 /* Generate opcode to restore the value. */
3584 op = 0x90 | reg;
3585 add_unwind_opcode (op, 1);
3586
3587 /* Record the information for later. */
3588 unwind.fp_reg = reg;
3589 unwind.fp_offset = unwind.frame_size;
3590 unwind.sp_restored = 1;
3591 }
3592
3593 /* Parse an unwind_pad directive. */
3594
3595 static void
3596 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3597 {
3598 int offset;
3599
3600 if (immediate_for_directive (&offset) == FAIL)
3601 return;
3602
3603 if (offset & 3)
3604 {
3605 as_bad (_("stack increment must be multiple of 4"));
3606 ignore_rest_of_line ();
3607 return;
3608 }
3609
3610 /* Don't generate any opcodes, just record the details for later. */
3611 unwind.frame_size += offset;
3612 unwind.pending_offset += offset;
3613
3614 demand_empty_rest_of_line ();
3615 }
3616
3617 /* Parse an unwind_setfp directive. */
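/* For example, if the prologue has already pushed 16 bytes
   (unwind.frame_size == 16), then

     .setfp fp, sp, #8

   records fp = sp + 8, i.e. fp_offset = 16 - 8 = 8 bytes below the sp
   value on entry.  No opcode is emitted here; the recorded reg/offset
   pair is used later when sp is restored from the frame pointer.  */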
3618
3619 static void
3620 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3621 {
3622 int sp_reg;
3623 int fp_reg;
3624 int offset;
3625
3626 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3627 if (skip_past_comma (&input_line_pointer) == FAIL)
3628 sp_reg = FAIL;
3629 else
3630 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3631
3632 if (fp_reg == FAIL || sp_reg == FAIL)
3633 {
3634 as_bad (_("expected <reg>, <reg>"));
3635 ignore_rest_of_line ();
3636 return;
3637 }
3638
3639 /* Optional constant. */
3640 if (skip_past_comma (&input_line_pointer) != FAIL)
3641 {
3642 if (immediate_for_directive (&offset) == FAIL)
3643 return;
3644 }
3645 else
3646 offset = 0;
3647
3648 demand_empty_rest_of_line ();
3649
3650 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3651 {
3652 as_bad (_("register must be either sp or set by a previous"
3653 "unwind_movsp directive"));
3654 return;
3655 }
3656
3657 /* Don't generate any opcodes, just record the information for later. */
3658 unwind.fp_reg = fp_reg;
3659 unwind.fp_used = 1;
3660 if (sp_reg == 13)
3661 unwind.fp_offset = unwind.frame_size - offset;
3662 else
3663 unwind.fp_offset -= offset;
3664 }
3665
3666 /* Parse an unwind_raw directive. */
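/* For example (illustrative), the directive

     .unwind_raw 16, 0xb1, 0x0f

   adjusts the recorded frame size by 16 bytes and emits the raw unwind
   opcode bytes 0xb1 0x0f, which is roughly what .save {r0-r3} would
   have generated.  */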
3667
3668 static void
3669 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3670 {
3671 expressionS exp;
3672 /* This is an arbitrary limit. */
3673 unsigned char op[16];
3674 int count;
3675
3676 expression (&exp);
3677 if (exp.X_op == O_constant
3678 && skip_past_comma (&input_line_pointer) != FAIL)
3679 {
3680 unwind.frame_size += exp.X_add_number;
3681 expression (&exp);
3682 }
3683 else
3684 exp.X_op = O_illegal;
3685
3686 if (exp.X_op != O_constant)
3687 {
3688 as_bad (_("expected <offset>, <opcode>"));
3689 ignore_rest_of_line ();
3690 return;
3691 }
3692
3693 count = 0;
3694
3695 /* Parse the opcode. */
3696 for (;;)
3697 {
3698 if (count >= 16)
3699 {
3700 as_bad (_("unwind opcode too long"));
3701 ignore_rest_of_line ();
return;
3702 }
3703 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3704 {
3705 as_bad (_("invalid unwind opcode"));
3706 ignore_rest_of_line ();
3707 return;
3708 }
3709 op[count++] = exp.X_add_number;
3710
3711 /* Parse the next byte. */
3712 if (skip_past_comma (&input_line_pointer) == FAIL)
3713 break;
3714
3715 expression (&exp);
3716 }
3717
3718 /* Add the opcode bytes in reverse order. */
3719 while (count--)
3720 add_unwind_opcode (op[count], 1);
3721
3722 demand_empty_rest_of_line ();
3723 }
3724
3725
3726 /* Parse a .eabi_attribute directive. */
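/* For example (tag numbers purely illustrative):

     .eabi_attribute 6, 2              @ integer-valued tag
     .eabi_attribute 5, "ARM7TDMI"     @ string-valued tag (4, 5, 32, odd tags > 32)

   Tag 32 (Tag_compatibility) takes an integer followed by a string.  */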
3727
3728 static void
3729 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3730 {
3731 expressionS exp;
3732 bfd_boolean is_string;
3733 int tag;
3734 unsigned int i = 0;
3735 char *s = NULL;
3736 char saved_char;
3737
3738 expression (& exp);
3739 if (exp.X_op != O_constant)
3740 goto bad;
3741
3742 tag = exp.X_add_number;
3743 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3744 is_string = 1;
3745 else
3746 is_string = 0;
3747
3748 if (skip_past_comma (&input_line_pointer) == FAIL)
3749 goto bad;
3750 if (tag == 32 || !is_string)
3751 {
3752 expression (& exp);
3753 if (exp.X_op != O_constant)
3754 {
3755 as_bad (_("expected numeric constant"));
3756 ignore_rest_of_line ();
3757 return;
3758 }
3759 i = exp.X_add_number;
3760 }
3761 if (tag == Tag_compatibility
3762 && skip_past_comma (&input_line_pointer) == FAIL)
3763 {
3764 as_bad (_("expected comma"));
3765 ignore_rest_of_line ();
3766 return;
3767 }
3768 if (is_string)
3769 {
3770 skip_whitespace (input_line_pointer);
3771 if (*input_line_pointer != '"')
3772 goto bad_string;
3773 input_line_pointer++;
3774 s = input_line_pointer;
3775 while (*input_line_pointer && *input_line_pointer != '"')
3776 input_line_pointer++;
3777 if (*input_line_pointer != '"')
3778 goto bad_string;
3779 saved_char = *input_line_pointer;
3780 *input_line_pointer = 0;
3781 }
3782 else
3783 {
3784 s = NULL;
3785 saved_char = 0;
3786 }
3787
3788 if (tag == Tag_compatibility)
3789 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3790 else if (is_string)
3791 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3792 else
3793 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3794
3795 if (s)
3796 {
3797 *input_line_pointer = saved_char;
3798 input_line_pointer++;
3799 }
3800 demand_empty_rest_of_line ();
3801 return;
3802 bad_string:
3803 as_bad (_("bad string constant"));
3804 ignore_rest_of_line ();
3805 return;
3806 bad:
3807 as_bad (_("expected <tag> , <value>"));
3808 ignore_rest_of_line ();
3809 }
3810 #endif /* OBJ_ELF */
3811
3812 static void s_arm_arch (int);
3813 static void s_arm_cpu (int);
3814 static void s_arm_fpu (int);
3815
3816 /* This table describes all the machine specific pseudo-ops the assembler
3817 has to support. The fields are:
3818 pseudo-op name without dot
3819 function to call to execute this pseudo-op
3820 Integer arg to pass to the function. */
3821
3822 const pseudo_typeS md_pseudo_table[] =
3823 {
3824 /* Never called because '.req' does not start a line. */
3825 { "req", s_req, 0 },
3826 /* Following two are likewise never called. */
3827 { "dn", s_dn, 0 },
3828 { "qn", s_qn, 0 },
3829 { "unreq", s_unreq, 0 },
3830 { "bss", s_bss, 0 },
3831 { "align", s_align, 0 },
3832 { "arm", s_arm, 0 },
3833 { "thumb", s_thumb, 0 },
3834 { "code", s_code, 0 },
3835 { "force_thumb", s_force_thumb, 0 },
3836 { "thumb_func", s_thumb_func, 0 },
3837 { "thumb_set", s_thumb_set, 0 },
3838 { "even", s_even, 0 },
3839 { "ltorg", s_ltorg, 0 },
3840 { "pool", s_ltorg, 0 },
3841 { "syntax", s_syntax, 0 },
3842 { "cpu", s_arm_cpu, 0 },
3843 { "arch", s_arm_arch, 0 },
3844 { "fpu", s_arm_fpu, 0 },
3845 #ifdef OBJ_ELF
3846 { "word", s_arm_elf_cons, 4 },
3847 { "long", s_arm_elf_cons, 4 },
3848 { "rel31", s_arm_rel31, 0 },
3849 { "fnstart", s_arm_unwind_fnstart, 0 },
3850 { "fnend", s_arm_unwind_fnend, 0 },
3851 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3852 { "personality", s_arm_unwind_personality, 0 },
3853 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3854 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3855 { "save", s_arm_unwind_save, 0 },
3856 { "movsp", s_arm_unwind_movsp, 0 },
3857 { "pad", s_arm_unwind_pad, 0 },
3858 { "setfp", s_arm_unwind_setfp, 0 },
3859 { "unwind_raw", s_arm_unwind_raw, 0 },
3860 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3861 #else
3862 { "word", cons, 4},
3863 #endif
3864 { "extend", float_cons, 'x' },
3865 { "ldouble", float_cons, 'x' },
3866 { "packed", float_cons, 'p' },
3867 { 0, 0, 0 }
3868 };
3869 \f
3870 /* Parser functions used exclusively in instruction operands. */
3871
3872 /* Generic immediate-value read function for use in insn parsing.
3873 STR points to the beginning of the immediate (the leading #);
3874 VAL receives the value; if the value is outside [MIN, MAX]
3875 issue an error. PREFIX_OPT is true if the immediate prefix is
3876 optional. */
3877
3878 static int
3879 parse_immediate (char **str, int *val, int min, int max,
3880 bfd_boolean prefix_opt)
3881 {
3882 expressionS exp;
3883 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3884 if (exp.X_op != O_constant)
3885 {
3886 inst.error = _("constant expression required");
3887 return FAIL;
3888 }
3889
3890 if (exp.X_add_number < min || exp.X_add_number > max)
3891 {
3892 inst.error = _("immediate value out of range");
3893 return FAIL;
3894 }
3895
3896 *val = exp.X_add_number;
3897 return SUCCESS;
3898 }
3899
3900 /* Less-generic immediate-value read function with the possibility of loading a
3901 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3902 instructions. Puts the result directly in inst.operands[i]. */
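/* For instance, a 64-bit Neon immediate such as #0x00ff00ff00ff00ff comes
   back from the expression code as an O_big bignum; the low 32 bits are
   stored in inst.operands[i].imm, the high 32 bits in inst.operands[i].reg,
   and .regisimm marks the pair as holding one immediate rather than a
   register number.  */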
3903
3904 static int
3905 parse_big_immediate (char **str, int i)
3906 {
3907 expressionS exp;
3908 char *ptr = *str;
3909
3910 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
3911
3912 if (exp.X_op == O_constant)
3913 inst.operands[i].imm = exp.X_add_number;
3914 else if (exp.X_op == O_big
3915 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
3916 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
3917 {
3918 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
3919 /* Bignums have their least significant bits in
3920 generic_bignum[0]. Make sure we put 32 bits in imm and
3921 32 bits in reg, in a (hopefully) portable way. */
3922 assert (parts != 0);
3923 inst.operands[i].imm = 0;
3924 for (j = 0; j < parts; j++, idx++)
3925 inst.operands[i].imm |= generic_bignum[idx]
3926 << (LITTLENUM_NUMBER_OF_BITS * j);
3927 inst.operands[i].reg = 0;
3928 for (j = 0; j < parts; j++, idx++)
3929 inst.operands[i].reg |= generic_bignum[idx]
3930 << (LITTLENUM_NUMBER_OF_BITS * j);
3931 inst.operands[i].regisimm = 1;
3932 }
3933 else
3934 return FAIL;
3935
3936 *str = ptr;
3937
3938 return SUCCESS;
3939 }
3940
3941 /* Returns the pseudo-register number of an FPA immediate constant,
3942 or FAIL if there isn't a valid constant here. */
3943
3944 static int
3945 parse_fpa_immediate (char ** str)
3946 {
3947 LITTLENUM_TYPE words[MAX_LITTLENUMS];
3948 char * save_in;
3949 expressionS exp;
3950 int i;
3951 int j;
3952
3953 /* First try to match exact strings; this is to guarantee
3954 that some formats will work even for cross assembly. */
3955
3956 for (i = 0; fp_const[i]; i++)
3957 {
3958 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
3959 {
3960 char *start = *str;
3961
3962 *str += strlen (fp_const[i]);
3963 if (is_end_of_line[(unsigned char) **str])
3964 return i + 8;
3965 *str = start;
3966 }
3967 }
3968
3969 /* Just because we didn't get a match doesn't mean that the constant
3970 isn't valid, just that it is in a format that we don't
3971 automatically recognize. Try parsing it with the standard
3972 expression routines. */
3973
3974 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
3975
3976 /* Look for a raw floating point number. */
3977 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
3978 && is_end_of_line[(unsigned char) *save_in])
3979 {
3980 for (i = 0; i < NUM_FLOAT_VALS; i++)
3981 {
3982 for (j = 0; j < MAX_LITTLENUMS; j++)
3983 {
3984 if (words[j] != fp_values[i][j])
3985 break;
3986 }
3987
3988 if (j == MAX_LITTLENUMS)
3989 {
3990 *str = save_in;
3991 return i + 8;
3992 }
3993 }
3994 }
3995
3996 /* Try to parse a more complex expression; this will probably fail
3997 unless the code uses a floating point prefix (e.g. "0f"). */
3998 save_in = input_line_pointer;
3999 input_line_pointer = *str;
4000 if (expression (&exp) == absolute_section
4001 && exp.X_op == O_big
4002 && exp.X_add_number < 0)
4003 {
4004 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4005 Ditto for 15. */
4006 if (gen_to_words (words, 5, (long) 15) == 0)
4007 {
4008 for (i = 0; i < NUM_FLOAT_VALS; i++)
4009 {
4010 for (j = 0; j < MAX_LITTLENUMS; j++)
4011 {
4012 if (words[j] != fp_values[i][j])
4013 break;
4014 }
4015
4016 if (j == MAX_LITTLENUMS)
4017 {
4018 *str = input_line_pointer;
4019 input_line_pointer = save_in;
4020 return i + 8;
4021 }
4022 }
4023 }
4024 }
4025
4026 *str = input_line_pointer;
4027 input_line_pointer = save_in;
4028 inst.error = _("invalid FPA immediate expression");
4029 return FAIL;
4030 }
4031
4032 /* Returns 1 if a number has "quarter-precision" float format
4033 0baBbbbbbc defgh000 00000000 00000000. */
4034
4035 static int
4036 is_quarter_float (unsigned imm)
4037 {
4038 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4039 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4040 }
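/* As a worked example of the check above: 1.0f is 0x3f800000 (sign 0,
   exponent 0x7f, mantissa 0); bit 29 is set so bs == 0x3e000000, bits
   30-25 equal 0x3e000000 and the low 19 bits are zero, so it is accepted.
   0.1f (0x3dcccccd) has low mantissa bits set and is rejected.  */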
4041
4042 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4043 0baBbbbbbc defgh000 00000000 00000000.
4044 The minus-zero case needs special handling, since it can't be encoded in the
4045 "quarter-precision" float format, but can nonetheless be loaded as an integer
4046 constant. */
4047
4048 static unsigned
4049 parse_qfloat_immediate (char **ccp, int *immed)
4050 {
4051 char *str = *ccp;
4052 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4053
4054 skip_past_char (&str, '#');
4055
4056 if ((str = atof_ieee (str, 's', words)) != NULL)
4057 {
4058 unsigned fpword = 0;
4059 int i;
4060
4061 /* Our FP word must be 32 bits (single-precision FP). */
4062 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4063 {
4064 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4065 fpword |= words[i];
4066 }
4067
4068 if (is_quarter_float (fpword) || fpword == 0x80000000)
4069 *immed = fpword;
4070 else
4071 return FAIL;
4072
4073 *ccp = str;
4074
4075 return SUCCESS;
4076 }
4077
4078 return FAIL;
4079 }
4080
4081 /* Shift operands. */
4082 enum shift_kind
4083 {
4084 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4085 };
4086
4087 struct asm_shift_name
4088 {
4089 const char *name;
4090 enum shift_kind kind;
4091 };
4092
4093 /* Third argument to parse_shift. */
4094 enum parse_shift_mode
4095 {
4096 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4097 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4098 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4099 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4100 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4101 };
4102
4103 /* Parse a <shift> specifier on an ARM data processing instruction.
4104 This has three forms:
4105
4106 (LSL|LSR|ASL|ASR|ROR) Rs
4107 (LSL|LSR|ASL|ASR|ROR) #imm
4108 RRX
4109
4110 Note that ASL is assimilated to LSL in the instruction encoding, and
4111 RRX to ROR #0 (which cannot be written as such). */
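/* For instance, in "add r0, r1, r2, lsl #3" the shift amount ends up in
   inst.reloc.exp, whereas in "add r0, r1, r2, lsl r3" the register r3 is
   stored in inst.operands[i].imm with .immisreg set.  */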
4112
4113 static int
4114 parse_shift (char **str, int i, enum parse_shift_mode mode)
4115 {
4116 const struct asm_shift_name *shift_name;
4117 enum shift_kind shift;
4118 char *s = *str;
4119 char *p = s;
4120 int reg;
4121
4122 for (p = *str; ISALPHA (*p); p++)
4123 ;
4124
4125 if (p == *str)
4126 {
4127 inst.error = _("shift expression expected");
4128 return FAIL;
4129 }
4130
4131 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4132
4133 if (shift_name == NULL)
4134 {
4135 inst.error = _("shift expression expected");
4136 return FAIL;
4137 }
4138
4139 shift = shift_name->kind;
4140
4141 switch (mode)
4142 {
4143 case NO_SHIFT_RESTRICT:
4144 case SHIFT_IMMEDIATE: break;
4145
4146 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4147 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4148 {
4149 inst.error = _("'LSL' or 'ASR' required");
4150 return FAIL;
4151 }
4152 break;
4153
4154 case SHIFT_LSL_IMMEDIATE:
4155 if (shift != SHIFT_LSL)
4156 {
4157 inst.error = _("'LSL' required");
4158 return FAIL;
4159 }
4160 break;
4161
4162 case SHIFT_ASR_IMMEDIATE:
4163 if (shift != SHIFT_ASR)
4164 {
4165 inst.error = _("'ASR' required");
4166 return FAIL;
4167 }
4168 break;
4169
4170 default: abort ();
4171 }
4172
4173 if (shift != SHIFT_RRX)
4174 {
4175 /* Whitespace can appear here if the next thing is a bare digit. */
4176 skip_whitespace (p);
4177
4178 if (mode == NO_SHIFT_RESTRICT
4179 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4180 {
4181 inst.operands[i].imm = reg;
4182 inst.operands[i].immisreg = 1;
4183 }
4184 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4185 return FAIL;
4186 }
4187 inst.operands[i].shift_kind = shift;
4188 inst.operands[i].shifted = 1;
4189 *str = p;
4190 return SUCCESS;
4191 }
4192
4193 /* Parse a <shifter_operand> for an ARM data processing instruction:
4194
4195 #<immediate>
4196 #<immediate>, <rotate>
4197 <Rm>
4198 <Rm>, <shift>
4199
4200 where <shift> is defined by parse_shift above, and <rotate> is a
4201 multiple of 2 between 0 and 30. Validation of immediate operands
4202 is deferred to md_apply_fix. */
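/* For example, "#4, 2" denotes the constant 4 rotated right by 2 bits,
   i.e. 1; the code below stores the decoded value and md_apply_fix later
   re-derives the 8-bit immediate and rotation when encoding.  */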
4203
4204 static int
4205 parse_shifter_operand (char **str, int i)
4206 {
4207 int value;
4208 expressionS expr;
4209
4210 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4211 {
4212 inst.operands[i].reg = value;
4213 inst.operands[i].isreg = 1;
4214
4215 /* parse_shift will override this if appropriate */
4216 inst.reloc.exp.X_op = O_constant;
4217 inst.reloc.exp.X_add_number = 0;
4218
4219 if (skip_past_comma (str) == FAIL)
4220 return SUCCESS;
4221
4222 /* Shift operation on register. */
4223 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4224 }
4225
4226 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4227 return FAIL;
4228
4229 if (skip_past_comma (str) == SUCCESS)
4230 {
4231 /* #x, y -- i.e. explicit rotation by Y. */
4232 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4233 return FAIL;
4234
4235 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4236 {
4237 inst.error = _("constant expression expected");
4238 return FAIL;
4239 }
4240
4241 value = expr.X_add_number;
4242 if (value < 0 || value > 30 || value % 2 != 0)
4243 {
4244 inst.error = _("invalid rotation");
4245 return FAIL;
4246 }
4247 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4248 {
4249 inst.error = _("invalid constant");
4250 return FAIL;
4251 }
4252
4253 /* Convert to decoded value. md_apply_fix will put it back. */
4254 inst.reloc.exp.X_add_number
4255 = (((inst.reloc.exp.X_add_number << (32 - value))
4256 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4257 }
4258
4259 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4260 inst.reloc.pc_rel = 0;
4261 return SUCCESS;
4262 }
4263
4264 /* Parse all forms of an ARM address expression. Information is written
4265 to inst.operands[i] and/or inst.reloc.
4266
4267 Preindexed addressing (.preind=1):
4268
4269 [Rn, #offset] .reg=Rn .reloc.exp=offset
4270 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4271 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4272 .shift_kind=shift .reloc.exp=shift_imm
4273
4274 These three may have a trailing ! which causes .writeback to be set also.
4275
4276 Postindexed addressing (.postind=1, .writeback=1):
4277
4278 [Rn], #offset .reg=Rn .reloc.exp=offset
4279 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4280 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4281 .shift_kind=shift .reloc.exp=shift_imm
4282
4283 Unindexed addressing (.preind=0, .postind=0):
4284
4285 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4286
4287 Other:
4288
4289 [Rn]{!} shorthand for [Rn,#0]{!}
4290 =immediate .isreg=0 .reloc.exp=immediate
4291 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4292
4293 It is the caller's responsibility to check for addressing modes not
4294 supported by the instruction, and to set inst.reloc.type. */
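/* For illustration, "[r1, -r2, lsl #2]!" is recorded roughly as .reg=1,
   .imm=2, .immisreg=1, .negative=1, .shift_kind=SHIFT_LSL with the shift
   amount in .reloc.exp, plus .preind=1 and .writeback=1; an operand of
   the form "=0x12345678" instead takes the load-constant path with
   .isreg=0 and the constant left in .reloc.exp. */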
4295
4296 static int
4297 parse_address (char **str, int i)
4298 {
4299 char *p = *str;
4300 int reg;
4301
4302 if (skip_past_char (&p, '[') == FAIL)
4303 {
4304 if (skip_past_char (&p, '=') == FAIL)
4305 {
4306 /* bare address - translate to PC-relative offset */
4307 inst.reloc.pc_rel = 1;
4308 inst.operands[i].reg = REG_PC;
4309 inst.operands[i].isreg = 1;
4310 inst.operands[i].preind = 1;
4311 }
4312 /* else a load-constant pseudo op, no special treatment needed here */
4313
4314 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4315 return FAIL;
4316
4317 *str = p;
4318 return SUCCESS;
4319 }
4320
4321 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4322 {
4323 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4324 return FAIL;
4325 }
4326 inst.operands[i].reg = reg;
4327 inst.operands[i].isreg = 1;
4328
4329 if (skip_past_comma (&p) == SUCCESS)
4330 {
4331 inst.operands[i].preind = 1;
4332
4333 if (*p == '+') p++;
4334 else if (*p == '-') p++, inst.operands[i].negative = 1;
4335
4336 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4337 {
4338 inst.operands[i].imm = reg;
4339 inst.operands[i].immisreg = 1;
4340
4341 if (skip_past_comma (&p) == SUCCESS)
4342 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4343 return FAIL;
4344 }
4345 else if (skip_past_char (&p, ':') == SUCCESS)
4346 {
4347 /* FIXME: '@' should be used here, but it's filtered out by generic
4348 code before we get to see it here. This may be subject to
4349 change. */
4350 expressionS exp;
4351 my_get_expression (&exp, &p, GE_NO_PREFIX);
4352 if (exp.X_op != O_constant)
4353 {
4354 inst.error = _("alignment must be constant");
4355 return FAIL;
4356 }
4357 inst.operands[i].imm = exp.X_add_number << 8;
4358 inst.operands[i].immisalign = 1;
4359 /* Alignments are not pre-indexes. */
4360 inst.operands[i].preind = 0;
4361 }
4362 else
4363 {
4364 if (inst.operands[i].negative)
4365 {
4366 inst.operands[i].negative = 0;
4367 p--;
4368 }
4369 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4370 return FAIL;
4371 }
4372 }
4373
4374 if (skip_past_char (&p, ']') == FAIL)
4375 {
4376 inst.error = _("']' expected");
4377 return FAIL;
4378 }
4379
4380 if (skip_past_char (&p, '!') == SUCCESS)
4381 inst.operands[i].writeback = 1;
4382
4383 else if (skip_past_comma (&p) == SUCCESS)
4384 {
4385 if (skip_past_char (&p, '{') == SUCCESS)
4386 {
4387 /* [Rn], {expr} - unindexed, with option */
4388 if (parse_immediate (&p, &inst.operands[i].imm,
4389 0, 255, TRUE) == FAIL)
4390 return FAIL;
4391
4392 if (skip_past_char (&p, '}') == FAIL)
4393 {
4394 inst.error = _("'}' expected at end of 'option' field");
4395 return FAIL;
4396 }
4397 if (inst.operands[i].preind)
4398 {
4399 inst.error = _("cannot combine index with option");
4400 return FAIL;
4401 }
4402 *str = p;
4403 return SUCCESS;
4404 }
4405 else
4406 {
4407 inst.operands[i].postind = 1;
4408 inst.operands[i].writeback = 1;
4409
4410 if (inst.operands[i].preind)
4411 {
4412 inst.error = _("cannot combine pre- and post-indexing");
4413 return FAIL;
4414 }
4415
4416 if (*p == '+') p++;
4417 else if (*p == '-') p++, inst.operands[i].negative = 1;
4418
4419 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4420 {
4421 /* We might be using the immediate for alignment already. If we
4422 are, OR the register number into the low-order bits. */
4423 if (inst.operands[i].immisalign)
4424 inst.operands[i].imm |= reg;
4425 else
4426 inst.operands[i].imm = reg;
4427 inst.operands[i].immisreg = 1;
4428
4429 if (skip_past_comma (&p) == SUCCESS)
4430 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4431 return FAIL;
4432 }
4433 else
4434 {
4435 if (inst.operands[i].negative)
4436 {
4437 inst.operands[i].negative = 0;
4438 p--;
4439 }
4440 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4441 return FAIL;
4442 }
4443 }
4444 }
4445
4446 /* If at this point neither .preind nor .postind is set, we have a
4447 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4448 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4449 {
4450 inst.operands[i].preind = 1;
4451 inst.reloc.exp.X_op = O_constant;
4452 inst.reloc.exp.X_add_number = 0;
4453 }
4454 *str = p;
4455 return SUCCESS;
4456 }
4457
4458 /* Parse an operand for a MOVW or MOVT instruction. */
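/* For example, "movw r0, #:lower16:sym" selects BFD_RELOC_ARM_MOVW and
   "movt r0, #:upper16:sym" selects BFD_RELOC_ARM_MOVT; a plain
   "movw r0, #0x1234" is range-checked here as a 0..0xffff constant. */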
4459 static int
4460 parse_half (char **str)
4461 {
4462 char * p;
4463
4464 p = *str;
4465 skip_past_char (&p, '#');
4466 if (strncasecmp (p, ":lower16:", 9) == 0)
4467 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4468 else if (strncasecmp (p, ":upper16:", 9) == 0)
4469 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4470
4471 if (inst.reloc.type != BFD_RELOC_UNUSED)
4472 {
4473 p += 9;
4474 skip_whitespace(p);
4475 }
4476
4477 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4478 return FAIL;
4479
4480 if (inst.reloc.type == BFD_RELOC_UNUSED)
4481 {
4482 if (inst.reloc.exp.X_op != O_constant)
4483 {
4484 inst.error = _("constant expression expected");
4485 return FAIL;
4486 }
4487 if (inst.reloc.exp.X_add_number < 0
4488 || inst.reloc.exp.X_add_number > 0xffff)
4489 {
4490 inst.error = _("immediate value out of range");
4491 return FAIL;
4492 }
4493 }
4494 *str = p;
4495 return SUCCESS;
4496 }
4497
4498 /* Miscellaneous. */
4499
4500 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4501 or a bitmask suitable to be or-ed into the ARM msr instruction. */
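/* For example, "CPSR_fc" or "SPSR_cxsf" selects the named fields via the
   arm_psr_hsh table, while a bare "CPSR" is treated as "CPSR_fc"
   (PSR_c | PSR_f); when the operand is neither CPSR nor SPSR, the v7-M
   special-register names in arm_v7m_psr_hsh are tried instead. */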
4502 static int
4503 parse_psr (char **str)
4504 {
4505 char *p;
4506 unsigned long psr_field;
4507 const struct asm_psr *psr;
4508 char *start;
4509
4510 /* CPSRs and SPSRs can now be lowercase. This is just a convenience
4511 feature for ease of use and backwards compatibility. */
4512 p = *str;
4513 if (strncasecmp (p, "SPSR", 4) == 0)
4514 psr_field = SPSR_BIT;
4515 else if (strncasecmp (p, "CPSR", 4) == 0)
4516 psr_field = 0;
4517 else
4518 {
4519 start = p;
4520 do
4521 p++;
4522 while (ISALNUM (*p) || *p == '_');
4523
4524 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4525 if (!psr)
4526 return FAIL;
4527
4528 *str = p;
4529 return psr->field;
4530 }
4531
4532 p += 4;
4533 if (*p == '_')
4534 {
4535 /* A suffix follows. */
4536 p++;
4537 start = p;
4538
4539 do
4540 p++;
4541 while (ISALNUM (*p) || *p == '_');
4542
4543 psr = hash_find_n (arm_psr_hsh, start, p - start);
4544 if (!psr)
4545 goto error;
4546
4547 psr_field |= psr->field;
4548 }
4549 else
4550 {
4551 if (ISALNUM (*p))
4552 goto error; /* Garbage after "[CS]PSR". */
4553
4554 psr_field |= (PSR_c | PSR_f);
4555 }
4556 *str = p;
4557 return psr_field;
4558
4559 error:
4560 inst.error = _("flag for {c}psr instruction expected");
4561 return FAIL;
4562 }
4563
4564 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4565 value suitable for splatting into the AIF field of the instruction. */
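/* The flags map to bits a=4, i=2, f=1, so for example "cpsid aif" yields
   0x7 and "cpsie i" yields 0x2; at least one flag must be present. */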
4566
4567 static int
4568 parse_cps_flags (char **str)
4569 {
4570 int val = 0;
4571 int saw_a_flag = 0;
4572 char *s = *str;
4573
4574 for (;;)
4575 switch (*s++)
4576 {
4577 case '\0': case ',':
4578 goto done;
4579
4580 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4581 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4582 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4583
4584 default:
4585 inst.error = _("unrecognized CPS flag");
4586 return FAIL;
4587 }
4588
4589 done:
4590 if (saw_a_flag == 0)
4591 {
4592 inst.error = _("missing CPS flags");
4593 return FAIL;
4594 }
4595
4596 *str = s - 1;
4597 return val;
4598 }
4599
4600 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4601 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4602
4603 static int
4604 parse_endian_specifier (char **str)
4605 {
4606 int little_endian;
4607 char *s = *str;
4608
4609 if (strncasecmp (s, "BE", 2))
4610 little_endian = 0;
4611 else if (strncasecmp (s, "LE", 2))
4612 little_endian = 1;
4613 else
4614 {
4615 inst.error = _("valid endian specifiers are be or le");
4616 return FAIL;
4617 }
4618
4619 if (ISALNUM (s[2]) || s[2] == '_')
4620 {
4621 inst.error = _("valid endian specifiers are be or le");
4622 return FAIL;
4623 }
4624
4625 *str = s + 2;
4626 return little_endian;
4627 }
4628
4629 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4630 value suitable for poking into the rotate field of an sxt or sxta
4631 instruction, or FAIL on error. */
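/* For example, the rotation operand of "sxtb r0, r1, ROR #16" reaches
   here as "ROR #16" and returns 0x2; only rotations of 0, 8, 16 and 24
   are representable in the two-bit field. */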
4632
4633 static int
4634 parse_ror (char **str)
4635 {
4636 int rot;
4637 char *s = *str;
4638
4639 if (strncasecmp (s, "ROR", 3) == 0)
4640 s += 3;
4641 else
4642 {
4643 inst.error = _("missing rotation field after comma");
4644 return FAIL;
4645 }
4646
4647 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
4648 return FAIL;
4649
4650 switch (rot)
4651 {
4652 case 0: *str = s; return 0x0;
4653 case 8: *str = s; return 0x1;
4654 case 16: *str = s; return 0x2;
4655 case 24: *str = s; return 0x3;
4656
4657 default:
4658 inst.error = _("rotation can only be 0, 8, 16, or 24");
4659 return FAIL;
4660 }
4661 }
4662
4663 /* Parse a conditional code (from conds[] below). The value returned is in the
4664 range 0 .. 14, or FAIL. */
4665 static int
4666 parse_cond (char **str)
4667 {
4668 char *p, *q;
4669 const struct asm_cond *c;
4670
4671 p = q = *str;
4672 while (ISALPHA (*q))
4673 q++;
4674
4675 c = hash_find_n (arm_cond_hsh, p, q - p);
4676 if (!c)
4677 {
4678 inst.error = _("condition required");
4679 return FAIL;
4680 }
4681
4682 *str = q;
4683 return c->value;
4684 }
4685
4686 /* Parse an option for a barrier instruction. Returns the encoding for the
4687 option, or FAIL. */
4688 static int
4689 parse_barrier (char **str)
4690 {
4691 char *p, *q;
4692 const struct asm_barrier_opt *o;
4693
4694 p = q = *str;
4695 while (ISALPHA (*q))
4696 q++;
4697
4698 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
4699 if (!o)
4700 return FAIL;
4701
4702 *str = q;
4703 return o->value;
4704 }
4705
4706 /* Parse the operands of a table branch instruction. Similar to a memory
4707 operand. */
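/* For example, "tbb [r0, r1]" and "tbh [r0, r1, lsl #1]" are accepted;
   when a shift is given it must be exactly LSL #1, as checked below. */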
4708 static int
4709 parse_tb (char **str)
4710 {
4711 char * p = *str;
4712 int reg;
4713
4714 if (skip_past_char (&p, '[') == FAIL)
4715 {
4716 inst.error = _("'[' expected");
4717 return FAIL;
4718 }
4719
4720 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4721 {
4722 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4723 return FAIL;
4724 }
4725 inst.operands[0].reg = reg;
4726
4727 if (skip_past_comma (&p) == FAIL)
4728 {
4729 inst.error = _("',' expected");
4730 return FAIL;
4731 }
4732
4733 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4734 {
4735 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4736 return FAIL;
4737 }
4738 inst.operands[0].imm = reg;
4739
4740 if (skip_past_comma (&p) == SUCCESS)
4741 {
4742 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
4743 return FAIL;
4744 if (inst.reloc.exp.X_add_number != 1)
4745 {
4746 inst.error = _("invalid shift");
4747 return FAIL;
4748 }
4749 inst.operands[0].shifted = 1;
4750 }
4751
4752 if (skip_past_char (&p, ']') == FAIL)
4753 {
4754 inst.error = _("']' expected");
4755 return FAIL;
4756 }
4757 *str = p;
4758 return SUCCESS;
4759 }
4760
4761 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4762 information on the types the operands can take and how they are encoded.
4763 Up to four operands may be read; this function handles setting the
4764 ".present" field for each read operand itself.
4765 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4766 else returns FAIL. */
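/* For illustration only, a few of the accepted shapes (the case numbers
   refer to the comments in the body below and in do_neon_mov):
     vmov.32  d0[1], r2       - case 4, scalar <- core register
     vmov     q0, q1          - case 0, quad <- quad
     vmov.f32 s0, #1.0        - case 10, FP immediate
     vmov     r0, r1, d2      - case 7, two core registers <- D register */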
4767
4768 static int
4769 parse_neon_mov (char **str, int *which_operand)
4770 {
4771 int i = *which_operand, val;
4772 enum arm_reg_type rtype;
4773 char *ptr = *str;
4774 struct neon_type_el optype;
4775
4776 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4777 {
4778 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4779 inst.operands[i].reg = val;
4780 inst.operands[i].isscalar = 1;
4781 inst.operands[i].vectype = optype;
4782 inst.operands[i++].present = 1;
4783
4784 if (skip_past_comma (&ptr) == FAIL)
4785 goto wanted_comma;
4786
4787 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4788 goto wanted_arm;
4789
4790 inst.operands[i].reg = val;
4791 inst.operands[i].isreg = 1;
4792 inst.operands[i].present = 1;
4793 }
4794 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
4795 != FAIL)
4796 {
4797 /* Cases 0, 1, 2, 3, 5 (D only). */
4798 if (skip_past_comma (&ptr) == FAIL)
4799 goto wanted_comma;
4800
4801 inst.operands[i].reg = val;
4802 inst.operands[i].isreg = 1;
4803 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4804 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
4805 inst.operands[i].isvec = 1;
4806 inst.operands[i].vectype = optype;
4807 inst.operands[i++].present = 1;
4808
4809 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4810 {
4811 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
4812 Case 13: VMOV <Sd>, <Rm> */
4813 inst.operands[i].reg = val;
4814 inst.operands[i].isreg = 1;
4815 inst.operands[i].present = 1;
4816
4817 if (rtype == REG_TYPE_NQ)
4818 {
4819 first_error (_("can't use Neon quad register here"));
4820 return FAIL;
4821 }
4822 else if (rtype != REG_TYPE_VFS)
4823 {
4824 i++;
4825 if (skip_past_comma (&ptr) == FAIL)
4826 goto wanted_comma;
4827 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4828 goto wanted_arm;
4829 inst.operands[i].reg = val;
4830 inst.operands[i].isreg = 1;
4831 inst.operands[i].present = 1;
4832 }
4833 }
4834 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
4835 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4836 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
4837 Case 10: VMOV.F32 <Sd>, #<imm>
4838 Case 11: VMOV.F64 <Dd>, #<imm> */
4839 ;
4840 else if (parse_big_immediate (&ptr, i) == SUCCESS)
4841 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4842 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4843 ;
4844 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
4845 &optype)) != FAIL)
4846 {
4847 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4848 Case 1: VMOV<c><q> <Dd>, <Dm>
4849 Case 8: VMOV.F32 <Sd>, <Sm>
4850 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
4851
4852 inst.operands[i].reg = val;
4853 inst.operands[i].isreg = 1;
4854 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4855 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
4856 inst.operands[i].isvec = 1;
4857 inst.operands[i].vectype = optype;
4858 inst.operands[i].present = 1;
4859
4860 if (skip_past_comma (&ptr) == SUCCESS)
4861 {
4862 /* Case 15. */
4863 i++;
4864
4865 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4866 goto wanted_arm;
4867
4868 inst.operands[i].reg = val;
4869 inst.operands[i].isreg = 1;
4870 inst.operands[i++].present = 1;
4871
4872 if (skip_past_comma (&ptr) == FAIL)
4873 goto wanted_comma;
4874
4875 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4876 goto wanted_arm;
4877
4878 inst.operands[i].reg = val;
4879 inst.operands[i].isreg = 1;
4880 inst.operands[i++].present = 1;
4881 }
4882 }
4883 else
4884 {
4885 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4886 return FAIL;
4887 }
4888 }
4889 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4890 {
4891 /* Cases 6, 7. */
4892 inst.operands[i].reg = val;
4893 inst.operands[i].isreg = 1;
4894 inst.operands[i++].present = 1;
4895
4896 if (skip_past_comma (&ptr) == FAIL)
4897 goto wanted_comma;
4898
4899 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4900 {
4901 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4902 inst.operands[i].reg = val;
4903 inst.operands[i].isscalar = 1;
4904 inst.operands[i].present = 1;
4905 inst.operands[i].vectype = optype;
4906 }
4907 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4908 {
4909 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4910 inst.operands[i].reg = val;
4911 inst.operands[i].isreg = 1;
4912 inst.operands[i++].present = 1;
4913
4914 if (skip_past_comma (&ptr) == FAIL)
4915 goto wanted_comma;
4916
4917 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
4918 == FAIL)
4919 {
4920 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
4921 return FAIL;
4922 }
4923
4924 inst.operands[i].reg = val;
4925 inst.operands[i].isreg = 1;
4926 inst.operands[i].isvec = 1;
4927 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
4928 inst.operands[i].vectype = optype;
4929 inst.operands[i].present = 1;
4930
4931 if (rtype == REG_TYPE_VFS)
4932 {
4933 /* Case 14. */
4934 i++;
4935 if (skip_past_comma (&ptr) == FAIL)
4936 goto wanted_comma;
4937 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
4938 &optype)) == FAIL)
4939 {
4940 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
4941 return FAIL;
4942 }
4943 inst.operands[i].reg = val;
4944 inst.operands[i].isreg = 1;
4945 inst.operands[i].isvec = 1;
4946 inst.operands[i].issingle = 1;
4947 inst.operands[i].vectype = optype;
4948 inst.operands[i].present = 1;
4949 }
4950 }
4951 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
4952 != FAIL)
4953 {
4954 /* Case 13. */
4955 inst.operands[i].reg = val;
4956 inst.operands[i].isreg = 1;
4957 inst.operands[i].isvec = 1;
4958 inst.operands[i].issingle = 1;
4959 inst.operands[i].vectype = optype;
4960 inst.operands[i++].present = 1;
4961 }
4962 }
4963 else
4964 {
4965 first_error (_("parse error"));
4966 return FAIL;
4967 }
4968
4969 /* Successfully parsed the operands. Update args. */
4970 *which_operand = i;
4971 *str = ptr;
4972 return SUCCESS;
4973
4974 wanted_comma:
4975 first_error (_("expected comma"));
4976 return FAIL;
4977
4978 wanted_arm:
4979 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
4980 return FAIL;
4981 }
4982
4983 /* Matcher codes for parse_operands. */
4984 enum operand_parse_code
4985 {
4986 OP_stop, /* end of line */
4987
4988 OP_RR, /* ARM register */
4989 OP_RRnpc, /* ARM register, not r15 */
4990 OP_RRnpcb, /* ARM register, not r15, in square brackets */
4991 OP_RRw, /* ARM register, not r15, optional trailing ! */
4992 OP_RCP, /* Coprocessor number */
4993 OP_RCN, /* Coprocessor register */
4994 OP_RF, /* FPA register */
4995 OP_RVS, /* VFP single precision register */
4996 OP_RVD, /* VFP double precision register (0..15) */
4997 OP_RND, /* Neon double precision register (0..31) */
4998 OP_RNQ, /* Neon quad precision register */
4999 OP_RVSD, /* VFP single or double precision register */
5000 OP_RNDQ, /* Neon double or quad precision register */
5001 OP_RNSDQ, /* Neon single, double or quad precision register */
5002 OP_RNSC, /* Neon scalar D[X] */
5003 OP_RVC, /* VFP control register */
5004 OP_RMF, /* Maverick F register */
5005 OP_RMD, /* Maverick D register */
5006 OP_RMFX, /* Maverick FX register */
5007 OP_RMDX, /* Maverick DX register */
5008 OP_RMAX, /* Maverick AX register */
5009 OP_RMDS, /* Maverick DSPSC register */
5010 OP_RIWR, /* iWMMXt wR register */
5011 OP_RIWC, /* iWMMXt wC register */
5012 OP_RIWG, /* iWMMXt wCG register */
5013 OP_RXA, /* XScale accumulator register */
5014
5015 OP_REGLST, /* ARM register list */
5016 OP_VRSLST, /* VFP single-precision register list */
5017 OP_VRDLST, /* VFP double-precision register list */
5018 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
5019 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
5020 OP_NSTRLST, /* Neon element/structure list */
5021
5022 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5023 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
5024 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
5025 OP_RR_RNSC, /* ARM reg or Neon scalar. */
5026 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
5027 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
5028 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
5029 OP_VMOV, /* Neon VMOV operands. */
5030 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
5031 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
5032
5033 OP_I0, /* immediate zero */
5034 OP_I7, /* immediate value 0 .. 7 */
5035 OP_I15, /* 0 .. 15 */
5036 OP_I16, /* 1 .. 16 */
5037 OP_I16z, /* 0 .. 16 */
5038 OP_I31, /* 0 .. 31 */
5039 OP_I31w, /* 0 .. 31, optional trailing ! */
5040 OP_I32, /* 1 .. 32 */
5041 OP_I32z, /* 0 .. 32 */
5042 OP_I63, /* 0 .. 63 */
5043 OP_I63s, /* -64 .. 63 */
5044 OP_I64, /* 1 .. 64 */
5045 OP_I64z, /* 0 .. 64 */
5046 OP_I255, /* 0 .. 255 */
5047
5048 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
5049 OP_I7b, /* 0 .. 7 */
5050 OP_I15b, /* 0 .. 15 */
5051 OP_I31b, /* 0 .. 31 */
5052
5053 OP_SH, /* shifter operand */
5054 OP_ADDR, /* Memory address expression (any mode) */
5055 OP_EXP, /* arbitrary expression */
5056 OP_EXPi, /* same, with optional immediate prefix */
5057 OP_EXPr, /* same, with optional relocation suffix */
5058 OP_HALF, /* 0 .. 65535 or low/high reloc. */
5059
5060 OP_CPSF, /* CPS flags */
5061 OP_ENDI, /* Endianness specifier */
5062 OP_PSR, /* CPSR/SPSR mask for msr */
5063 OP_COND, /* conditional code */
5064 OP_TB, /* Table branch. */
5065
5066 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */
5067 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
5068
5069 OP_RRnpc_I0, /* ARM register or literal 0 */
5070 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
5071 OP_RR_EXi, /* ARM register or expression with imm prefix */
5072 OP_RF_IF, /* FPA register or immediate */
5073 OP_RIWR_RIWC, /* iWMMXt R or C reg */
5074
5075 /* Optional operands. */
5076 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
5077 OP_oI31b, /* 0 .. 31 */
5078 OP_oI32b, /* 1 .. 32 */
5079 OP_oIffffb, /* 0 .. 65535 */
5080 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5081
5082 OP_oRR, /* ARM register */
5083 OP_oRRnpc, /* ARM register, not the PC */
5084 OP_oRND, /* Optional Neon double precision register */
5085 OP_oRNQ, /* Optional Neon quad precision register */
5086 OP_oRNDQ, /* Optional Neon double or quad precision register */
5087 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
5088 OP_oSHll, /* LSL immediate */
5089 OP_oSHar, /* ASR immediate */
5090 OP_oSHllar, /* LSL or ASR immediate */
5091 OP_oROR, /* ROR 0/8/16/24 */
5092 OP_oBARRIER, /* Option argument for a barrier instruction. */
5093
5094 OP_FIRST_OPTIONAL = OP_oI7b
5095 };
5096
5097 /* Generic instruction operand parser. This does no encoding and no
5098 semantic validation; it merely squirrels values away in the inst
5099 structure. Returns SUCCESS or FAIL depending on whether the
5100 specified grammar matched. */
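/* For illustration: the pattern is an array of operand_parse_code values
   terminated by OP_stop, so a pattern such as { OP_RR, OP_RR, OP_SH,
   OP_stop } would match "r0, r1, r2, lsl #3" as well as "r0, r1, #42",
   since the shifter operand accepts either form. Optional codes (OP_o*)
   may be skipped via the backtracking logic below. */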
5101 static int
5102 parse_operands (char *str, const unsigned char *pattern)
5103 {
5104 unsigned const char *upat = pattern;
5105 char *backtrack_pos = 0;
5106 const char *backtrack_error = 0;
5107 int i, val, backtrack_index = 0;
5108 enum arm_reg_type rtype;
5109
5110 #define po_char_or_fail(chr) do { \
5111 if (skip_past_char (&str, chr) == FAIL) \
5112 goto bad_args; \
5113 } while (0)
5114
5115 #define po_reg_or_fail(regtype) do { \
5116 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5117 &inst.operands[i].vectype); \
5118 if (val == FAIL) \
5119 { \
5120 first_error (_(reg_expected_msgs[regtype])); \
5121 goto failure; \
5122 } \
5123 inst.operands[i].reg = val; \
5124 inst.operands[i].isreg = 1; \
5125 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5126 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5127 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5128 || rtype == REG_TYPE_VFD \
5129 || rtype == REG_TYPE_NQ); \
5130 } while (0)
5131
5132 #define po_reg_or_goto(regtype, label) do { \
5133 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5134 &inst.operands[i].vectype); \
5135 if (val == FAIL) \
5136 goto label; \
5137 \
5138 inst.operands[i].reg = val; \
5139 inst.operands[i].isreg = 1; \
5140 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5141 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5142 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5143 || rtype == REG_TYPE_VFD \
5144 || rtype == REG_TYPE_NQ); \
5145 } while (0)
5146
5147 #define po_imm_or_fail(min, max, popt) do { \
5148 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5149 goto failure; \
5150 inst.operands[i].imm = val; \
5151 } while (0)
5152
5153 #define po_scalar_or_goto(elsz, label) do { \
5154 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5155 if (val == FAIL) \
5156 goto label; \
5157 inst.operands[i].reg = val; \
5158 inst.operands[i].isscalar = 1; \
5159 } while (0)
5160
5161 #define po_misc_or_fail(expr) do { \
5162 if (expr) \
5163 goto failure; \
5164 } while (0)
5165
5166 skip_whitespace (str);
5167
5168 for (i = 0; upat[i] != OP_stop; i++)
5169 {
5170 if (upat[i] >= OP_FIRST_OPTIONAL)
5171 {
5172 /* Remember where we are in case we need to backtrack. */
5173 assert (!backtrack_pos);
5174 backtrack_pos = str;
5175 backtrack_error = inst.error;
5176 backtrack_index = i;
5177 }
5178
5179 if (i > 0)
5180 po_char_or_fail (',');
5181
5182 switch (upat[i])
5183 {
5184 /* Registers */
5185 case OP_oRRnpc:
5186 case OP_RRnpc:
5187 case OP_oRR:
5188 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5189 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5190 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5191 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5192 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5193 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5194 case OP_oRND:
5195 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5196 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5197 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5198 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5199 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5200 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5201 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5202 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5203 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5204 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5205 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5206 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5207 case OP_oRNQ:
5208 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5209 case OP_oRNDQ:
5210 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5211 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5212 case OP_oRNSDQ:
5213 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5214
5215 /* Neon scalar. Using an element size of 8 means that some invalid
5216 scalars are accepted here, so deal with those in later code. */
5217 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5218
5219 /* WARNING: We can expand to two operands here. This has the potential
5220 to totally confuse the backtracking mechanism! It will be OK at
5221 least as long as we don't try to use optional args as well,
5222 though. */
5223 case OP_NILO:
5224 {
5225 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5226 i++;
5227 skip_past_comma (&str);
5228 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5229 break;
5230 one_reg_only:
5231 /* Optional register operand was omitted. Unfortunately, it's in
5232 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5233 here (this is a bit grotty). */
5234 inst.operands[i] = inst.operands[i-1];
5235 inst.operands[i-1].present = 0;
5236 break;
5237 try_imm:
5238 /* Immediate gets verified properly later, so accept any now. */
5239 po_imm_or_fail (INT_MIN, INT_MAX, TRUE);
5240 }
5241 break;
5242
5243 case OP_RNDQ_I0:
5244 {
5245 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5246 break;
5247 try_imm0:
5248 po_imm_or_fail (0, 0, TRUE);
5249 }
5250 break;
5251
5252 case OP_RVSD_I0:
5253 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5254 break;
5255
5256 case OP_RR_RNSC:
5257 {
5258 po_scalar_or_goto (8, try_rr);
5259 break;
5260 try_rr:
5261 po_reg_or_fail (REG_TYPE_RN);
5262 }
5263 break;
5264
5265 case OP_RNSDQ_RNSC:
5266 {
5267 po_scalar_or_goto (8, try_nsdq);
5268 break;
5269 try_nsdq:
5270 po_reg_or_fail (REG_TYPE_NSDQ);
5271 }
5272 break;
5273
5274 case OP_RNDQ_RNSC:
5275 {
5276 po_scalar_or_goto (8, try_ndq);
5277 break;
5278 try_ndq:
5279 po_reg_or_fail (REG_TYPE_NDQ);
5280 }
5281 break;
5282
5283 case OP_RND_RNSC:
5284 {
5285 po_scalar_or_goto (8, try_vfd);
5286 break;
5287 try_vfd:
5288 po_reg_or_fail (REG_TYPE_VFD);
5289 }
5290 break;
5291
5292 case OP_VMOV:
5293 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5294 not careful then bad things might happen. */
5295 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5296 break;
5297
5298 case OP_RNDQ_IMVNb:
5299 {
5300 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5301 break;
5302 try_mvnimm:
5303 /* There's a possibility of getting a 64-bit immediate here, so
5304 we need special handling. */
5305 if (parse_big_immediate (&str, i) == FAIL)
5306 {
5307 inst.error = _("immediate value is out of range");
5308 goto failure;
5309 }
5310 }
5311 break;
5312
5313 case OP_RNDQ_I63b:
5314 {
5315 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5316 break;
5317 try_shimm:
5318 po_imm_or_fail (0, 63, TRUE);
5319 }
5320 break;
5321
5322 case OP_RRnpcb:
5323 po_char_or_fail ('[');
5324 po_reg_or_fail (REG_TYPE_RN);
5325 po_char_or_fail (']');
5326 break;
5327
5328 case OP_RRw:
5329 po_reg_or_fail (REG_TYPE_RN);
5330 if (skip_past_char (&str, '!') == SUCCESS)
5331 inst.operands[i].writeback = 1;
5332 break;
5333
5334 /* Immediates */
5335 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5336 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5337 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5338 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5339 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5340 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5341 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5342 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5343 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5344 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5345 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5346 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5347
5348 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5349 case OP_oI7b:
5350 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5351 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5352 case OP_oI31b:
5353 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5354 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5355 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5356
5357 /* Immediate variants */
5358 case OP_oI255c:
5359 po_char_or_fail ('{');
5360 po_imm_or_fail (0, 255, TRUE);
5361 po_char_or_fail ('}');
5362 break;
5363
5364 case OP_I31w:
5365 /* The expression parser chokes on a trailing !, so we have
5366 to find it first and zap it. */
5367 {
5368 char *s = str;
5369 while (*s && *s != ',')
5370 s++;
5371 if (s[-1] == '!')
5372 {
5373 s[-1] = '\0';
5374 inst.operands[i].writeback = 1;
5375 }
5376 po_imm_or_fail (0, 31, TRUE);
5377 if (str == s - 1)
5378 str = s;
5379 }
5380 break;
5381
5382 /* Expressions */
5383 case OP_EXPi: EXPi:
5384 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5385 GE_OPT_PREFIX));
5386 break;
5387
5388 case OP_EXP:
5389 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5390 GE_NO_PREFIX));
5391 break;
5392
5393 case OP_EXPr: EXPr:
5394 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5395 GE_NO_PREFIX));
5396 if (inst.reloc.exp.X_op == O_symbol)
5397 {
5398 val = parse_reloc (&str);
5399 if (val == -1)
5400 {
5401 inst.error = _("unrecognized relocation suffix");
5402 goto failure;
5403 }
5404 else if (val != BFD_RELOC_UNUSED)
5405 {
5406 inst.operands[i].imm = val;
5407 inst.operands[i].hasreloc = 1;
5408 }
5409 }
5410 break;
5411
5412 /* Operand for MOVW or MOVT. */
5413 case OP_HALF:
5414 po_misc_or_fail (parse_half (&str));
5415 break;
5416
5417 /* Register or expression */
5418 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5419 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5420
5421 /* Register or immediate */
5422 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5423 I0: po_imm_or_fail (0, 0, FALSE); break;
5424
5425 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5426 IF:
5427 if (!is_immediate_prefix (*str))
5428 goto bad_args;
5429 str++;
5430 val = parse_fpa_immediate (&str);
5431 if (val == FAIL)
5432 goto failure;
5433 /* FPA immediates are encoded as registers 8-15.
5434 parse_fpa_immediate has already applied the offset. */
5435 inst.operands[i].reg = val;
5436 inst.operands[i].isreg = 1;
5437 break;
5438
5439 /* Two kinds of register */
5440 case OP_RIWR_RIWC:
5441 {
5442 struct reg_entry *rege = arm_reg_parse_multi (&str);
5443 if (rege->type != REG_TYPE_MMXWR
5444 && rege->type != REG_TYPE_MMXWC
5445 && rege->type != REG_TYPE_MMXWCG)
5446 {
5447 inst.error = _("iWMMXt data or control register expected");
5448 goto failure;
5449 }
5450 inst.operands[i].reg = rege->number;
5451 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5452 }
5453 break;
5454
5455 /* Misc */
5456 case OP_CPSF: val = parse_cps_flags (&str); break;
5457 case OP_ENDI: val = parse_endian_specifier (&str); break;
5458 case OP_oROR: val = parse_ror (&str); break;
5459 case OP_PSR: val = parse_psr (&str); break;
5460 case OP_COND: val = parse_cond (&str); break;
5461 case OP_oBARRIER:val = parse_barrier (&str); break;
5462
5463 case OP_RVC_PSR:
5464 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5465 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5466 break;
5467 try_psr:
5468 val = parse_psr (&str);
5469 break;
5470
5471 case OP_APSR_RR:
5472 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5473 break;
5474 try_apsr:
5475 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
5476 instruction). */
5477 if (strncasecmp (str, "APSR_", 5) == 0)
5478 {
5479 unsigned found = 0;
5480 str += 5;
5481 while (found < 15)
5482 switch (*str++)
5483 {
5484 case 'c': found = (found & 1) ? 16 : found | 1; break;
5485 case 'n': found = (found & 2) ? 16 : found | 2; break;
5486 case 'z': found = (found & 4) ? 16 : found | 4; break;
5487 case 'v': found = (found & 8) ? 16 : found | 8; break;
5488 default: found = 16;
5489 }
5490 if (found != 15)
5491 goto failure;
5492 inst.operands[i].isvec = 1;
5493 }
5494 else
5495 goto failure;
5496 break;
5497
5498 case OP_TB:
5499 po_misc_or_fail (parse_tb (&str));
5500 break;
5501
5502 /* Register lists */
5503 case OP_REGLST:
5504 val = parse_reg_list (&str);
5505 if (*str == '^')
5506 {
5507 inst.operands[1].writeback = 1;
5508 str++;
5509 }
5510 break;
5511
5512 case OP_VRSLST:
5513 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5514 break;
5515
5516 case OP_VRDLST:
5517 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5518 break;
5519
5520 case OP_VRSDLST:
5521 /* Allow Q registers too. */
5522 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5523 REGLIST_NEON_D);
5524 if (val == FAIL)
5525 {
5526 inst.error = NULL;
5527 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5528 REGLIST_VFP_S);
5529 inst.operands[i].issingle = 1;
5530 }
5531 break;
5532
5533 case OP_NRDLST:
5534 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5535 REGLIST_NEON_D);
5536 break;
5537
5538 case OP_NSTRLST:
5539 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5540 &inst.operands[i].vectype);
5541 break;
5542
5543 /* Addressing modes */
5544 case OP_ADDR:
5545 po_misc_or_fail (parse_address (&str, i));
5546 break;
5547
5548 case OP_SH:
5549 po_misc_or_fail (parse_shifter_operand (&str, i));
5550 break;
5551
5552 case OP_oSHll:
5553 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
5554 break;
5555
5556 case OP_oSHar:
5557 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
5558 break;
5559
5560 case OP_oSHllar:
5561 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
5562 break;
5563
5564 default:
5565 as_fatal (_("unhandled operand code %d"), upat[i]);
5566 }
5567
5568 /* Various value-based sanity checks and shared operations. We
5569 do not signal immediate failures for the register constraints;
5570 this allows a syntax error to take precedence. */
5571 switch (upat[i])
5572 {
5573 case OP_oRRnpc:
5574 case OP_RRnpc:
5575 case OP_RRnpcb:
5576 case OP_RRw:
5577 case OP_RRnpc_I0:
5578 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
5579 inst.error = BAD_PC;
5580 break;
5581
5582 case OP_CPSF:
5583 case OP_ENDI:
5584 case OP_oROR:
5585 case OP_PSR:
5586 case OP_RVC_PSR:
5587 case OP_COND:
5588 case OP_oBARRIER:
5589 case OP_REGLST:
5590 case OP_VRSLST:
5591 case OP_VRDLST:
5592 case OP_VRSDLST:
5593 case OP_NRDLST:
5594 case OP_NSTRLST:
5595 if (val == FAIL)
5596 goto failure;
5597 inst.operands[i].imm = val;
5598 break;
5599
5600 default:
5601 break;
5602 }
5603
5604 /* If we get here, this operand was successfully parsed. */
5605 inst.operands[i].present = 1;
5606 continue;
5607
5608 bad_args:
5609 inst.error = BAD_ARGS;
5610
5611 failure:
5612 if (!backtrack_pos)
5613 {
5614 /* The parse routine should already have set inst.error, but set a
5615 default here just in case. */
5616 if (!inst.error)
5617 inst.error = _("syntax error");
5618 return FAIL;
5619 }
5620
5621 /* Do not backtrack over a trailing optional argument that
5622 absorbed some text. We will only fail again, with the
5623 'garbage following instruction' error message, which is
5624 probably less helpful than the current one. */
5625 if (backtrack_index == i && backtrack_pos != str
5626 && upat[i+1] == OP_stop)
5627 {
5628 if (!inst.error)
5629 inst.error = _("syntax error");
5630 return FAIL;
5631 }
5632
5633 /* Try again, skipping the optional argument at backtrack_pos. */
5634 str = backtrack_pos;
5635 inst.error = backtrack_error;
5636 inst.operands[backtrack_index].present = 0;
5637 i = backtrack_index;
5638 backtrack_pos = 0;
5639 }
5640
5641 /* Check that we have parsed all the arguments. */
5642 if (*str != '\0' && !inst.error)
5643 inst.error = _("garbage following instruction");
5644
5645 return inst.error ? FAIL : SUCCESS;
5646 }
5647
5648 #undef po_char_or_fail
5649 #undef po_reg_or_fail
5650 #undef po_reg_or_goto
5651 #undef po_imm_or_fail
5652 #undef po_scalar_or_fail
5653 \f
5654 /* Shorthand macro for instruction encoding functions issuing errors. */
5655 #define constraint(expr, err) do { \
5656 if (expr) \
5657 { \
5658 inst.error = err; \
5659 return; \
5660 } \
5661 } while (0)
5662
5663 /* Functions for operand encoding. ARM, then Thumb. */
5664
5665 #define rotate_left(v, n) (v << n | v >> (32 - n))
5666
5667 /* If VAL can be encoded in the immediate field of an ARM instruction,
5668 return the encoded form. Otherwise, return FAIL. */
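/* Worked example: 0xff00 is representable as 0xff rotated right by 24
   bits, so encode_arm_immediate (0xff00) returns 0xcff, i.e. rotate
   field 12 (24/2) in bits 11:8 and immediate 0xff in bits 7:0. */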
5669
5670 static unsigned int
5671 encode_arm_immediate (unsigned int val)
5672 {
5673 unsigned int a, i;
5674
5675 for (i = 0; i < 32; i += 2)
5676 if ((a = rotate_left (val, i)) <= 0xff)
5677 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
5678
5679 return FAIL;
5680 }
5681
5682 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5683 return the encoded form. Otherwise, return FAIL. */
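/* The Thumb-2 modified-immediate forms handled below include the
   "splat" patterns: for example 0x00ab00ab encodes as 0x1ab,
   0xab00ab00 as 0x2ab and 0xabababab as 0x3ab, while a value with a
   single contiguous byte such as 0x00ab0000 uses the shifted form. */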
5684 static unsigned int
5685 encode_thumb32_immediate (unsigned int val)
5686 {
5687 unsigned int a, i;
5688
5689 if (val <= 0xff)
5690 return val;
5691
5692 for (i = 1; i <= 24; i++)
5693 {
5694 a = val >> i;
5695 if ((val & ~(0xff << i)) == 0)
5696 return ((val >> i) & 0x7f) | ((32 - i) << 7);
5697 }
5698
5699 a = val & 0xff;
5700 if (val == ((a << 16) | a))
5701 return 0x100 | a;
5702 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
5703 return 0x300 | a;
5704
5705 a = val & 0xff00;
5706 if (val == ((a << 16) | a))
5707 return 0x200 | (a >> 8);
5708
5709 return FAIL;
5710 }
5711 /* Encode a VFP SP or DP register number into inst.instruction. */
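/* For example, with pos == VFP_REG_Dd a register such as d17 is split
   into its low four bits (1) in bits 15:12 and its top bit (1) in the
   D bit at position 22; D registers above d15 additionally require
   VFPv3, which is what the feature check below enforces. */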
5712
5713 static void
5714 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
5715 {
5716 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
5717 && reg > 15)
5718 {
5719 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
5720 {
5721 if (thumb_mode)
5722 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
5723 fpu_vfp_ext_v3);
5724 else
5725 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
5726 fpu_vfp_ext_v3);
5727 }
5728 else
5729 {
5730 first_error (_("D register out of range for selected VFP version"));
5731 return;
5732 }
5733 }
5734
5735 switch (pos)
5736 {
5737 case VFP_REG_Sd:
5738 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
5739 break;
5740
5741 case VFP_REG_Sn:
5742 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
5743 break;
5744
5745 case VFP_REG_Sm:
5746 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
5747 break;
5748
5749 case VFP_REG_Dd:
5750 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
5751 break;
5752
5753 case VFP_REG_Dn:
5754 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
5755 break;
5756
5757 case VFP_REG_Dm:
5758 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
5759 break;
5760
5761 default:
5762 abort ();
5763 }
5764 }
5765
5766 /* Encode a <shift> in an ARM-format instruction. The immediate,
5767 if any, is handled by md_apply_fix. */
5768 static void
5769 encode_arm_shift (int i)
5770 {
5771 if (inst.operands[i].shift_kind == SHIFT_RRX)
5772 inst.instruction |= SHIFT_ROR << 5;
5773 else
5774 {
5775 inst.instruction |= inst.operands[i].shift_kind << 5;
5776 if (inst.operands[i].immisreg)
5777 {
5778 inst.instruction |= SHIFT_BY_REG;
5779 inst.instruction |= inst.operands[i].imm << 8;
5780 }
5781 else
5782 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5783 }
5784 }
5785
5786 static void
5787 encode_arm_shifter_operand (int i)
5788 {
5789 if (inst.operands[i].isreg)
5790 {
5791 inst.instruction |= inst.operands[i].reg;
5792 encode_arm_shift (i);
5793 }
5794 else
5795 inst.instruction |= INST_IMMEDIATE;
5796 }
5797
5798 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5799 static void
5800 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
5801 {
5802 assert (inst.operands[i].isreg);
5803 inst.instruction |= inst.operands[i].reg << 16;
5804
5805 if (inst.operands[i].preind)
5806 {
5807 if (is_t)
5808 {
5809 inst.error = _("instruction does not accept preindexed addressing");
5810 return;
5811 }
5812 inst.instruction |= PRE_INDEX;
5813 if (inst.operands[i].writeback)
5814 inst.instruction |= WRITE_BACK;
5815
5816 }
5817 else if (inst.operands[i].postind)
5818 {
5819 assert (inst.operands[i].writeback);
5820 if (is_t)
5821 inst.instruction |= WRITE_BACK;
5822 }
5823 else /* unindexed - only for coprocessor */
5824 {
5825 inst.error = _("instruction does not accept unindexed addressing");
5826 return;
5827 }
5828
5829 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
5830 && (((inst.instruction & 0x000f0000) >> 16)
5831 == ((inst.instruction & 0x0000f000) >> 12)))
5832 as_warn ((inst.instruction & LOAD_BIT)
5833 ? _("destination register same as write-back base")
5834 : _("source register same as write-back base"));
5835 }
5836
5837 /* inst.operands[i] was set up by parse_address. Encode it into an
5838 ARM-format mode 2 load or store instruction. If is_t is true,
5839 reject forms that cannot be used with a T instruction (i.e. not
5840 post-indexed). */
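/* For example, "ldr r0, [r1, #4]" leaves the offset in inst.reloc as a
   12-bit immediate to be fixed up later, while "ldr r0, [r1, r2, lsl #2]"
   takes the register-offset path below and encodes the scaled-register
   form of the addressing mode. */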
5841 static void
5842 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
5843 {
5844 encode_arm_addr_mode_common (i, is_t);
5845
5846 if (inst.operands[i].immisreg)
5847 {
5848 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
5849 inst.instruction |= inst.operands[i].imm;
5850 if (!inst.operands[i].negative)
5851 inst.instruction |= INDEX_UP;
5852 if (inst.operands[i].shifted)
5853 {
5854 if (inst.operands[i].shift_kind == SHIFT_RRX)
5855 inst.instruction |= SHIFT_ROR << 5;
5856 else
5857 {
5858 inst.instruction |= inst.operands[i].shift_kind << 5;
5859 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5860 }
5861 }
5862 }
5863 else /* immediate offset in inst.reloc */
5864 {
5865 if (inst.reloc.type == BFD_RELOC_UNUSED)
5866 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
5867 }
5868 }
5869
5870 /* inst.operands[i] was set up by parse_address. Encode it into an
5871 ARM-format mode 3 load or store instruction. Reject forms that
5872 cannot be used with such instructions. If is_t is true, reject
5873 forms that cannot be used with a T instruction (i.e. not
5874 post-indexed). */
5875 static void
5876 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
5877 {
5878 if (inst.operands[i].immisreg && inst.operands[i].shifted)
5879 {
5880 inst.error = _("instruction does not accept scaled register index");
5881 return;
5882 }
5883
5884 encode_arm_addr_mode_common (i, is_t);
5885
5886 if (inst.operands[i].immisreg)
5887 {
5888 inst.instruction |= inst.operands[i].imm;
5889 if (!inst.operands[i].negative)
5890 inst.instruction |= INDEX_UP;
5891 }
5892 else /* immediate offset in inst.reloc */
5893 {
5894 inst.instruction |= HWOFFSET_IMM;
5895 if (inst.reloc.type == BFD_RELOC_UNUSED)
5896 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
5897 }
5898 }
5899
5900 /* inst.operands[i] was set up by parse_address. Encode it into an
5901 ARM-format instruction. Reject all forms which cannot be encoded
5902 into a coprocessor load/store instruction. If wb_ok is false,
5903 reject use of writeback; if unind_ok is false, reject use of
5904 unindexed addressing. If reloc_override is not 0, use it instead
5905 of BFD_RELOC_ARM_CP_OFF_IMM. */
5906
5907 static int
5908 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
5909 {
5910 inst.instruction |= inst.operands[i].reg << 16;
5911
5912 assert (!(inst.operands[i].preind && inst.operands[i].postind));
5913
5914 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
5915 {
5916 assert (!inst.operands[i].writeback);
5917 if (!unind_ok)
5918 {
5919 inst.error = _("instruction does not support unindexed addressing");
5920 return FAIL;
5921 }
5922 inst.instruction |= inst.operands[i].imm;
5923 inst.instruction |= INDEX_UP;
5924 return SUCCESS;
5925 }
5926
5927 if (inst.operands[i].preind)
5928 inst.instruction |= PRE_INDEX;
5929
5930 if (inst.operands[i].writeback)
5931 {
5932 if (inst.operands[i].reg == REG_PC)
5933 {
5934 inst.error = _("pc may not be used with write-back");
5935 return FAIL;
5936 }
5937 if (!wb_ok)
5938 {
5939 inst.error = _("instruction does not support writeback");
5940 return FAIL;
5941 }
5942 inst.instruction |= WRITE_BACK;
5943 }
5944
5945 if (reloc_override)
5946 inst.reloc.type = reloc_override;
5947 else if (thumb_mode)
5948 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
5949 else
5950 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
5951 return SUCCESS;
5952 }
5953
5954 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5955 Determine whether it can be performed with a move instruction; if
5956 it can, convert inst.instruction to that move instruction and
5957 return 1; if it can't, convert inst.instruction to a literal-pool
5958 load and return 0. If this is not a valid thing to do in the
5959 current context, set inst.error and return 1.
5960
5961 inst.operands[i] describes the destination register. */
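/* For example (ARM state), "ldr r0, =1" can be turned into "mov r0, #1",
   "ldr r0, =0xffffffff" into "mvn r0, #0", and a constant that fits
   neither immediate form, such as 0x12345678, falls through to a
   PC-relative literal-pool load. */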
5962
5963 static int
5964 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
5965 {
5966 unsigned long tbit;
5967
5968 if (thumb_p)
5969 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
5970 else
5971 tbit = LOAD_BIT;
5972
5973 if ((inst.instruction & tbit) == 0)
5974 {
5975 inst.error = _("invalid pseudo operation");
5976 return 1;
5977 }
5978 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
5979 {
5980 inst.error = _("constant expression expected");
5981 return 1;
5982 }
5983 if (inst.reloc.exp.X_op == O_constant)
5984 {
5985 if (thumb_p)
5986 {
5987 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
5988 {
5989 /* This can be done with a mov(1) instruction. */
5990 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
5991 inst.instruction |= inst.reloc.exp.X_add_number;
5992 return 1;
5993 }
5994 }
5995 else
5996 {
5997 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
5998 if (value != FAIL)
5999 {
6000 /* This can be done with a mov instruction. */
6001 inst.instruction &= LITERAL_MASK;
6002 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
6003 inst.instruction |= value & 0xfff;
6004 return 1;
6005 }
6006
6007 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
6008 if (value != FAIL)
6009 {
6010 /* This can be done with a mvn instruction. */
6011 inst.instruction &= LITERAL_MASK;
6012 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
6013 inst.instruction |= value & 0xfff;
6014 return 1;
6015 }
6016 }
6017 }
6018
6019 if (add_to_lit_pool () == FAIL)
6020 {
6021 inst.error = _("literal pool insertion failed");
6022 return 1;
6023 }
6024 inst.operands[1].reg = REG_PC;
6025 inst.operands[1].isreg = 1;
6026 inst.operands[1].preind = 1;
6027 inst.reloc.pc_rel = 1;
6028 inst.reloc.type = (thumb_p
6029 ? BFD_RELOC_ARM_THUMB_OFFSET
6030 : (mode_3
6031 ? BFD_RELOC_ARM_HWLITERAL
6032 : BFD_RELOC_ARM_LITERAL));
6033 return 0;
6034 }
6035
6036 /* Functions for instruction encoding, sorted by subarchitecture.
6037 First some generics; their names are taken from the conventional
6038 bit positions for register arguments in ARM format instructions. */
6039
6040 static void
6041 do_noargs (void)
6042 {
6043 }
6044
6045 static void
6046 do_rd (void)
6047 {
6048 inst.instruction |= inst.operands[0].reg << 12;
6049 }
6050
6051 static void
6052 do_rd_rm (void)
6053 {
6054 inst.instruction |= inst.operands[0].reg << 12;
6055 inst.instruction |= inst.operands[1].reg;
6056 }
6057
6058 static void
6059 do_rd_rn (void)
6060 {
6061 inst.instruction |= inst.operands[0].reg << 12;
6062 inst.instruction |= inst.operands[1].reg << 16;
6063 }
6064
6065 static void
6066 do_rn_rd (void)
6067 {
6068 inst.instruction |= inst.operands[0].reg << 16;
6069 inst.instruction |= inst.operands[1].reg << 12;
6070 }
6071
6072 static void
6073 do_rd_rm_rn (void)
6074 {
6075 unsigned Rn = inst.operands[2].reg;
6076 /* Enforce restrictions on SWP instruction. */
6077 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6078 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6079 _("Rn must not overlap other operands"));
6080 inst.instruction |= inst.operands[0].reg << 12;
6081 inst.instruction |= inst.operands[1].reg;
6082 inst.instruction |= Rn << 16;
6083 }
6084
6085 static void
6086 do_rd_rn_rm (void)
6087 {
6088 inst.instruction |= inst.operands[0].reg << 12;
6089 inst.instruction |= inst.operands[1].reg << 16;
6090 inst.instruction |= inst.operands[2].reg;
6091 }
6092
6093 static void
6094 do_rm_rd_rn (void)
6095 {
6096 inst.instruction |= inst.operands[0].reg;
6097 inst.instruction |= inst.operands[1].reg << 12;
6098 inst.instruction |= inst.operands[2].reg << 16;
6099 }
6100
6101 static void
6102 do_imm0 (void)
6103 {
6104 inst.instruction |= inst.operands[0].imm;
6105 }
6106
6107 static void
6108 do_rd_cpaddr (void)
6109 {
6110 inst.instruction |= inst.operands[0].reg << 12;
6111 encode_arm_cp_address (1, TRUE, TRUE, 0);
6112 }
6113
6114 /* ARM instructions, in alphabetical order by function name (except
6115 that wrapper functions appear immediately after the function they
6116 wrap). */
6117
6118 /* This is a pseudo-op of the form "adr rd, label" to be converted
6119 into a relative address of the form "add rd, pc, #label-.-8". */
6120
6121 static void
6122 do_adr (void)
6123 {
6124 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6125
6126 /* Frag hacking will turn this into a sub instruction if the offset turns
6127 out to be negative. */
6128 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6129 inst.reloc.pc_rel = 1;
6130 inst.reloc.exp.X_add_number -= 8;
6131 }
6132
6133 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6134 into a relative address of the form:
6135 "add rd, pc, #low(label-.-8)"
6136 "add rd, rd, #high(label-.-8)" */
6137
6138 static void
6139 do_adrl (void)
6140 {
6141 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6142
6143 /* Frag hacking will turn this into a sub instruction if the offset turns
6144 out to be negative. */
6145 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6146 inst.reloc.pc_rel = 1;
6147 inst.size = INSN_SIZE * 2;
6148 inst.reloc.exp.X_add_number -= 8;
6149 }
6150
6151 static void
6152 do_arit (void)
6153 {
6154 if (!inst.operands[1].present)
6155 inst.operands[1].reg = inst.operands[0].reg;
6156 inst.instruction |= inst.operands[0].reg << 12;
6157 inst.instruction |= inst.operands[1].reg << 16;
6158 encode_arm_shifter_operand (2);
6159 }
6160
6161 static void
6162 do_barrier (void)
6163 {
6164 if (inst.operands[0].present)
6165 {
6166 constraint ((inst.instruction & 0xf0) != 0x40
6167 && inst.operands[0].imm != 0xf,
6168 "bad barrier type");
6169 inst.instruction |= inst.operands[0].imm;
6170 }
6171 else
6172 inst.instruction |= 0xf;
6173 }
6174
6175 static void
6176 do_bfc (void)
6177 {
6178 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6179 constraint (msb > 32, _("bit-field extends past end of register"));
6180 /* The instruction encoding stores the LSB and MSB,
6181 not the LSB and width. */
6182 inst.instruction |= inst.operands[0].reg << 12;
6183 inst.instruction |= inst.operands[1].imm << 7;
6184 inst.instruction |= (msb - 1) << 16;
6185 }
6186
6187 static void
6188 do_bfi (void)
6189 {
6190 unsigned int msb;
6191
6192 /* #0 in second position is alternative syntax for bfc, which is
6193 the same instruction but with REG_PC in the Rm field. */
6194 if (!inst.operands[1].isreg)
6195 inst.operands[1].reg = REG_PC;
6196
6197 msb = inst.operands[2].imm + inst.operands[3].imm;
6198 constraint (msb > 32, _("bit-field extends past end of register"));
6199 /* The instruction encoding stores the LSB and MSB,
6200 not the LSB and width. */
6201 inst.instruction |= inst.operands[0].reg << 12;
6202 inst.instruction |= inst.operands[1].reg;
6203 inst.instruction |= inst.operands[2].imm << 7;
6204 inst.instruction |= (msb - 1) << 16;
6205 }
6206
6207 static void
6208 do_bfx (void)
6209 {
6210 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6211 _("bit-field extends past end of register"));
6212 inst.instruction |= inst.operands[0].reg << 12;
6213 inst.instruction |= inst.operands[1].reg;
6214 inst.instruction |= inst.operands[2].imm << 7;
6215 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6216 }
6217
6218 /* ARM V5 breakpoint instruction (argument parse)
6219 BKPT <16 bit unsigned immediate>
6220 Instruction is not conditional.
6221 The bit pattern given in insns[] has the COND_ALWAYS condition,
6222 and it is an error if the caller tried to override that. */
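/* For example, "bkpt 0xabcd" places 0xabc in bits 19:8 and 0xd in bits
   3:0 of the instruction word, matching the split immediate field of
   the ARM BKPT encoding. */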
6223
6224 static void
6225 do_bkpt (void)
6226 {
6227 /* Top 12 of 16 bits to bits 19:8. */
6228 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6229
6230 /* Bottom 4 of 16 bits to bits 3:0. */
6231 inst.instruction |= inst.operands[0].imm & 0xf;
6232 }
6233
6234 static void
6235 encode_branch (int default_reloc)
6236 {
6237 if (inst.operands[0].hasreloc)
6238 {
6239 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6240 _("the only suffix valid here is '(plt)'"));
6241 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6242 }
6243 else
6244 {
6245 inst.reloc.type = default_reloc;
6246 }
6247 inst.reloc.pc_rel = 1;
6248 }
6249
6250 static void
6251 do_branch (void)
6252 {
6253 #ifdef OBJ_ELF
6254 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6255 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6256 else
6257 #endif
6258 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6259 }
6260
6261 static void
6262 do_bl (void)
6263 {
6264 #ifdef OBJ_ELF
6265 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6266 {
6267 if (inst.cond == COND_ALWAYS)
6268 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6269 else
6270 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6271 }
6272 else
6273 #endif
6274 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6275 }
6276
6277 /* ARM V5 branch-link-exchange instruction (argument parse)
6278 BLX <target_addr> i.e. BLX(1)
6279 BLX{<condition>} <Rm> i.e. BLX(2)
6280 Unfortunately, there are two different opcodes for this mnemonic.
6281 So, the insns[].value is not used, and the code here zaps values
6282 into inst.instruction.
6283 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6284
6285 static void
6286 do_blx (void)
6287 {
6288 if (inst.operands[0].isreg)
6289 {
6290 /* Arg is a register; the opcode provided by insns[] is correct.
6291 It is not illegal to do "blx pc", just useless. */
6292 if (inst.operands[0].reg == REG_PC)
6293 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6294
6295 inst.instruction |= inst.operands[0].reg;
6296 }
6297 else
6298 {
6299 /* Arg is an address; this instruction cannot be executed
6300 conditionally, and the opcode must be adjusted. */
6301 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6302 inst.instruction = 0xfa000000;
6303 #ifdef OBJ_ELF
6304 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6305 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6306 else
6307 #endif
6308 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6309 }
6310 }
6311
6312 static void
6313 do_bx (void)
6314 {
6315 if (inst.operands[0].reg == REG_PC)
6316 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6317
6318 inst.instruction |= inst.operands[0].reg;
6319 }
6320
6321
6322 /* ARM v5TEJ. Jump to Jazelle code. */
6323
6324 static void
6325 do_bxj (void)
6326 {
6327 if (inst.operands[0].reg == REG_PC)
6328 as_tsktsk (_("use of r15 in bxj is not really useful"));
6329
6330 inst.instruction |= inst.operands[0].reg;
6331 }
6332
6333 /* Co-processor data operation:
6334 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6335 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6336 static void
6337 do_cdp (void)
6338 {
6339 inst.instruction |= inst.operands[0].reg << 8;
6340 inst.instruction |= inst.operands[1].imm << 20;
6341 inst.instruction |= inst.operands[2].reg << 12;
6342 inst.instruction |= inst.operands[3].reg << 16;
6343 inst.instruction |= inst.operands[4].reg;
6344 inst.instruction |= inst.operands[5].imm << 5;
6345 }
6346
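/* Comparison instructions: <op>{cond} Rn, <shifter_operand>.  Rn goes in
   bits 19:16; there is no destination register.  */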
6347 static void
6348 do_cmp (void)
6349 {
6350 inst.instruction |= inst.operands[0].reg << 16;
6351 encode_arm_shifter_operand (1);
6352 }
6353
6354 /* Transfer between coprocessor and ARM registers.
6355 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6356 MRC2
6357 MCR{cond}
6358 MCR2
6359
6360 No special properties. */
6361
6362 static void
6363 do_co_reg (void)
6364 {
6365 inst.instruction |= inst.operands[0].reg << 8;
6366 inst.instruction |= inst.operands[1].imm << 21;
6367 inst.instruction |= inst.operands[2].reg << 12;
6368 inst.instruction |= inst.operands[3].reg << 16;
6369 inst.instruction |= inst.operands[4].reg;
6370 inst.instruction |= inst.operands[5].imm << 5;
6371 }
6372
6373 /* Transfer between coprocessor register and pair of ARM registers.
6374 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6375 MCRR2
6376 MRRC{cond}
6377 MRRC2
6378
6379 Two XScale instructions are special cases of these:
6380
6381 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6382 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6383
6384 Result unpredictable if Rd or Rn is R15. */
6385
6386 static void
6387 do_co_reg2c (void)
6388 {
6389 inst.instruction |= inst.operands[0].reg << 8;
6390 inst.instruction |= inst.operands[1].imm << 4;
6391 inst.instruction |= inst.operands[2].reg << 12;
6392 inst.instruction |= inst.operands[3].reg << 16;
6393 inst.instruction |= inst.operands[4].reg;
6394 }
6395
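/* ARM V6 CPS with interrupt flags (argument parse).  The iflags mask
   (operand 0) is placed in bits 8:6 and the mode number (operand 1) in the
   low bits.  */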
6396 static void
6397 do_cpsi (void)
6398 {
6399 inst.instruction |= inst.operands[0].imm << 6;
6400 inst.instruction |= inst.operands[1].imm;
6401 }
6402
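/* ARM V7 DBG hint (argument parse).  The option value occupies the low
   four bits of the instruction.  */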
6403 static void
6404 do_dbg (void)
6405 {
6406 inst.instruction |= inst.operands[0].imm;
6407 }
6408
6409 static void
6410 do_it (void)
6411 {
6412 /* There is no IT instruction in ARM mode. We
6413 process it but do not generate code for it. */
6414 inst.size = 0;
6415 }
6416
6417 static void
6418 do_ldmstm (void)
6419 {
6420 int base_reg = inst.operands[0].reg;
6421 int range = inst.operands[1].imm;
6422
6423 inst.instruction |= base_reg << 16;
6424 inst.instruction |= range;
6425
6426 if (inst.operands[1].writeback)
6427 inst.instruction |= LDM_TYPE_2_OR_3;
6428
6429 if (inst.operands[0].writeback)
6430 {
6431 inst.instruction |= WRITE_BACK;
6432 /* Check for unpredictable uses of writeback. */
6433 if (inst.instruction & LOAD_BIT)
6434 {
6435 /* Not allowed in LDM type 2. */
6436 if ((inst.instruction & LDM_TYPE_2_OR_3)
6437 && ((range & (1 << REG_PC)) == 0))
6438 as_warn (_("writeback of base register is UNPREDICTABLE"));
6439 /* Only allowed if base reg not in list for other types. */
6440 else if (range & (1 << base_reg))
6441 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6442 }
6443 else /* STM. */
6444 {
6445 /* Not allowed for type 2. */
6446 if (inst.instruction & LDM_TYPE_2_OR_3)
6447 as_warn (_("writeback of base register is UNPREDICTABLE"));
6448 /* Only allowed if base reg not in list, or first in list. */
6449 else if ((range & (1 << base_reg))
6450 && (range & ((1 << base_reg) - 1)))
6451 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6452 }
6453 }
6454 }
6455
6456 /* ARMv5TE load-consecutive (argument parse)
6457 Mode is like LDRH.
6458
6459 LDRccD R, mode
6460 STRccD R, mode. */
6461
6462 static void
6463 do_ldrd (void)
6464 {
6465 constraint (inst.operands[0].reg % 2 != 0,
6466 _("first destination register must be even"));
6467 constraint (inst.operands[1].present
6468 && inst.operands[1].reg != inst.operands[0].reg + 1,
6469 _("can only load two consecutive registers"));
6470 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6471 constraint (!inst.operands[2].isreg, _("'[' expected"));
6472
6473 if (!inst.operands[1].present)
6474 inst.operands[1].reg = inst.operands[0].reg + 1;
6475
6476 if (inst.instruction & LOAD_BIT)
6477 {
6478 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6479 register and the first register written; we have to diagnose
6480 overlap between the base and the second register written here. */
6481
6482 if (inst.operands[2].reg == inst.operands[1].reg
6483 && (inst.operands[2].writeback || inst.operands[2].postind))
6484 as_warn (_("base register written back, and overlaps "
6485 "second destination register"));
6486
6487 /* For an index-register load, the index register must not overlap the
6488 destination (even if not write-back). */
6489 else if (inst.operands[2].immisreg
6490 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6491 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6492 as_warn (_("index register overlaps destination register"));
6493 }
6494
6495 inst.instruction |= inst.operands[0].reg << 12;
6496 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6497 }
6498
6499 static void
6500 do_ldrex (void)
6501 {
6502 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6503 || inst.operands[1].postind || inst.operands[1].writeback
6504 || inst.operands[1].immisreg || inst.operands[1].shifted
6505 || inst.operands[1].negative
6506 /* This can arise if the programmer has written
6507 strex rN, rM, foo
6508 or if they have mistakenly used a register name as the last
6509 operand, e.g.:
6510 strex rN, rM, rX
6511 It is very difficult to distinguish between these two cases
6512 because "rX" might actually be a label, i.e. the register
6513 name has been occluded by a symbol of the same name. So we
6514 just generate a general 'bad addressing mode' type error
6515 message and leave it up to the programmer to discover the
6516 true cause and fix their mistake. */
6517 || (inst.operands[1].reg == REG_PC),
6518 BAD_ADDR_MODE);
6519
6520 constraint (inst.reloc.exp.X_op != O_constant
6521 || inst.reloc.exp.X_add_number != 0,
6522 _("offset must be zero in ARM encoding"));
6523
6524 inst.instruction |= inst.operands[0].reg << 12;
6525 inst.instruction |= inst.operands[1].reg << 16;
6526 inst.reloc.type = BFD_RELOC_UNUSED;
6527 }
6528
6529 static void
6530 do_ldrexd (void)
6531 {
6532 constraint (inst.operands[0].reg % 2 != 0,
6533 _("even register required"));
6534 constraint (inst.operands[1].present
6535 && inst.operands[1].reg != inst.operands[0].reg + 1,
6536 _("can only load two consecutive registers"));
6537 /* If op 1 were present and equal to PC, this function wouldn't
6538 have been called in the first place. */
6539 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6540
6541 inst.instruction |= inst.operands[0].reg << 12;
6542 inst.instruction |= inst.operands[2].reg << 16;
6543 }
6544
6545 static void
6546 do_ldst (void)
6547 {
6548 inst.instruction |= inst.operands[0].reg << 12;
6549 if (!inst.operands[1].isreg)
6550 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
6551 return;
6552 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
6553 }
6554
6555 static void
6556 do_ldstt (void)
6557 {
6558 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6559 reject [Rn,...]. */
6560 if (inst.operands[1].preind)
6561 {
6562 constraint (inst.reloc.exp.X_op != O_constant ||
6563 inst.reloc.exp.X_add_number != 0,
6564 _("this instruction requires a post-indexed address"));
6565
6566 inst.operands[1].preind = 0;
6567 inst.operands[1].postind = 1;
6568 inst.operands[1].writeback = 1;
6569 }
6570 inst.instruction |= inst.operands[0].reg << 12;
6571 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
6572 }
6573
6574 /* Halfword and signed-byte load/store operations. */
6575
6576 static void
6577 do_ldstv4 (void)
6578 {
6579 inst.instruction |= inst.operands[0].reg << 12;
6580 if (!inst.operands[1].isreg)
6581 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
6582 return;
6583 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
6584 }
6585
6586 static void
6587 do_ldsttv4 (void)
6588 {
6589 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6590 reject [Rn,...]. */
6591 if (inst.operands[1].preind)
6592 {
6593 constraint (inst.reloc.exp.X_op != O_constant ||
6594 inst.reloc.exp.X_add_number != 0,
6595 _("this instruction requires a post-indexed address"));
6596
6597 inst.operands[1].preind = 0;
6598 inst.operands[1].postind = 1;
6599 inst.operands[1].writeback = 1;
6600 }
6601 inst.instruction |= inst.operands[0].reg << 12;
6602 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
6603 }
6604
6605 /* Co-processor register load/store.
6606 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
6607 static void
6608 do_lstc (void)
6609 {
6610 inst.instruction |= inst.operands[0].reg << 8;
6611 inst.instruction |= inst.operands[1].reg << 12;
6612 encode_arm_cp_address (2, TRUE, TRUE, 0);
6613 }
6614
6615 static void
6616 do_mlas (void)
6617 {
6618 /* This restriction does not apply to mls (nor to mla in v6, but
6619 that's hard to detect at present). */
6620 if (inst.operands[0].reg == inst.operands[1].reg
6621 && !(inst.instruction & 0x00400000))
6622 as_tsktsk (_("rd and rm should be different in mla"));
6623
6624 inst.instruction |= inst.operands[0].reg << 16;
6625 inst.instruction |= inst.operands[1].reg;
6626 inst.instruction |= inst.operands[2].reg << 8;
6627 inst.instruction |= inst.operands[3].reg << 12;
6628
6629 }
6630
6631 static void
6632 do_mov (void)
6633 {
6634 inst.instruction |= inst.operands[0].reg << 12;
6635 encode_arm_shifter_operand (1);
6636 }
6637
6638 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
6639 static void
6640 do_mov16 (void)
6641 {
6642 bfd_vma imm;
6643 bfd_boolean top;
6644
6645 top = (inst.instruction & 0x00400000) != 0;
6646 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
6647 _(":lower16: not allowed in this instruction"));
6648 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
6649 _(":upper16: not allowed in this instruction"));
6650 inst.instruction |= inst.operands[0].reg << 12;
6651 if (inst.reloc.type == BFD_RELOC_UNUSED)
6652 {
6653 imm = inst.reloc.exp.X_add_number;
6654 /* The value is in two pieces: 0:11, 16:19. */
6655 inst.instruction |= (imm & 0x00000fff);
6656 inst.instruction |= (imm & 0x0000f000) << 4;
6657 }
6658 }
6659
6660 static void do_vfp_nsyn_opcode (const char *);
6661
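/* Handle MRS/MSR forms whose operands actually name VFP system registers:
   dispatch to the equivalent VFP opcode (fmstat, fmrx or fmxr) and return
   SUCCESS, or return FAIL so that the caller encodes an ordinary MRS/MSR.  */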
6662 static int
6663 do_vfp_nsyn_mrs (void)
6664 {
6665 if (inst.operands[0].isvec)
6666 {
6667 if (inst.operands[1].reg != 1)
6668 first_error (_("operand 1 must be FPSCR"));
6669 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
6670 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
6671 do_vfp_nsyn_opcode ("fmstat");
6672 }
6673 else if (inst.operands[1].isvec)
6674 do_vfp_nsyn_opcode ("fmrx");
6675 else
6676 return FAIL;
6677
6678 return SUCCESS;
6679 }
6680
6681 static int
6682 do_vfp_nsyn_msr (void)
6683 {
6684 if (inst.operands[0].isvec)
6685 do_vfp_nsyn_opcode ("fmxr");
6686 else
6687 return FAIL;
6688
6689 return SUCCESS;
6690 }
6691
6692 static void
6693 do_mrs (void)
6694 {
6695 if (do_vfp_nsyn_mrs () == SUCCESS)
6696 return;
6697
6698 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6699 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
6700 != (PSR_c|PSR_f),
6701 _("'CPSR' or 'SPSR' expected"));
6702 inst.instruction |= inst.operands[0].reg << 12;
6703 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
6704 }
6705
6706 /* Two possible forms:
6707 "{C|S}PSR_<field>, Rm",
6708 "{C|S}PSR_f, #expression". */
6709
6710 static void
6711 do_msr (void)
6712 {
6713 if (do_vfp_nsyn_msr () == SUCCESS)
6714 return;
6715
6716 inst.instruction |= inst.operands[0].imm;
6717 if (inst.operands[1].isreg)
6718 inst.instruction |= inst.operands[1].reg;
6719 else
6720 {
6721 inst.instruction |= INST_IMMEDIATE;
6722 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6723 inst.reloc.pc_rel = 0;
6724 }
6725 }
6726
6727 static void
6728 do_mul (void)
6729 {
6730 if (!inst.operands[2].present)
6731 inst.operands[2].reg = inst.operands[0].reg;
6732 inst.instruction |= inst.operands[0].reg << 16;
6733 inst.instruction |= inst.operands[1].reg;
6734 inst.instruction |= inst.operands[2].reg << 8;
6735
6736 if (inst.operands[0].reg == inst.operands[1].reg)
6737 as_tsktsk (_("rd and rm should be different in mul"));
6738 }
6739
6740 /* Long Multiply Parser
6741 UMULL RdLo, RdHi, Rm, Rs
6742 SMULL RdLo, RdHi, Rm, Rs
6743 UMLAL RdLo, RdHi, Rm, Rs
6744 SMLAL RdLo, RdHi, Rm, Rs. */
6745
6746 static void
6747 do_mull (void)
6748 {
6749 inst.instruction |= inst.operands[0].reg << 12;
6750 inst.instruction |= inst.operands[1].reg << 16;
6751 inst.instruction |= inst.operands[2].reg;
6752 inst.instruction |= inst.operands[3].reg << 8;
6753
6754 /* rdhi, rdlo and rm must all be different. */
6755 if (inst.operands[0].reg == inst.operands[1].reg
6756 || inst.operands[0].reg == inst.operands[2].reg
6757 || inst.operands[1].reg == inst.operands[2].reg)
6758 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6759 }
6760
6761 static void
6762 do_nop (void)
6763 {
6764 if (inst.operands[0].present)
6765 {
6766 /* Architectural NOP hints are CPSR sets with no bits selected. */
6767 inst.instruction &= 0xf0000000;
6768 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
6769 }
6770 }
6771
6772 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6773 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6774 Condition defaults to COND_ALWAYS.
6775 Error if Rd, Rn or Rm are R15. */
6776
6777 static void
6778 do_pkhbt (void)
6779 {
6780 inst.instruction |= inst.operands[0].reg << 12;
6781 inst.instruction |= inst.operands[1].reg << 16;
6782 inst.instruction |= inst.operands[2].reg;
6783 if (inst.operands[3].present)
6784 encode_arm_shift (3);
6785 }
6786
6787 /* ARM V6 PKHTB (Argument Parse). */
6788
6789 static void
6790 do_pkhtb (void)
6791 {
6792 if (!inst.operands[3].present)
6793 {
6794 /* If the shift specifier is omitted, turn the instruction
6795 into pkhbt rd, rm, rn. */
6796 inst.instruction &= 0xfff00010;
6797 inst.instruction |= inst.operands[0].reg << 12;
6798 inst.instruction |= inst.operands[1].reg;
6799 inst.instruction |= inst.operands[2].reg << 16;
6800 }
6801 else
6802 {
6803 inst.instruction |= inst.operands[0].reg << 12;
6804 inst.instruction |= inst.operands[1].reg << 16;
6805 inst.instruction |= inst.operands[2].reg;
6806 encode_arm_shift (3);
6807 }
6808 }
6809
6810 /* ARMv5TE: Preload-Cache
6811
6812 PLD <addr_mode>
6813
6814 Syntactically, like LDR with B=1, W=0, L=1. */
6815
6816 static void
6817 do_pld (void)
6818 {
6819 constraint (!inst.operands[0].isreg,
6820 _("'[' expected after PLD mnemonic"));
6821 constraint (inst.operands[0].postind,
6822 _("post-indexed expression used in preload instruction"));
6823 constraint (inst.operands[0].writeback,
6824 _("writeback used in preload instruction"));
6825 constraint (!inst.operands[0].preind,
6826 _("unindexed addressing used in preload instruction"));
6827 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6828 }
6829
6830 /* ARMv7: PLI <addr_mode> */
6831 static void
6832 do_pli (void)
6833 {
6834 constraint (!inst.operands[0].isreg,
6835 _("'[' expected after PLI mnemonic"));
6836 constraint (inst.operands[0].postind,
6837 _("post-indexed expression used in preload instruction"));
6838 constraint (inst.operands[0].writeback,
6839 _("writeback used in preload instruction"));
6840 constraint (!inst.operands[0].preind,
6841 _("unindexed addressing used in preload instruction"));
6842 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6843 inst.instruction &= ~PRE_INDEX;
6844 }
6845
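/* PUSH and POP are synonyms for STMDB sp!, <reglist> and LDMIA sp!,
   <reglist> respectively: synthesize the SP base operand with writeback
   and reuse do_ldmstm.  */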
6846 static void
6847 do_push_pop (void)
6848 {
6849 inst.operands[1] = inst.operands[0];
6850 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
6851 inst.operands[0].isreg = 1;
6852 inst.operands[0].writeback = 1;
6853 inst.operands[0].reg = REG_SP;
6854 do_ldmstm ();
6855 }
6856
6857 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6858 word at the specified address and the following word
6859 respectively.
6860 Unconditionally executed.
6861 Error if Rn is R15. */
6862
6863 static void
6864 do_rfe (void)
6865 {
6866 inst.instruction |= inst.operands[0].reg << 16;
6867 if (inst.operands[0].writeback)
6868 inst.instruction |= WRITE_BACK;
6869 }
6870
6871 /* ARM V6 ssat (argument parse). */
6872
6873 static void
6874 do_ssat (void)
6875 {
6876 inst.instruction |= inst.operands[0].reg << 12;
6877 inst.instruction |= (inst.operands[1].imm - 1) << 16;
6878 inst.instruction |= inst.operands[2].reg;
6879
6880 if (inst.operands[3].present)
6881 encode_arm_shift (3);
6882 }
6883
6884 /* ARM V6 usat (argument parse). */
6885
6886 static void
6887 do_usat (void)
6888 {
6889 inst.instruction |= inst.operands[0].reg << 12;
6890 inst.instruction |= inst.operands[1].imm << 16;
6891 inst.instruction |= inst.operands[2].reg;
6892
6893 if (inst.operands[3].present)
6894 encode_arm_shift (3);
6895 }
6896
6897 /* ARM V6 ssat16 (argument parse). */
6898
6899 static void
6900 do_ssat16 (void)
6901 {
6902 inst.instruction |= inst.operands[0].reg << 12;
6903 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
6904 inst.instruction |= inst.operands[2].reg;
6905 }
6906
6907 static void
6908 do_usat16 (void)
6909 {
6910 inst.instruction |= inst.operands[0].reg << 12;
6911 inst.instruction |= inst.operands[1].imm << 16;
6912 inst.instruction |= inst.operands[2].reg;
6913 }
6914
6915 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6916 preserving the other bits.
6917
6918 setend <endian_specifier>, where <endian_specifier> is either
6919 BE or LE. */
6920
6921 static void
6922 do_setend (void)
6923 {
6924 if (inst.operands[0].imm)
6925 inst.instruction |= 0x200;
6926 }
6927
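/* Shift mnemonics (ASR/LSL/LSR/ROR): Rd, {Rm,} Rs or #imm.  Rm defaults to
   Rd.  A register shift amount sets SHIFT_BY_REG; an immediate shift is
   resolved later via the BFD_RELOC_ARM_SHIFT_IMM fixup.  */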
6928 static void
6929 do_shift (void)
6930 {
6931 unsigned int Rm = (inst.operands[1].present
6932 ? inst.operands[1].reg
6933 : inst.operands[0].reg);
6934
6935 inst.instruction |= inst.operands[0].reg << 12;
6936 inst.instruction |= Rm;
6937 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
6938 {
6939 inst.instruction |= inst.operands[2].reg << 8;
6940 inst.instruction |= SHIFT_BY_REG;
6941 }
6942 else
6943 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6944 }
6945
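/* SMC and SWI/SVC: the call number is applied by the corresponding fixup
   (BFD_RELOC_ARM_SMC, BFD_RELOC_ARM_SWI).  */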
6946 static void
6947 do_smc (void)
6948 {
6949 inst.reloc.type = BFD_RELOC_ARM_SMC;
6950 inst.reloc.pc_rel = 0;
6951 }
6952
6953 static void
6954 do_swi (void)
6955 {
6956 inst.reloc.type = BFD_RELOC_ARM_SWI;
6957 inst.reloc.pc_rel = 0;
6958 }
6959
6960 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6961 SMLAxy{cond} Rd,Rm,Rs,Rn
6962 SMLAWy{cond} Rd,Rm,Rs,Rn
6963 Error if any register is R15. */
6964
6965 static void
6966 do_smla (void)
6967 {
6968 inst.instruction |= inst.operands[0].reg << 16;
6969 inst.instruction |= inst.operands[1].reg;
6970 inst.instruction |= inst.operands[2].reg << 8;
6971 inst.instruction |= inst.operands[3].reg << 12;
6972 }
6973
6974 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6975 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6976 Error if any register is R15.
6977 Warning if Rdlo == Rdhi. */
6978
6979 static void
6980 do_smlal (void)
6981 {
6982 inst.instruction |= inst.operands[0].reg << 12;
6983 inst.instruction |= inst.operands[1].reg << 16;
6984 inst.instruction |= inst.operands[2].reg;
6985 inst.instruction |= inst.operands[3].reg << 8;
6986
6987 if (inst.operands[0].reg == inst.operands[1].reg)
6988 as_tsktsk (_("rdhi and rdlo must be different"));
6989 }
6990
6991 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6992 SMULxy{cond} Rd,Rm,Rs
6993 Error if any register is R15. */
6994
6995 static void
6996 do_smul (void)
6997 {
6998 inst.instruction |= inst.operands[0].reg << 16;
6999 inst.instruction |= inst.operands[1].reg;
7000 inst.instruction |= inst.operands[2].reg << 8;
7001 }
7002
7003 /* ARM V6 srs (argument parse). */
7004
7005 static void
7006 do_srs (void)
7007 {
7008 inst.instruction |= inst.operands[0].imm;
7009 if (inst.operands[0].writeback)
7010 inst.instruction |= WRITE_BACK;
7011 }
7012
7013 /* ARM V6 strex (argument parse). */
7014
7015 static void
7016 do_strex (void)
7017 {
7018 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
7019 || inst.operands[2].postind || inst.operands[2].writeback
7020 || inst.operands[2].immisreg || inst.operands[2].shifted
7021 || inst.operands[2].negative
7022 /* See comment in do_ldrex(). */
7023 || (inst.operands[2].reg == REG_PC),
7024 BAD_ADDR_MODE);
7025
7026 constraint (inst.operands[0].reg == inst.operands[1].reg
7027 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
7028
7029 constraint (inst.reloc.exp.X_op != O_constant
7030 || inst.reloc.exp.X_add_number != 0,
7031 _("offset must be zero in ARM encoding"));
7032
7033 inst.instruction |= inst.operands[0].reg << 12;
7034 inst.instruction |= inst.operands[1].reg;
7035 inst.instruction |= inst.operands[2].reg << 16;
7036 inst.reloc.type = BFD_RELOC_UNUSED;
7037 }
7038
7039 static void
7040 do_strexd (void)
7041 {
7042 constraint (inst.operands[1].reg % 2 != 0,
7043 _("even register required"));
7044 constraint (inst.operands[2].present
7045 && inst.operands[2].reg != inst.operands[1].reg + 1,
7046 _("can only store two consecutive registers"));
7047 /* If op 2 were present and equal to PC, this function wouldn't
7048 have been called in the first place. */
7049 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
7050
7051 constraint (inst.operands[0].reg == inst.operands[1].reg
7052 || inst.operands[0].reg == inst.operands[1].reg + 1
7053 || inst.operands[0].reg == inst.operands[3].reg,
7054 BAD_OVERLAP);
7055
7056 inst.instruction |= inst.operands[0].reg << 12;
7057 inst.instruction |= inst.operands[1].reg;
7058 inst.instruction |= inst.operands[3].reg << 16;
7059 }
7060
7061 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7062 extends it to 32-bits, and adds the result to a value in another
7063 register. You can specify a rotation by 0, 8, 16, or 24 bits
7064 before extracting the 16-bit value.
7065 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7066 Condition defaults to COND_ALWAYS.
7067 Error if any register uses R15. */
7068
7069 static void
7070 do_sxtah (void)
7071 {
7072 inst.instruction |= inst.operands[0].reg << 12;
7073 inst.instruction |= inst.operands[1].reg << 16;
7074 inst.instruction |= inst.operands[2].reg;
7075 inst.instruction |= inst.operands[3].imm << 10;
7076 }
7077
7078 /* ARM V6 SXTH.
7079
7080 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7081 Condition defaults to COND_ALWAYS.
7082 Error if any register uses R15. */
7083
7084 static void
7085 do_sxth (void)
7086 {
7087 inst.instruction |= inst.operands[0].reg << 12;
7088 inst.instruction |= inst.operands[1].reg;
7089 inst.instruction |= inst.operands[2].imm << 10;
7090 }
7091 \f
7092 /* VFP instructions. In a logical order: SP variant first, monad
7093 before dyad, arithmetic then move then load/store. */
7094
7095 static void
7096 do_vfp_sp_monadic (void)
7097 {
7098 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7099 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7100 }
7101
7102 static void
7103 do_vfp_sp_dyadic (void)
7104 {
7105 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7106 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7107 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7108 }
7109
7110 static void
7111 do_vfp_sp_compare_z (void)
7112 {
7113 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7114 }
7115
7116 static void
7117 do_vfp_dp_sp_cvt (void)
7118 {
7119 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7120 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7121 }
7122
7123 static void
7124 do_vfp_sp_dp_cvt (void)
7125 {
7126 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7127 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7128 }
7129
7130 static void
7131 do_vfp_reg_from_sp (void)
7132 {
7133 inst.instruction |= inst.operands[0].reg << 12;
7134 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7135 }
7136
7137 static void
7138 do_vfp_reg2_from_sp2 (void)
7139 {
7140 constraint (inst.operands[2].imm != 2,
7141 _("only two consecutive VFP SP registers allowed here"));
7142 inst.instruction |= inst.operands[0].reg << 12;
7143 inst.instruction |= inst.operands[1].reg << 16;
7144 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7145 }
7146
7147 static void
7148 do_vfp_sp_from_reg (void)
7149 {
7150 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
7151 inst.instruction |= inst.operands[1].reg << 12;
7152 }
7153
7154 static void
7155 do_vfp_sp2_from_reg2 (void)
7156 {
7157 constraint (inst.operands[0].imm != 2,
7158 _("only two consecutive VFP SP registers allowed here"));
7159 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
7160 inst.instruction |= inst.operands[1].reg << 12;
7161 inst.instruction |= inst.operands[2].reg << 16;
7162 }
7163
7164 static void
7165 do_vfp_sp_ldst (void)
7166 {
7167 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7168 encode_arm_cp_address (1, FALSE, TRUE, 0);
7169 }
7170
7171 static void
7172 do_vfp_dp_ldst (void)
7173 {
7174 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7175 encode_arm_cp_address (1, FALSE, TRUE, 0);
7176 }
7177
7178
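/* VFP multiple load/store (FLDM/FSTM family).  The register count occupies
   the low bits of the instruction; in the double-precision forms each
   register counts as two words, plus one more for the FLDMX/FSTMX format.
   The decrement-before forms require base-register writeback.  */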
7179 static void
7180 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7181 {
7182 if (inst.operands[0].writeback)
7183 inst.instruction |= WRITE_BACK;
7184 else
7185 constraint (ldstm_type != VFP_LDSTMIA,
7186 _("this addressing mode requires base-register writeback"));
7187 inst.instruction |= inst.operands[0].reg << 16;
7188 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7189 inst.instruction |= inst.operands[1].imm;
7190 }
7191
7192 static void
7193 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7194 {
7195 int count;
7196
7197 if (inst.operands[0].writeback)
7198 inst.instruction |= WRITE_BACK;
7199 else
7200 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7201 _("this addressing mode requires base-register writeback"));
7202
7203 inst.instruction |= inst.operands[0].reg << 16;
7204 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7205
7206 count = inst.operands[1].imm << 1;
7207 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7208 count += 1;
7209
7210 inst.instruction |= count;
7211 }
7212
7213 static void
7214 do_vfp_sp_ldstmia (void)
7215 {
7216 vfp_sp_ldstm (VFP_LDSTMIA);
7217 }
7218
7219 static void
7220 do_vfp_sp_ldstmdb (void)
7221 {
7222 vfp_sp_ldstm (VFP_LDSTMDB);
7223 }
7224
7225 static void
7226 do_vfp_dp_ldstmia (void)
7227 {
7228 vfp_dp_ldstm (VFP_LDSTMIA);
7229 }
7230
7231 static void
7232 do_vfp_dp_ldstmdb (void)
7233 {
7234 vfp_dp_ldstm (VFP_LDSTMDB);
7235 }
7236
7237 static void
7238 do_vfp_xp_ldstmia (void)
7239 {
7240 vfp_dp_ldstm (VFP_LDSTMIAX);
7241 }
7242
7243 static void
7244 do_vfp_xp_ldstmdb (void)
7245 {
7246 vfp_dp_ldstm (VFP_LDSTMDBX);
7247 }
7248
7249 static void
7250 do_vfp_dp_rd_rm (void)
7251 {
7252 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7253 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7254 }
7255
7256 static void
7257 do_vfp_dp_rn_rd (void)
7258 {
7259 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7260 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7261 }
7262
7263 static void
7264 do_vfp_dp_rd_rn (void)
7265 {
7266 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7267 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7268 }
7269
7270 static void
7271 do_vfp_dp_rd_rn_rm (void)
7272 {
7273 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7274 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7275 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7276 }
7277
7278 static void
7279 do_vfp_dp_rd (void)
7280 {
7281 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7282 }
7283
7284 static void
7285 do_vfp_dp_rm_rd_rn (void)
7286 {
7287 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7288 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7289 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7290 }
7291
7292 /* VFPv3 instructions. */
7293 static void
7294 do_vfp_sp_const (void)
7295 {
7296 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7297 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7298 inst.instruction |= (inst.operands[1].imm >> 4);
7299 }
7300
7301 static void
7302 do_vfp_dp_const (void)
7303 {
7304 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7305 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7306 inst.instruction |= (inst.operands[1].imm >> 4);
7307 }
7308
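/* VCVT between floating-point and fixed-point: the immediate field encodes
   (srcsize - #fbits), with its least significant bit in bit 5 and the
   remaining bits in the low four bits of the instruction.  */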
7309 static void
7310 vfp_conv (int srcsize)
7311 {
7312 unsigned immbits = srcsize - inst.operands[1].imm;
7313 inst.instruction |= (immbits & 1) << 5;
7314 inst.instruction |= (immbits >> 1);
7315 }
7316
7317 static void
7318 do_vfp_sp_conv_16 (void)
7319 {
7320 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7321 vfp_conv (16);
7322 }
7323
7324 static void
7325 do_vfp_dp_conv_16 (void)
7326 {
7327 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7328 vfp_conv (16);
7329 }
7330
7331 static void
7332 do_vfp_sp_conv_32 (void)
7333 {
7334 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7335 vfp_conv (32);
7336 }
7337
7338 static void
7339 do_vfp_dp_conv_32 (void)
7340 {
7341 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7342 vfp_conv (32);
7343 }
7344
7345 \f
7346 /* FPA instructions. Also in a logical order. */
7347
7348 static void
7349 do_fpa_cmp (void)
7350 {
7351 inst.instruction |= inst.operands[0].reg << 16;
7352 inst.instruction |= inst.operands[1].reg;
7353 }
7354
7355 static void
7356 do_fpa_ldmstm (void)
7357 {
7358 inst.instruction |= inst.operands[0].reg << 12;
7359 switch (inst.operands[1].imm)
7360 {
7361 case 1: inst.instruction |= CP_T_X; break;
7362 case 2: inst.instruction |= CP_T_Y; break;
7363 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7364 case 4: break;
7365 default: abort ();
7366 }
7367
7368 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7369 {
7370 /* The instruction specified "ea" or "fd", so we can only accept
7371 [Rn]{!}. The instruction does not really support stacking or
7372 unstacking, so we have to emulate these by setting appropriate
7373 bits and offsets. */
7374 constraint (inst.reloc.exp.X_op != O_constant
7375 || inst.reloc.exp.X_add_number != 0,
7376 _("this instruction does not support indexing"));
7377
7378 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7379 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7380
7381 if (!(inst.instruction & INDEX_UP))
7382 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7383
7384 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7385 {
7386 inst.operands[2].preind = 0;
7387 inst.operands[2].postind = 1;
7388 }
7389 }
7390
7391 encode_arm_cp_address (2, TRUE, TRUE, 0);
7392 }
7393
7394 \f
7395 /* iWMMXt instructions: strictly in alphabetical order. */
7396
7397 static void
7398 do_iwmmxt_tandorc (void)
7399 {
7400 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7401 }
7402
7403 static void
7404 do_iwmmxt_textrc (void)
7405 {
7406 inst.instruction |= inst.operands[0].reg << 12;
7407 inst.instruction |= inst.operands[1].imm;
7408 }
7409
7410 static void
7411 do_iwmmxt_textrm (void)
7412 {
7413 inst.instruction |= inst.operands[0].reg << 12;
7414 inst.instruction |= inst.operands[1].reg << 16;
7415 inst.instruction |= inst.operands[2].imm;
7416 }
7417
7418 static void
7419 do_iwmmxt_tinsr (void)
7420 {
7421 inst.instruction |= inst.operands[0].reg << 16;
7422 inst.instruction |= inst.operands[1].reg << 12;
7423 inst.instruction |= inst.operands[2].imm;
7424 }
7425
7426 static void
7427 do_iwmmxt_tmia (void)
7428 {
7429 inst.instruction |= inst.operands[0].reg << 5;
7430 inst.instruction |= inst.operands[1].reg;
7431 inst.instruction |= inst.operands[2].reg << 12;
7432 }
7433
7434 static void
7435 do_iwmmxt_waligni (void)
7436 {
7437 inst.instruction |= inst.operands[0].reg << 12;
7438 inst.instruction |= inst.operands[1].reg << 16;
7439 inst.instruction |= inst.operands[2].reg;
7440 inst.instruction |= inst.operands[3].imm << 20;
7441 }
7442
7443 static void
7444 do_iwmmxt_wmov (void)
7445 {
7446 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7447 inst.instruction |= inst.operands[0].reg << 12;
7448 inst.instruction |= inst.operands[1].reg << 16;
7449 inst.instruction |= inst.operands[1].reg;
7450 }
7451
7452 static void
7453 do_iwmmxt_wldstbh (void)
7454 {
7455 int reloc;
7456 inst.instruction |= inst.operands[0].reg << 12;
7457 if (thumb_mode)
7458 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7459 else
7460 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7461 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7462 }
7463
7464 static void
7465 do_iwmmxt_wldstw (void)
7466 {
7467 /* RIWR_RIWC clears .isreg for a control register. */
7468 if (!inst.operands[0].isreg)
7469 {
7470 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7471 inst.instruction |= 0xf0000000;
7472 }
7473
7474 inst.instruction |= inst.operands[0].reg << 12;
7475 encode_arm_cp_address (1, TRUE, TRUE, 0);
7476 }
7477
7478 static void
7479 do_iwmmxt_wldstd (void)
7480 {
7481 inst.instruction |= inst.operands[0].reg << 12;
7482 encode_arm_cp_address (1, TRUE, FALSE, 0);
7483 }
7484
7485 static void
7486 do_iwmmxt_wshufh (void)
7487 {
7488 inst.instruction |= inst.operands[0].reg << 12;
7489 inst.instruction |= inst.operands[1].reg << 16;
7490 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7491 inst.instruction |= (inst.operands[2].imm & 0x0f);
7492 }
7493
7494 static void
7495 do_iwmmxt_wzero (void)
7496 {
7497 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7498 inst.instruction |= inst.operands[0].reg;
7499 inst.instruction |= inst.operands[0].reg << 12;
7500 inst.instruction |= inst.operands[0].reg << 16;
7501 }
7502 \f
7503 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7504 operations first, then control, shift, and load/store. */
7505
7506 /* Insns like "foo X,Y,Z". */
7507
7508 static void
7509 do_mav_triple (void)
7510 {
7511 inst.instruction |= inst.operands[0].reg << 16;
7512 inst.instruction |= inst.operands[1].reg;
7513 inst.instruction |= inst.operands[2].reg << 12;
7514 }
7515
7516 /* Insns like "foo W,X,Y,Z".
7517 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7518
7519 static void
7520 do_mav_quad (void)
7521 {
7522 inst.instruction |= inst.operands[0].reg << 5;
7523 inst.instruction |= inst.operands[1].reg << 12;
7524 inst.instruction |= inst.operands[2].reg << 16;
7525 inst.instruction |= inst.operands[3].reg;
7526 }
7527
7528 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7529 static void
7530 do_mav_dspsc (void)
7531 {
7532 inst.instruction |= inst.operands[1].reg << 12;
7533 }
7534
7535 /* Maverick shift immediate instructions.
7536 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7537 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7538
7539 static void
7540 do_mav_shift (void)
7541 {
7542 int imm = inst.operands[2].imm;
7543
7544 inst.instruction |= inst.operands[0].reg << 12;
7545 inst.instruction |= inst.operands[1].reg << 16;
7546
7547 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7548 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7549 Bit 4 should be 0. */
7550 imm = (imm & 0xf) | ((imm & 0x70) << 1);
7551
7552 inst.instruction |= imm;
7553 }
7554 \f
7555 /* XScale instructions. Also sorted arithmetic before move. */
7556
7557 /* Xscale multiply-accumulate (argument parse)
7558 MIAcc acc0,Rm,Rs
7559 MIAPHcc acc0,Rm,Rs
7560 MIAxycc acc0,Rm,Rs. */
7561
7562 static void
7563 do_xsc_mia (void)
7564 {
7565 inst.instruction |= inst.operands[1].reg;
7566 inst.instruction |= inst.operands[2].reg << 12;
7567 }
7568
7569 /* Xscale move-accumulator-register (argument parse)
7570
7571 MARcc acc0,RdLo,RdHi. */
7572
7573 static void
7574 do_xsc_mar (void)
7575 {
7576 inst.instruction |= inst.operands[1].reg << 12;
7577 inst.instruction |= inst.operands[2].reg << 16;
7578 }
7579
7580 /* Xscale move-register-accumulator (argument parse)
7581
7582 MRAcc RdLo,RdHi,acc0. */
7583
7584 static void
7585 do_xsc_mra (void)
7586 {
7587 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
7588 inst.instruction |= inst.operands[0].reg << 12;
7589 inst.instruction |= inst.operands[1].reg << 16;
7590 }
7591 \f
7592 /* Encoding functions relevant only to Thumb. */
7593
7594 /* inst.operands[i] is a shifted-register operand; encode
7595 it into inst.instruction in the format used by Thumb32. */
7596
7597 static void
7598 encode_thumb32_shifted_operand (int i)
7599 {
7600 unsigned int value = inst.reloc.exp.X_add_number;
7601 unsigned int shift = inst.operands[i].shift_kind;
7602
7603 constraint (inst.operands[i].immisreg,
7604 _("shift by register not allowed in thumb mode"));
7605 inst.instruction |= inst.operands[i].reg;
7606 if (shift == SHIFT_RRX)
7607 inst.instruction |= SHIFT_ROR << 4;
7608 else
7609 {
7610 constraint (inst.reloc.exp.X_op != O_constant,
7611 _("expression too complex"));
7612
7613 constraint (value > 32
7614 || (value == 32 && (shift == SHIFT_LSL
7615 || shift == SHIFT_ROR)),
7616 _("shift expression is too large"));
7617
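      /* A shift amount of zero is canonicalized to LSL #0; LSR #32 and
	 ASR #32 are encoded with an immediate field of zero.  */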
7618 if (value == 0)
7619 shift = SHIFT_LSL;
7620 else if (value == 32)
7621 value = 0;
7622
7623 inst.instruction |= shift << 4;
7624 inst.instruction |= (value & 0x1c) << 10;
7625 inst.instruction |= (value & 0x03) << 6;
7626 }
7627 }
7628
7629
7630 /* inst.operands[i] was set up by parse_address. Encode it into a
7631 Thumb32 format load or store instruction. Reject forms that cannot
7632 be used with such instructions. If is_t is true, reject forms that
7633 cannot be used with a T instruction; if is_d is true, reject forms
7634 that cannot be used with a D instruction. */
7635
7636 static void
7637 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
7638 {
7639 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7640
7641 constraint (!inst.operands[i].isreg,
7642 _("Instruction does not support =N addresses"));
7643
7644 inst.instruction |= inst.operands[i].reg << 16;
7645 if (inst.operands[i].immisreg)
7646 {
7647 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
7648 constraint (is_t || is_d, _("cannot use register index with this instruction"));
7649 constraint (inst.operands[i].negative,
7650 _("Thumb does not support negative register indexing"));
7651 constraint (inst.operands[i].postind,
7652 _("Thumb does not support register post-indexing"));
7653 constraint (inst.operands[i].writeback,
7654 _("Thumb does not support register indexing with writeback"));
7655 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
7656 _("Thumb supports only LSL in shifted register indexing"));
7657
7658 inst.instruction |= inst.operands[i].imm;
7659 if (inst.operands[i].shifted)
7660 {
7661 constraint (inst.reloc.exp.X_op != O_constant,
7662 _("expression too complex"));
7663 constraint (inst.reloc.exp.X_add_number < 0
7664 || inst.reloc.exp.X_add_number > 3,
7665 _("shift out of range"));
7666 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7667 }
7668 inst.reloc.type = BFD_RELOC_UNUSED;
7669 }
7670 else if (inst.operands[i].preind)
7671 {
7672 constraint (is_pc && inst.operands[i].writeback,
7673 _("cannot use writeback with PC-relative addressing"));
7674 constraint (is_t && inst.operands[i].writeback,
7675 _("cannot use writeback with this instruction"));
7676
7677 if (is_d)
7678 {
7679 inst.instruction |= 0x01000000;
7680 if (inst.operands[i].writeback)
7681 inst.instruction |= 0x00200000;
7682 }
7683 else
7684 {
7685 inst.instruction |= 0x00000c00;
7686 if (inst.operands[i].writeback)
7687 inst.instruction |= 0x00000100;
7688 }
7689 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7690 }
7691 else if (inst.operands[i].postind)
7692 {
7693 assert (inst.operands[i].writeback);
7694 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
7695 constraint (is_t, _("cannot use post-indexing with this instruction"));
7696
7697 if (is_d)
7698 inst.instruction |= 0x00200000;
7699 else
7700 inst.instruction |= 0x00000900;
7701 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7702 }
7703 else /* unindexed - only for coprocessor */
7704 inst.error = _("instruction does not accept unindexed addressing");
7705 }
7706
7707 /* Table of Thumb instructions which exist in both 16- and 32-bit
7708 encodings (the latter only in post-V6T2 cores). The index is the
7709 value used in the insns table below. When there is more than one
7710 possible 16-bit encoding for the instruction, this table always
7711 holds variant (1).
7712 Also contains several pseudo-instructions used during relaxation. */
7713 #define T16_32_TAB \
7714 X(adc, 4140, eb400000), \
7715 X(adcs, 4140, eb500000), \
7716 X(add, 1c00, eb000000), \
7717 X(adds, 1c00, eb100000), \
7718 X(addi, 0000, f1000000), \
7719 X(addis, 0000, f1100000), \
7720 X(add_pc,000f, f20f0000), \
7721 X(add_sp,000d, f10d0000), \
7722 X(adr, 000f, f20f0000), \
7723 X(and, 4000, ea000000), \
7724 X(ands, 4000, ea100000), \
7725 X(asr, 1000, fa40f000), \
7726 X(asrs, 1000, fa50f000), \
7727 X(b, e000, f000b000), \
7728 X(bcond, d000, f0008000), \
7729 X(bic, 4380, ea200000), \
7730 X(bics, 4380, ea300000), \
7731 X(cmn, 42c0, eb100f00), \
7732 X(cmp, 2800, ebb00f00), \
7733 X(cpsie, b660, f3af8400), \
7734 X(cpsid, b670, f3af8600), \
7735 X(cpy, 4600, ea4f0000), \
7736 X(dec_sp,80dd, f1bd0d00), \
7737 X(eor, 4040, ea800000), \
7738 X(eors, 4040, ea900000), \
7739 X(inc_sp,00dd, f10d0d00), \
7740 X(ldmia, c800, e8900000), \
7741 X(ldr, 6800, f8500000), \
7742 X(ldrb, 7800, f8100000), \
7743 X(ldrh, 8800, f8300000), \
7744 X(ldrsb, 5600, f9100000), \
7745 X(ldrsh, 5e00, f9300000), \
7746 X(ldr_pc,4800, f85f0000), \
7747 X(ldr_pc2,4800, f85f0000), \
7748 X(ldr_sp,9800, f85d0000), \
7749 X(lsl, 0000, fa00f000), \
7750 X(lsls, 0000, fa10f000), \
7751 X(lsr, 0800, fa20f000), \
7752 X(lsrs, 0800, fa30f000), \
7753 X(mov, 2000, ea4f0000), \
7754 X(movs, 2000, ea5f0000), \
7755 X(mul, 4340, fb00f000), \
7756 X(muls, 4340, ffffffff), /* no 32b muls */ \
7757 X(mvn, 43c0, ea6f0000), \
7758 X(mvns, 43c0, ea7f0000), \
7759 X(neg, 4240, f1c00000), /* rsb #0 */ \
7760 X(negs, 4240, f1d00000), /* rsbs #0 */ \
7761 X(orr, 4300, ea400000), \
7762 X(orrs, 4300, ea500000), \
7763 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
7764 X(push, b400, e92d0000), /* stmdb sp!,... */ \
7765 X(rev, ba00, fa90f080), \
7766 X(rev16, ba40, fa90f090), \
7767 X(revsh, bac0, fa90f0b0), \
7768 X(ror, 41c0, fa60f000), \
7769 X(rors, 41c0, fa70f000), \
7770 X(sbc, 4180, eb600000), \
7771 X(sbcs, 4180, eb700000), \
7772 X(stmia, c000, e8800000), \
7773 X(str, 6000, f8400000), \
7774 X(strb, 7000, f8000000), \
7775 X(strh, 8000, f8200000), \
7776 X(str_sp,9000, f84d0000), \
7777 X(sub, 1e00, eba00000), \
7778 X(subs, 1e00, ebb00000), \
7779 X(subi, 8000, f1a00000), \
7780 X(subis, 8000, f1b00000), \
7781 X(sxtb, b240, fa4ff080), \
7782 X(sxth, b200, fa0ff080), \
7783 X(tst, 4200, ea100f00), \
7784 X(uxtb, b2c0, fa5ff080), \
7785 X(uxth, b280, fa1ff080), \
7786 X(nop, bf00, f3af8000), \
7787 X(yield, bf10, f3af8001), \
7788 X(wfe, bf20, f3af8002), \
7789 X(wfi, bf30, f3af8003), \
7790 X(sev, bf40, f3af8004),
7791
7792 /* To catch errors in encoding functions, the codes are all offset by
7793 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
7794 as 16-bit instructions. */
7795 #define X(a,b,c) T_MNEM_##a
7796 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
7797 #undef X
7798
7799 #define X(a,b,c) 0x##b
7800 static const unsigned short thumb_op16[] = { T16_32_TAB };
7801 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
7802 #undef X
7803
7804 #define X(a,b,c) 0x##c
7805 static const unsigned int thumb_op32[] = { T16_32_TAB };
7806 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
7807 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
7808 #undef X
7809 #undef T16_32_TAB
7810
7811 /* Thumb instruction encoders, in alphabetical order. */
7812
7813 /* ADDW or SUBW. */
7814 static void
7815 do_t_add_sub_w (void)
7816 {
7817 int Rd, Rn;
7818
7819 Rd = inst.operands[0].reg;
7820 Rn = inst.operands[1].reg;
7821
7822 constraint (Rd == 15, _("PC not allowed as destination"));
7823 inst.instruction |= (Rn << 16) | (Rd << 8);
7824 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
7825 }
7826
7827 /* Parse an add or subtract instruction. We get here with inst.instruction
7828 equalling any of T_MNEM_add, adds, sub, or subs. */
7829
7830 static void
7831 do_t_add_sub (void)
7832 {
7833 int Rd, Rs, Rn;
7834
7835 Rd = inst.operands[0].reg;
7836 Rs = (inst.operands[1].present
7837 ? inst.operands[1].reg /* Rd, Rs, foo */
7838 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7839
7840 if (unified_syntax)
7841 {
7842 bfd_boolean flags;
7843 bfd_boolean narrow;
7844 int opcode;
7845
7846 flags = (inst.instruction == T_MNEM_adds
7847 || inst.instruction == T_MNEM_subs);
7848 if (flags)
7849 narrow = (current_it_mask == 0);
7850 else
7851 narrow = (current_it_mask != 0);
7852 if (!inst.operands[2].isreg)
7853 {
7854 opcode = 0;
7855 if (inst.size_req != 4)
7856 {
7857 int add;
7858
7859 add = (inst.instruction == T_MNEM_add
7860 || inst.instruction == T_MNEM_adds);
7861 /* Attempt to use a narrow opcode, with relaxation if
7862 appropriate. */
7863 if (Rd == REG_SP && Rs == REG_SP && !flags)
7864 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
7865 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
7866 opcode = T_MNEM_add_sp;
7867 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
7868 opcode = T_MNEM_add_pc;
7869 else if (Rd <= 7 && Rs <= 7 && narrow)
7870 {
7871 if (flags)
7872 opcode = add ? T_MNEM_addis : T_MNEM_subis;
7873 else
7874 opcode = add ? T_MNEM_addi : T_MNEM_subi;
7875 }
7876 if (opcode)
7877 {
7878 inst.instruction = THUMB_OP16(opcode);
7879 inst.instruction |= (Rd << 4) | Rs;
7880 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7881 if (inst.size_req != 2)
7882 inst.relax = opcode;
7883 }
7884 else
7885 constraint (inst.size_req == 2, BAD_HIREG);
7886 }
7887 if (inst.size_req == 4
7888 || (inst.size_req != 2 && !opcode))
7889 {
7890 /* ??? Convert large immediates to addw/subw. */
7891 inst.instruction = THUMB_OP32 (inst.instruction);
7892 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7893 inst.instruction |= inst.operands[0].reg << 8;
7894 inst.instruction |= inst.operands[1].reg << 16;
7895 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7896 }
7897 }
7898 else
7899 {
7900 Rn = inst.operands[2].reg;
7901 /* See if we can do this with a 16-bit instruction. */
7902 if (!inst.operands[2].shifted && inst.size_req != 4)
7903 {
7904 if (Rd > 7 || Rs > 7 || Rn > 7)
7905 narrow = FALSE;
7906
7907 if (narrow)
7908 {
7909 inst.instruction = ((inst.instruction == T_MNEM_adds
7910 || inst.instruction == T_MNEM_add)
7911 ? T_OPCODE_ADD_R3
7912 : T_OPCODE_SUB_R3);
7913 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7914 return;
7915 }
7916
7917 if (inst.instruction == T_MNEM_add)
7918 {
7919 if (Rd == Rs)
7920 {
7921 inst.instruction = T_OPCODE_ADD_HI;
7922 inst.instruction |= (Rd & 8) << 4;
7923 inst.instruction |= (Rd & 7);
7924 inst.instruction |= Rn << 3;
7925 return;
7926 }
7927 /* ... because addition is commutative! */
7928 else if (Rd == Rn)
7929 {
7930 inst.instruction = T_OPCODE_ADD_HI;
7931 inst.instruction |= (Rd & 8) << 4;
7932 inst.instruction |= (Rd & 7);
7933 inst.instruction |= Rs << 3;
7934 return;
7935 }
7936 }
7937 }
7938 /* If we get here, it can't be done in 16 bits. */
7939 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
7940 _("shift must be constant"));
7941 inst.instruction = THUMB_OP32 (inst.instruction);
7942 inst.instruction |= Rd << 8;
7943 inst.instruction |= Rs << 16;
7944 encode_thumb32_shifted_operand (2);
7945 }
7946 }
7947 else
7948 {
7949 constraint (inst.instruction == T_MNEM_adds
7950 || inst.instruction == T_MNEM_subs,
7951 BAD_THUMB32);
7952
7953 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
7954 {
7955 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
7956 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
7957 BAD_HIREG);
7958
7959 inst.instruction = (inst.instruction == T_MNEM_add
7960 ? 0x0000 : 0x8000);
7961 inst.instruction |= (Rd << 4) | Rs;
7962 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7963 return;
7964 }
7965
7966 Rn = inst.operands[2].reg;
7967 constraint (inst.operands[2].shifted, _("unshifted register required"));
7968
7969 /* We now have Rd, Rs, and Rn set to registers. */
7970 if (Rd > 7 || Rs > 7 || Rn > 7)
7971 {
7972 /* Can't do this for SUB. */
7973 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
7974 inst.instruction = T_OPCODE_ADD_HI;
7975 inst.instruction |= (Rd & 8) << 4;
7976 inst.instruction |= (Rd & 7);
7977 if (Rs == Rd)
7978 inst.instruction |= Rn << 3;
7979 else if (Rn == Rd)
7980 inst.instruction |= Rs << 3;
7981 else
7982 constraint (1, _("dest must overlap one source register"));
7983 }
7984 else
7985 {
7986 inst.instruction = (inst.instruction == T_MNEM_add
7987 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
7988 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7989 }
7990 }
7991 }
7992
7993 static void
7994 do_t_adr (void)
7995 {
7996 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
7997 {
7998 /* Defer to section relaxation. */
7999 inst.relax = inst.instruction;
8000 inst.instruction = THUMB_OP16 (inst.instruction);
8001 inst.instruction |= inst.operands[0].reg << 4;
8002 }
8003 else if (unified_syntax && inst.size_req != 2)
8004 {
8005 /* Generate a 32-bit opcode. */
8006 inst.instruction = THUMB_OP32 (inst.instruction);
8007 inst.instruction |= inst.operands[0].reg << 8;
8008 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
8009 inst.reloc.pc_rel = 1;
8010 }
8011 else
8012 {
8013 /* Generate a 16-bit opcode. */
8014 inst.instruction = THUMB_OP16 (inst.instruction);
8015 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8016 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
8017 inst.reloc.pc_rel = 1;
8018
8019 inst.instruction |= inst.operands[0].reg << 4;
8020 }
8021 }
8022
8023 /* Arithmetic instructions for which there is just one 16-bit
8024 instruction encoding, and it allows only two low registers.
8025 For maximal compatibility with ARM syntax, we allow three register
8026 operands even when Thumb-32 instructions are not available, as long
8027 as the first two are identical. For instance, both "sbc r0,r1" and
8028 "sbc r0,r0,r1" are allowed. */
8029 static void
8030 do_t_arit3 (void)
8031 {
8032 int Rd, Rs, Rn;
8033
8034 Rd = inst.operands[0].reg;
8035 Rs = (inst.operands[1].present
8036 ? inst.operands[1].reg /* Rd, Rs, foo */
8037 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8038 Rn = inst.operands[2].reg;
8039
8040 if (unified_syntax)
8041 {
8042 if (!inst.operands[2].isreg)
8043 {
8044 /* For an immediate, we always generate a 32-bit opcode;
8045 section relaxation will shrink it later if possible. */
8046 inst.instruction = THUMB_OP32 (inst.instruction);
8047 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8048 inst.instruction |= Rd << 8;
8049 inst.instruction |= Rs << 16;
8050 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8051 }
8052 else
8053 {
8054 bfd_boolean narrow;
8055
8056 /* See if we can do this with a 16-bit instruction. */
8057 if (THUMB_SETS_FLAGS (inst.instruction))
8058 narrow = current_it_mask == 0;
8059 else
8060 narrow = current_it_mask != 0;
8061
8062 if (Rd > 7 || Rn > 7 || Rs > 7)
8063 narrow = FALSE;
8064 if (inst.operands[2].shifted)
8065 narrow = FALSE;
8066 if (inst.size_req == 4)
8067 narrow = FALSE;
8068
8069 if (narrow
8070 && Rd == Rs)
8071 {
8072 inst.instruction = THUMB_OP16 (inst.instruction);
8073 inst.instruction |= Rd;
8074 inst.instruction |= Rn << 3;
8075 return;
8076 }
8077
8078 /* If we get here, it can't be done in 16 bits. */
8079 constraint (inst.operands[2].shifted
8080 && inst.operands[2].immisreg,
8081 _("shift must be constant"));
8082 inst.instruction = THUMB_OP32 (inst.instruction);
8083 inst.instruction |= Rd << 8;
8084 inst.instruction |= Rs << 16;
8085 encode_thumb32_shifted_operand (2);
8086 }
8087 }
8088 else
8089 {
8090 /* On its face this is a lie - the instruction does set the
8091 flags. However, the only supported mnemonic in this mode
8092 says it doesn't. */
8093 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8094
8095 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8096 _("unshifted register required"));
8097 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8098 constraint (Rd != Rs,
8099 _("dest and source1 must be the same register"));
8100
8101 inst.instruction = THUMB_OP16 (inst.instruction);
8102 inst.instruction |= Rd;
8103 inst.instruction |= Rn << 3;
8104 }
8105 }
8106
8107 /* Similarly, but for instructions where the arithmetic operation is
8108 commutative, so we can allow either of them to be different from
8109 the destination operand in a 16-bit instruction. For instance, all
8110 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8111 accepted. */
8112 static void
8113 do_t_arit3c (void)
8114 {
8115 int Rd, Rs, Rn;
8116
8117 Rd = inst.operands[0].reg;
8118 Rs = (inst.operands[1].present
8119 ? inst.operands[1].reg /* Rd, Rs, foo */
8120 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8121 Rn = inst.operands[2].reg;
8122
8123 if (unified_syntax)
8124 {
8125 if (!inst.operands[2].isreg)
8126 {
8127 /* For an immediate, we always generate a 32-bit opcode;
8128 section relaxation will shrink it later if possible. */
8129 inst.instruction = THUMB_OP32 (inst.instruction);
8130 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8131 inst.instruction |= Rd << 8;
8132 inst.instruction |= Rs << 16;
8133 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8134 }
8135 else
8136 {
8137 bfd_boolean narrow;
8138
8139 /* See if we can do this with a 16-bit instruction. */
8140 if (THUMB_SETS_FLAGS (inst.instruction))
8141 narrow = current_it_mask == 0;
8142 else
8143 narrow = current_it_mask != 0;
8144
8145 if (Rd > 7 || Rn > 7 || Rs > 7)
8146 narrow = FALSE;
8147 if (inst.operands[2].shifted)
8148 narrow = FALSE;
8149 if (inst.size_req == 4)
8150 narrow = FALSE;
8151
8152 if (narrow)
8153 {
8154 if (Rd == Rs)
8155 {
8156 inst.instruction = THUMB_OP16 (inst.instruction);
8157 inst.instruction |= Rd;
8158 inst.instruction |= Rn << 3;
8159 return;
8160 }
8161 if (Rd == Rn)
8162 {
8163 inst.instruction = THUMB_OP16 (inst.instruction);
8164 inst.instruction |= Rd;
8165 inst.instruction |= Rs << 3;
8166 return;
8167 }
8168 }
8169
8170 /* If we get here, it can't be done in 16 bits. */
8171 constraint (inst.operands[2].shifted
8172 && inst.operands[2].immisreg,
8173 _("shift must be constant"));
8174 inst.instruction = THUMB_OP32 (inst.instruction);
8175 inst.instruction |= Rd << 8;
8176 inst.instruction |= Rs << 16;
8177 encode_thumb32_shifted_operand (2);
8178 }
8179 }
8180 else
8181 {
8182 /* On its face this is a lie - the instruction does set the
8183 flags. However, the only supported mnemonic in this mode
8184 says it doesn't. */
8185 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8186
8187 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8188 _("unshifted register required"));
8189 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8190
8191 inst.instruction = THUMB_OP16 (inst.instruction);
8192 inst.instruction |= Rd;
8193
8194 if (Rd == Rs)
8195 inst.instruction |= Rn << 3;
8196 else if (Rd == Rn)
8197 inst.instruction |= Rs << 3;
8198 else
8199 constraint (1, _("dest must overlap one source register"));
8200 }
8201 }
8202
8203 static void
8204 do_t_barrier (void)
8205 {
8206 if (inst.operands[0].present)
8207 {
8208 constraint ((inst.instruction & 0xf0) != 0x40
8209 && inst.operands[0].imm != 0xf,
8210 "bad barrier type");
8211 inst.instruction |= inst.operands[0].imm;
8212 }
8213 else
8214 inst.instruction |= 0xf;
8215 }
8216
8217 static void
8218 do_t_bfc (void)
8219 {
8220 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8221 constraint (msb > 32, _("bit-field extends past end of register"));
8222 /* The instruction encoding stores the LSB and MSB,
8223 not the LSB and width. */
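   /* For example, "bfc r3, #5, #4" clears bits 8:5 of r3: lsb = 5 is split
      into imm3 (bits 14:12) and imm2 (bits 7:6), and the msb field in
      bits 4:0 holds lsb + width - 1 = 8.  */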
8224 inst.instruction |= inst.operands[0].reg << 8;
8225 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8226 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8227 inst.instruction |= msb - 1;
8228 }
8229
8230 static void
8231 do_t_bfi (void)
8232 {
8233 unsigned int msb;
8234
8235 /* #0 in second position is alternative syntax for bfc, which is
8236 the same instruction but with REG_PC in the Rm field. */
8237 if (!inst.operands[1].isreg)
8238 inst.operands[1].reg = REG_PC;
8239
8240 msb = inst.operands[2].imm + inst.operands[3].imm;
8241 constraint (msb > 32, _("bit-field extends past end of register"));
8242 /* The instruction encoding stores the LSB and MSB,
8243 not the LSB and width. */
8244 inst.instruction |= inst.operands[0].reg << 8;
8245 inst.instruction |= inst.operands[1].reg << 16;
8246 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8247 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8248 inst.instruction |= msb - 1;
8249 }
8250
8251 static void
8252 do_t_bfx (void)
8253 {
8254 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8255 _("bit-field extends past end of register"));
8256 inst.instruction |= inst.operands[0].reg << 8;
8257 inst.instruction |= inst.operands[1].reg << 16;
8258 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8259 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8260 inst.instruction |= inst.operands[3].imm - 1;
8261 }
8262
8263 /* ARM V5 Thumb BLX (argument parse)
8264 BLX <target_addr> which is BLX(1)
8265 BLX <Rm> which is BLX(2)
8266 Unfortunately, there are two different opcodes for this mnemonic.
8267 So, the insns[].value is not used, and the code here zaps values
8268 into inst.instruction.
8269
8270 ??? How to take advantage of the additional two bits of displacement
8271 available in Thumb32 mode? Need new relocation? */
8272
8273 static void
8274 do_t_blx (void)
8275 {
8276 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8277 if (inst.operands[0].isreg)
8278 /* We have a register, so this is BLX(2). */
8279 inst.instruction |= inst.operands[0].reg << 3;
8280 else
8281 {
8282 /* No register. This must be BLX(1). */
8283 inst.instruction = 0xf000e800;
8284 #ifdef OBJ_ELF
8285 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8286 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8287 else
8288 #endif
8289 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8290 inst.reloc.pc_rel = 1;
8291 }
8292 }
8293
8294 static void
8295 do_t_branch (void)
8296 {
8297 int opcode;
8298 int cond;
8299
8300 if (current_it_mask)
8301 {
8302 /* Conditional branches inside IT blocks are encoded as unconditional
8303 branches. */
8304 cond = COND_ALWAYS;
8305 /* A branch must be the last instruction in an IT block. */
8306 constraint (current_it_mask != 0x10, BAD_BRANCH);
8307 }
8308 else
8309 cond = inst.cond;
8310
8311 if (cond != COND_ALWAYS)
8312 opcode = T_MNEM_bcond;
8313 else
8314 opcode = inst.instruction;
8315
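   /* The four encodings selected below differ in reach (approximately):
      BRANCH25, the 32-bit unconditional branch, spans +/-16MB and
      BRANCH20, its conditional counterpart, +/-1MB; the 16-bit forms
      BRANCH12 and BRANCH9 span roughly +/-2KB and +/-256 bytes.  */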
8316 if (unified_syntax && inst.size_req == 4)
8317 {
8318 inst.instruction = THUMB_OP32(opcode);
8319 if (cond == COND_ALWAYS)
8320 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8321 else
8322 {
8323 assert (cond != 0xF);
8324 inst.instruction |= cond << 22;
8325 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8326 }
8327 }
8328 else
8329 {
8330 inst.instruction = THUMB_OP16(opcode);
8331 if (cond == COND_ALWAYS)
8332 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8333 else
8334 {
8335 inst.instruction |= cond << 8;
8336 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8337 }
8338 /* Allow section relaxation. */
8339 if (unified_syntax && inst.size_req != 2)
8340 inst.relax = opcode;
8341 }
8342
8343 inst.reloc.pc_rel = 1;
8344 }
8345
8346 static void
8347 do_t_bkpt (void)
8348 {
8349 constraint (inst.cond != COND_ALWAYS,
8350 _("instruction is always unconditional"));
8351 if (inst.operands[0].present)
8352 {
8353 constraint (inst.operands[0].imm > 255,
8354 _("immediate value out of range"));
8355 inst.instruction |= inst.operands[0].imm;
8356 }
8357 }
8358
8359 static void
8360 do_t_branch23 (void)
8361 {
8362 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8363 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8364 inst.reloc.pc_rel = 1;
8365
8366 /* If the destination of the branch is a defined symbol which does not have
8367 the THUMB_FUNC attribute, then we must be calling a function which has
8368 the (interfacearm) attribute. We look for the Thumb entry point to that
8369 function and change the branch to refer to that function instead. */
8370 if ( inst.reloc.exp.X_op == O_symbol
8371 && inst.reloc.exp.X_add_symbol != NULL
8372 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8373 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8374 inst.reloc.exp.X_add_symbol =
8375 find_real_start (inst.reloc.exp.X_add_symbol);
8376 }
8377
8378 static void
8379 do_t_bx (void)
8380 {
8381 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8382 inst.instruction |= inst.operands[0].reg << 3;
8383 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8384 should cause the alignment to be checked once it is known. This is
8385 because BX PC only works if the instruction is word aligned. */
8386 }
8387
8388 static void
8389 do_t_bxj (void)
8390 {
8391 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8392 if (inst.operands[0].reg == REG_PC)
8393 as_tsktsk (_("use of r15 in bxj is not really useful"));
8394
8395 inst.instruction |= inst.operands[0].reg << 16;
8396 }
8397
8398 static void
8399 do_t_clz (void)
8400 {
8401 inst.instruction |= inst.operands[0].reg << 8;
8402 inst.instruction |= inst.operands[1].reg << 16;
8403 inst.instruction |= inst.operands[1].reg;
8404 }
8405
8406 static void
8407 do_t_cps (void)
8408 {
8409 constraint (current_it_mask, BAD_NOT_IT);
8410 inst.instruction |= inst.operands[0].imm;
8411 }
8412
8413 static void
8414 do_t_cpsi (void)
8415 {
8416 constraint (current_it_mask, BAD_NOT_IT);
8417 if (unified_syntax
8418 && (inst.operands[1].present || inst.size_req == 4)
8419 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8420 {
8421 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8422 inst.instruction = 0xf3af8000;
8423 inst.instruction |= imod << 9;
8424 inst.instruction |= inst.operands[0].imm << 5;
8425 if (inst.operands[1].present)
8426 inst.instruction |= 0x100 | inst.operands[1].imm;
8427 }
8428 else
8429 {
8430 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8431 && (inst.operands[0].imm & 4),
8432 _("selected processor does not support 'A' form "
8433 "of this instruction"));
8434 constraint (inst.operands[1].present || inst.size_req == 4,
8435 _("Thumb does not support the 2-argument "
8436 "form of this instruction"));
8437 inst.instruction |= inst.operands[0].imm;
8438 }
8439 }
8440
8441 /* THUMB CPY instruction (argument parse). */
8442
8443 static void
8444 do_t_cpy (void)
8445 {
8446 if (inst.size_req == 4)
8447 {
8448 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8449 inst.instruction |= inst.operands[0].reg << 8;
8450 inst.instruction |= inst.operands[1].reg;
8451 }
8452 else
8453 {
8454 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8455 inst.instruction |= (inst.operands[0].reg & 0x7);
8456 inst.instruction |= inst.operands[1].reg << 3;
8457 }
8458 }
8459
8460 static void
8461 do_t_czb (void)
8462 {
8463 constraint (current_it_mask, BAD_NOT_IT);
8464 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8465 inst.instruction |= inst.operands[0].reg;
8466 inst.reloc.pc_rel = 1;
8467 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
8468 }
8469
8470 static void
8471 do_t_dbg (void)
8472 {
8473 inst.instruction |= inst.operands[0].imm;
8474 }
8475
8476 static void
8477 do_t_div (void)
8478 {
8479 if (!inst.operands[1].present)
8480 inst.operands[1].reg = inst.operands[0].reg;
8481 inst.instruction |= inst.operands[0].reg << 8;
8482 inst.instruction |= inst.operands[1].reg << 16;
8483 inst.instruction |= inst.operands[2].reg;
8484 }
8485
8486 static void
8487 do_t_hint (void)
8488 {
8489 if (unified_syntax && inst.size_req == 4)
8490 inst.instruction = THUMB_OP32 (inst.instruction);
8491 else
8492 inst.instruction = THUMB_OP16 (inst.instruction);
8493 }
8494
8495 static void
8496 do_t_it (void)
8497 {
8498 unsigned int cond = inst.operands[0].imm;
8499
8500 constraint (current_it_mask, BAD_NOT_IT);
8501 current_it_mask = (inst.instruction & 0xf) | 0x10;
8502 current_cc = cond;
8503
8504 /* If the condition is a negative condition, invert the mask. */
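      /* The parsed mask encodes 't' as 1 and 'e' as 0, which is correct
	 when bit 0 of the condition is set; when it is clear, every
	 selector bit above the terminating 1 must be flipped so that 't'
	 slots still match the first condition.  The XOR constants below
	 flip three, two or one selector bits for masks of the form xyz1,
	 xy10 and x100 respectively; 1000 has nothing to flip.  */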
8505 if ((cond & 0x1) == 0x0)
8506 {
8507 unsigned int mask = inst.instruction & 0x000f;
8508
8509 if ((mask & 0x7) == 0)
8510 /* no conversion needed */;
8511 else if ((mask & 0x3) == 0)
8512 mask ^= 0x8;
8513 else if ((mask & 0x1) == 0)
8514 mask ^= 0xC;
8515 else
8516 mask ^= 0xE;
8517
8518 inst.instruction &= 0xfff0;
8519 inst.instruction |= mask;
8520 }
8521
8522 inst.instruction |= cond << 4;
8523 }
8524
8525 static void
8526 do_t_ldmstm (void)
8527 {
8528 /* This really doesn't seem worth it. */
8529 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
8530 _("expression too complex"));
8531 constraint (inst.operands[1].writeback,
8532 _("Thumb load/store multiple does not support {reglist}^"));
8533
8534 if (unified_syntax)
8535 {
8536 /* See if we can use a 16-bit instruction. */
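	  /* In outline: a 16-bit encoding needs a low base register and a
	     register list restricted to r0-r7.  The 16-bit stmia always
	     writes back, and the 16-bit ldmia writes back exactly when the
	     base register is not in the list, hence the writeback tests
	     below.  */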
8537 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
8538 && inst.size_req != 4
8539 && inst.operands[0].reg <= 7
8540 && !(inst.operands[1].imm & ~0xff)
8541 && (inst.instruction == T_MNEM_stmia
8542 ? inst.operands[0].writeback
8543 : (inst.operands[0].writeback
8544 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
8545 {
8546 if (inst.instruction == T_MNEM_stmia
8547 && (inst.operands[1].imm & (1 << inst.operands[0].reg))
8548 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8549 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8550 inst.operands[0].reg);
8551
8552 inst.instruction = THUMB_OP16 (inst.instruction);
8553 inst.instruction |= inst.operands[0].reg << 8;
8554 inst.instruction |= inst.operands[1].imm;
8555 }
8556 else
8557 {
8558 if (inst.operands[1].imm & (1 << 13))
8559 as_warn (_("SP should not be in register list"));
8560 if (inst.instruction == T_MNEM_stmia)
8561 {
8562 if (inst.operands[1].imm & (1 << 15))
8563 as_warn (_("PC should not be in register list"));
8564 if (inst.operands[1].imm & (1 << inst.operands[0].reg))
8565 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8566 inst.operands[0].reg);
8567 }
8568 else
8569 {
8570 if (inst.operands[1].imm & (1 << 14)
8571 && inst.operands[1].imm & (1 << 15))
8572 as_warn (_("LR and PC should not both be in register list"));
8573 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8574 && inst.operands[0].writeback)
8575 as_warn (_("base register should not be in register list "
8576 "when written back"));
8577 }
8578 if (inst.instruction < 0xffff)
8579 inst.instruction = THUMB_OP32 (inst.instruction);
8580 inst.instruction |= inst.operands[0].reg << 16;
8581 inst.instruction |= inst.operands[1].imm;
8582 if (inst.operands[0].writeback)
8583 inst.instruction |= WRITE_BACK;
8584 }
8585 }
8586 else
8587 {
8588 constraint (inst.operands[0].reg > 7
8589 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
8590 if (inst.instruction == T_MNEM_stmia)
8591 {
8592 if (!inst.operands[0].writeback)
8593 as_warn (_("this instruction will write back the base register"));
8594 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8595 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8596 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8597 inst.operands[0].reg);
8598 }
8599 else
8600 {
8601 if (!inst.operands[0].writeback
8602 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
8603 as_warn (_("this instruction will write back the base register"));
8604 else if (inst.operands[0].writeback
8605 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
8606 as_warn (_("this instruction will not write back the base register"));
8607 }
8608
8609 inst.instruction = THUMB_OP16 (inst.instruction);
8610 inst.instruction |= inst.operands[0].reg << 8;
8611 inst.instruction |= inst.operands[1].imm;
8612 }
8613 }
8614
8615 static void
8616 do_t_ldrex (void)
8617 {
8618 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8619 || inst.operands[1].postind || inst.operands[1].writeback
8620 || inst.operands[1].immisreg || inst.operands[1].shifted
8621 || inst.operands[1].negative,
8622 BAD_ADDR_MODE);
8623
8624 inst.instruction |= inst.operands[0].reg << 12;
8625 inst.instruction |= inst.operands[1].reg << 16;
8626 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
8627 }
8628
8629 static void
8630 do_t_ldrexd (void)
8631 {
8632 if (!inst.operands[1].present)
8633 {
8634 constraint (inst.operands[0].reg == REG_LR,
8635 _("r14 not allowed as first register "
8636 "when second register is omitted"));
8637 inst.operands[1].reg = inst.operands[0].reg + 1;
8638 }
8639 constraint (inst.operands[0].reg == inst.operands[1].reg,
8640 BAD_OVERLAP);
8641
8642 inst.instruction |= inst.operands[0].reg << 12;
8643 inst.instruction |= inst.operands[1].reg << 8;
8644 inst.instruction |= inst.operands[2].reg << 16;
8645 }
8646
8647 static void
8648 do_t_ldst (void)
8649 {
8650 unsigned long opcode;
8651 int Rn;
8652
8653 opcode = inst.instruction;
8654 if (unified_syntax)
8655 {
8656 if (!inst.operands[1].isreg)
8657 {
8658 if (opcode <= 0xffff)
8659 inst.instruction = THUMB_OP32 (opcode);
8660 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8661 return;
8662 }
8663 if (inst.operands[1].isreg
8664 && !inst.operands[1].writeback
8665 && !inst.operands[1].shifted && !inst.operands[1].postind
8666 && !inst.operands[1].negative && inst.operands[0].reg <= 7
8667 && opcode <= 0xffff
8668 && inst.size_req != 4)
8669 {
8670 /* Insn may have a 16-bit form. */
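	  /* The candidate 16-bit encodings are [Rn, Rm] with low registers
	     only, [Rn, #imm5] scaled by the access size, str/ldr
	     [SP, #imm8*4], and ldr [PC, #imm8*4]; the tests below choose
	     among them.  */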
8671 Rn = inst.operands[1].reg;
8672 if (inst.operands[1].immisreg)
8673 {
8674 inst.instruction = THUMB_OP16 (opcode);
8675 /* [Rn, Ri] */
8676 if (Rn <= 7 && inst.operands[1].imm <= 7)
8677 goto op16;
8678 }
8679 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
8680 && opcode != T_MNEM_ldrsb)
8681 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
8682 || (Rn == REG_SP && opcode == T_MNEM_str))
8683 {
8684 /* [Rn, #const] */
8685 if (Rn > 7)
8686 {
8687 if (Rn == REG_PC)
8688 {
8689 if (inst.reloc.pc_rel)
8690 opcode = T_MNEM_ldr_pc2;
8691 else
8692 opcode = T_MNEM_ldr_pc;
8693 }
8694 else
8695 {
8696 if (opcode == T_MNEM_ldr)
8697 opcode = T_MNEM_ldr_sp;
8698 else
8699 opcode = T_MNEM_str_sp;
8700 }
8701 inst.instruction = inst.operands[0].reg << 8;
8702 }
8703 else
8704 {
8705 inst.instruction = inst.operands[0].reg;
8706 inst.instruction |= inst.operands[1].reg << 3;
8707 }
8708 inst.instruction |= THUMB_OP16 (opcode);
8709 if (inst.size_req == 2)
8710 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8711 else
8712 inst.relax = opcode;
8713 return;
8714 }
8715 }
8716 /* Definitely a 32-bit variant. */
8717 inst.instruction = THUMB_OP32 (opcode);
8718 inst.instruction |= inst.operands[0].reg << 12;
8719 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
8720 return;
8721 }
8722
8723 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8724
8725 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
8726 {
8727 /* Only [Rn,Rm] is acceptable. */
8728 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
8729 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
8730 || inst.operands[1].postind || inst.operands[1].shifted
8731 || inst.operands[1].negative,
8732 _("Thumb does not support this addressing mode"));
8733 inst.instruction = THUMB_OP16 (inst.instruction);
8734 goto op16;
8735 }
8736
8737 inst.instruction = THUMB_OP16 (inst.instruction);
8738 if (!inst.operands[1].isreg)
8739 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8740 return;
8741
8742 constraint (!inst.operands[1].preind
8743 || inst.operands[1].shifted
8744 || inst.operands[1].writeback,
8745 _("Thumb does not support this addressing mode"));
8746 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
8747 {
8748 constraint (inst.instruction & 0x0600,
8749 _("byte or halfword not valid for base register"));
8750 constraint (inst.operands[1].reg == REG_PC
8751 && !(inst.instruction & THUMB_LOAD_BIT),
8752 _("r15 based store not allowed"));
8753 constraint (inst.operands[1].immisreg,
8754 _("invalid base register for register offset"));
8755
8756 if (inst.operands[1].reg == REG_PC)
8757 inst.instruction = T_OPCODE_LDR_PC;
8758 else if (inst.instruction & THUMB_LOAD_BIT)
8759 inst.instruction = T_OPCODE_LDR_SP;
8760 else
8761 inst.instruction = T_OPCODE_STR_SP;
8762
8763 inst.instruction |= inst.operands[0].reg << 8;
8764 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8765 return;
8766 }
8767
8768 constraint (inst.operands[1].reg > 7, BAD_HIREG);
8769 if (!inst.operands[1].immisreg)
8770 {
8771 /* Immediate offset. */
8772 inst.instruction |= inst.operands[0].reg;
8773 inst.instruction |= inst.operands[1].reg << 3;
8774 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8775 return;
8776 }
8777
8778 /* Register offset. */
8779 constraint (inst.operands[1].imm > 7, BAD_HIREG);
8780 constraint (inst.operands[1].negative,
8781 _("Thumb does not support this addressing mode"));
8782
8783 op16:
8784 switch (inst.instruction)
8785 {
8786 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
8787 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
8788 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
8789 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
8790 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
8791 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
8792 case 0x5600 /* ldrsb */:
8793 case 0x5e00 /* ldrsh */: break;
8794 default: abort ();
8795 }
8796
8797 inst.instruction |= inst.operands[0].reg;
8798 inst.instruction |= inst.operands[1].reg << 3;
8799 inst.instruction |= inst.operands[1].imm << 6;
8800 }
8801
8802 static void
8803 do_t_ldstd (void)
8804 {
8805 if (!inst.operands[1].present)
8806 {
8807 inst.operands[1].reg = inst.operands[0].reg + 1;
8808 constraint (inst.operands[0].reg == REG_LR,
8809 _("r14 not allowed here"));
8810 }
8811 inst.instruction |= inst.operands[0].reg << 12;
8812 inst.instruction |= inst.operands[1].reg << 8;
8813 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
8815 }
8816
8817 static void
8818 do_t_ldstt (void)
8819 {
8820 inst.instruction |= inst.operands[0].reg << 12;
8821 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
8822 }
8823
8824 static void
8825 do_t_mla (void)
8826 {
8827 inst.instruction |= inst.operands[0].reg << 8;
8828 inst.instruction |= inst.operands[1].reg << 16;
8829 inst.instruction |= inst.operands[2].reg;
8830 inst.instruction |= inst.operands[3].reg << 12;
8831 }
8832
8833 static void
8834 do_t_mlal (void)
8835 {
8836 inst.instruction |= inst.operands[0].reg << 12;
8837 inst.instruction |= inst.operands[1].reg << 8;
8838 inst.instruction |= inst.operands[2].reg << 16;
8839 inst.instruction |= inst.operands[3].reg;
8840 }
8841
8842 static void
8843 do_t_mov_cmp (void)
8844 {
8845 if (unified_syntax)
8846 {
8847 int r0off = (inst.instruction == T_MNEM_mov
8848 || inst.instruction == T_MNEM_movs) ? 8 : 16;
8849 unsigned long opcode;
8850 bfd_boolean narrow;
8851 bfd_boolean low_regs;
8852
8853 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
8854 opcode = inst.instruction;
8855 if (current_it_mask)
8856 narrow = opcode != T_MNEM_movs;
8857 else
8858 narrow = opcode != T_MNEM_movs || low_regs;
8859 if (inst.size_req == 4
8860 || inst.operands[1].shifted)
8861 narrow = FALSE;
8862
8863 if (!inst.operands[1].isreg)
8864 {
8865 /* Immediate operand. */
8866 if (current_it_mask == 0 && opcode == T_MNEM_mov)
 8867 	    narrow = FALSE;
8868 if (low_regs && narrow)
8869 {
8870 inst.instruction = THUMB_OP16 (opcode);
8871 inst.instruction |= inst.operands[0].reg << 8;
8872 if (inst.size_req == 2)
8873 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8874 else
8875 inst.relax = opcode;
8876 }
8877 else
8878 {
8879 inst.instruction = THUMB_OP32 (inst.instruction);
8880 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8881 inst.instruction |= inst.operands[0].reg << r0off;
8882 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8883 }
8884 }
8885 else if (!narrow)
8886 {
8887 inst.instruction = THUMB_OP32 (inst.instruction);
8888 inst.instruction |= inst.operands[0].reg << r0off;
8889 encode_thumb32_shifted_operand (1);
8890 }
8891 else
8892 switch (inst.instruction)
8893 {
8894 case T_MNEM_mov:
8895 inst.instruction = T_OPCODE_MOV_HR;
8896 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8897 inst.instruction |= (inst.operands[0].reg & 0x7);
8898 inst.instruction |= inst.operands[1].reg << 3;
8899 break;
8900
8901 case T_MNEM_movs:
8902 /* We know we have low registers at this point.
8903 Generate ADD Rd, Rs, #0. */
8904 inst.instruction = T_OPCODE_ADD_I3;
8905 inst.instruction |= inst.operands[0].reg;
8906 inst.instruction |= inst.operands[1].reg << 3;
8907 break;
8908
8909 case T_MNEM_cmp:
8910 if (low_regs)
8911 {
8912 inst.instruction = T_OPCODE_CMP_LR;
8913 inst.instruction |= inst.operands[0].reg;
8914 inst.instruction |= inst.operands[1].reg << 3;
8915 }
8916 else
8917 {
8918 inst.instruction = T_OPCODE_CMP_HR;
8919 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8920 inst.instruction |= (inst.operands[0].reg & 0x7);
8921 inst.instruction |= inst.operands[1].reg << 3;
8922 }
8923 break;
8924 }
8925 return;
8926 }
8927
8928 inst.instruction = THUMB_OP16 (inst.instruction);
8929 if (inst.operands[1].isreg)
8930 {
8931 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
8932 {
8933 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8934 since a MOV instruction produces unpredictable results. */
8935 if (inst.instruction == T_OPCODE_MOV_I8)
8936 inst.instruction = T_OPCODE_ADD_I3;
8937 else
8938 inst.instruction = T_OPCODE_CMP_LR;
8939
8940 inst.instruction |= inst.operands[0].reg;
8941 inst.instruction |= inst.operands[1].reg << 3;
8942 }
8943 else
8944 {
8945 if (inst.instruction == T_OPCODE_MOV_I8)
8946 inst.instruction = T_OPCODE_MOV_HR;
8947 else
8948 inst.instruction = T_OPCODE_CMP_HR;
8949 do_t_cpy ();
8950 }
8951 }
8952 else
8953 {
8954 constraint (inst.operands[0].reg > 7,
8955 _("only lo regs allowed with immediate"));
8956 inst.instruction |= inst.operands[0].reg << 8;
8957 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8958 }
8959 }
8960
8961 static void
8962 do_t_mov16 (void)
8963 {
8964 bfd_vma imm;
8965 bfd_boolean top;
8966
8967 top = (inst.instruction & 0x00800000) != 0;
8968 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
8969 {
 8970       constraint (top, _(":lower16: not allowed in this instruction"));
8971 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
8972 }
8973 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
8974 {
 8975       constraint (!top, _(":upper16: not allowed in this instruction"));
8976 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
8977 }
8978
8979 inst.instruction |= inst.operands[0].reg << 8;
8980 if (inst.reloc.type == BFD_RELOC_UNUSED)
8981 {
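      /* Split the 16-bit value into the imm4:i:imm3:imm8 fields of the
	 MOVW/MOVT encoding: imm[15:12] -> bits 19:16, imm[11] -> bit 26,
	 imm[10:8] -> bits 14:12 and imm[7:0] -> bits 7:0.  */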
8982 imm = inst.reloc.exp.X_add_number;
8983 inst.instruction |= (imm & 0xf000) << 4;
8984 inst.instruction |= (imm & 0x0800) << 15;
8985 inst.instruction |= (imm & 0x0700) << 4;
8986 inst.instruction |= (imm & 0x00ff);
8987 }
8988 }
8989
8990 static void
8991 do_t_mvn_tst (void)
8992 {
8993 if (unified_syntax)
8994 {
8995 int r0off = (inst.instruction == T_MNEM_mvn
8996 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
8997 bfd_boolean narrow;
8998
8999 if (inst.size_req == 4
9000 || inst.instruction > 0xffff
9001 || inst.operands[1].shifted
9002 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9003 narrow = FALSE;
9004 else if (inst.instruction == T_MNEM_cmn)
9005 narrow = TRUE;
9006 else if (THUMB_SETS_FLAGS (inst.instruction))
9007 narrow = (current_it_mask == 0);
9008 else
9009 narrow = (current_it_mask != 0);
9010
9011 if (!inst.operands[1].isreg)
9012 {
9013 /* For an immediate, we always generate a 32-bit opcode;
9014 section relaxation will shrink it later if possible. */
9015 if (inst.instruction < 0xffff)
9016 inst.instruction = THUMB_OP32 (inst.instruction);
9017 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9018 inst.instruction |= inst.operands[0].reg << r0off;
9019 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9020 }
9021 else
9022 {
9023 /* See if we can do this with a 16-bit instruction. */
9024 if (narrow)
9025 {
9026 inst.instruction = THUMB_OP16 (inst.instruction);
9027 inst.instruction |= inst.operands[0].reg;
9028 inst.instruction |= inst.operands[1].reg << 3;
9029 }
9030 else
9031 {
9032 constraint (inst.operands[1].shifted
9033 && inst.operands[1].immisreg,
9034 _("shift must be constant"));
9035 if (inst.instruction < 0xffff)
9036 inst.instruction = THUMB_OP32 (inst.instruction);
9037 inst.instruction |= inst.operands[0].reg << r0off;
9038 encode_thumb32_shifted_operand (1);
9039 }
9040 }
9041 }
9042 else
9043 {
9044 constraint (inst.instruction > 0xffff
9045 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
9046 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
9047 _("unshifted register required"));
9048 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9049 BAD_HIREG);
9050
9051 inst.instruction = THUMB_OP16 (inst.instruction);
9052 inst.instruction |= inst.operands[0].reg;
9053 inst.instruction |= inst.operands[1].reg << 3;
9054 }
9055 }
9056
9057 static void
9058 do_t_mrs (void)
9059 {
9060 int flags;
9061
9062 if (do_vfp_nsyn_mrs () == SUCCESS)
9063 return;
9064
9065 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
9066 if (flags == 0)
9067 {
9068 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9069 _("selected processor does not support "
9070 "requested special purpose register"));
9071 }
9072 else
9073 {
9074 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9075 _("selected processor does not support "
9076 "requested special purpose register %x"));
9077 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9078 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
9079 _("'CPSR' or 'SPSR' expected"));
9080 }
9081
9082 inst.instruction |= inst.operands[0].reg << 8;
9083 inst.instruction |= (flags & SPSR_BIT) >> 2;
9084 inst.instruction |= inst.operands[1].imm & 0xff;
9085 }
9086
9087 static void
9088 do_t_msr (void)
9089 {
9090 int flags;
9091
9092 if (do_vfp_nsyn_msr () == SUCCESS)
9093 return;
9094
9095 constraint (!inst.operands[1].isreg,
9096 _("Thumb encoding does not support an immediate here"));
9097 flags = inst.operands[0].imm;
9098 if (flags & ~0xff)
9099 {
9100 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9101 _("selected processor does not support "
9102 "requested special purpose register"));
9103 }
9104 else
9105 {
9106 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9107 _("selected processor does not support "
9108 "requested special purpose register"));
9109 flags |= PSR_f;
9110 }
9111 inst.instruction |= (flags & SPSR_BIT) >> 2;
9112 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
9113 inst.instruction |= (flags & 0xff);
9114 inst.instruction |= inst.operands[1].reg << 16;
9115 }
9116
9117 static void
9118 do_t_mul (void)
9119 {
9120 if (!inst.operands[2].present)
9121 inst.operands[2].reg = inst.operands[0].reg;
9122
9123 /* There is no 32-bit MULS and no 16-bit MUL. */
9124 if (unified_syntax && inst.instruction == T_MNEM_mul)
9125 {
9126 inst.instruction = THUMB_OP32 (inst.instruction);
9127 inst.instruction |= inst.operands[0].reg << 8;
9128 inst.instruction |= inst.operands[1].reg << 16;
9129 inst.instruction |= inst.operands[2].reg << 0;
9130 }
9131 else
9132 {
9133 constraint (!unified_syntax
9134 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
9135 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9136 BAD_HIREG);
9137
9138 inst.instruction = THUMB_OP16 (inst.instruction);
9139 inst.instruction |= inst.operands[0].reg;
9140
9141 if (inst.operands[0].reg == inst.operands[1].reg)
9142 inst.instruction |= inst.operands[2].reg << 3;
9143 else if (inst.operands[0].reg == inst.operands[2].reg)
9144 inst.instruction |= inst.operands[1].reg << 3;
9145 else
9146 constraint (1, _("dest must overlap one source register"));
9147 }
9148 }
9149
9150 static void
9151 do_t_mull (void)
9152 {
9153 inst.instruction |= inst.operands[0].reg << 12;
9154 inst.instruction |= inst.operands[1].reg << 8;
9155 inst.instruction |= inst.operands[2].reg << 16;
9156 inst.instruction |= inst.operands[3].reg;
9157
9158 if (inst.operands[0].reg == inst.operands[1].reg)
9159 as_tsktsk (_("rdhi and rdlo must be different"));
9160 }
9161
9162 static void
9163 do_t_nop (void)
9164 {
9165 if (unified_syntax)
9166 {
9167 if (inst.size_req == 4 || inst.operands[0].imm > 15)
9168 {
9169 inst.instruction = THUMB_OP32 (inst.instruction);
9170 inst.instruction |= inst.operands[0].imm;
9171 }
9172 else
9173 {
9174 inst.instruction = THUMB_OP16 (inst.instruction);
9175 inst.instruction |= inst.operands[0].imm << 4;
9176 }
9177 }
9178 else
9179 {
9180 constraint (inst.operands[0].present,
9181 _("Thumb does not support NOP with hints"));
9182 inst.instruction = 0x46c0;
9183 }
9184 }
9185
9186 static void
9187 do_t_neg (void)
9188 {
9189 if (unified_syntax)
9190 {
9191 bfd_boolean narrow;
9192
9193 if (THUMB_SETS_FLAGS (inst.instruction))
9194 narrow = (current_it_mask == 0);
9195 else
9196 narrow = (current_it_mask != 0);
9197 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9198 narrow = FALSE;
9199 if (inst.size_req == 4)
9200 narrow = FALSE;
9201
9202 if (!narrow)
9203 {
9204 inst.instruction = THUMB_OP32 (inst.instruction);
9205 inst.instruction |= inst.operands[0].reg << 8;
9206 inst.instruction |= inst.operands[1].reg << 16;
9207 }
9208 else
9209 {
9210 inst.instruction = THUMB_OP16 (inst.instruction);
9211 inst.instruction |= inst.operands[0].reg;
9212 inst.instruction |= inst.operands[1].reg << 3;
9213 }
9214 }
9215 else
9216 {
9217 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9218 BAD_HIREG);
9219 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9220
9221 inst.instruction = THUMB_OP16 (inst.instruction);
9222 inst.instruction |= inst.operands[0].reg;
9223 inst.instruction |= inst.operands[1].reg << 3;
9224 }
9225 }
9226
9227 static void
9228 do_t_pkhbt (void)
9229 {
9230 inst.instruction |= inst.operands[0].reg << 8;
9231 inst.instruction |= inst.operands[1].reg << 16;
9232 inst.instruction |= inst.operands[2].reg;
9233 if (inst.operands[3].present)
9234 {
9235 unsigned int val = inst.reloc.exp.X_add_number;
9236 constraint (inst.reloc.exp.X_op != O_constant,
9237 _("expression too complex"));
9238 inst.instruction |= (val & 0x1c) << 10;
9239 inst.instruction |= (val & 0x03) << 6;
9240 }
9241 }
9242
9243 static void
9244 do_t_pkhtb (void)
9245 {
9246 if (!inst.operands[3].present)
9247 inst.instruction &= ~0x00000020;
9248 do_t_pkhbt ();
9249 }
9250
9251 static void
9252 do_t_pld (void)
9253 {
9254 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
9255 }
9256
9257 static void
9258 do_t_push_pop (void)
9259 {
9260 unsigned mask;
9261
9262 constraint (inst.operands[0].writeback,
9263 _("push/pop do not support {reglist}^"));
9264 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9265 _("expression too complex"));
9266
9267 mask = inst.operands[0].imm;
9268 if ((mask & ~0xff) == 0)
9269 inst.instruction = THUMB_OP16 (inst.instruction);
9270 else if ((inst.instruction == T_MNEM_push
9271 && (mask & ~0xff) == 1 << REG_LR)
9272 || (inst.instruction == T_MNEM_pop
9273 && (mask & ~0xff) == 1 << REG_PC))
9274 {
9275 inst.instruction = THUMB_OP16 (inst.instruction);
9276 inst.instruction |= THUMB_PP_PC_LR;
9277 mask &= 0xff;
9278 }
9279 else if (unified_syntax)
9280 {
9281 if (mask & (1 << 13))
9282 inst.error = _("SP not allowed in register list");
9283 if (inst.instruction == T_MNEM_push)
9284 {
9285 if (mask & (1 << 15))
9286 inst.error = _("PC not allowed in register list");
9287 }
9288 else
9289 {
9290 if (mask & (1 << 14)
9291 && mask & (1 << 15))
9292 inst.error = _("LR and PC should not both be in register list");
9293 }
9294 if ((mask & (mask - 1)) == 0)
9295 {
9296 /* Single register push/pop implemented as str/ldr. */
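	  /* ffs returns the 1-based index of the sole set bit, so
	     ffs (mask) - 1 is the register number; shifting it left by 12
	     places it in the Rt field of the str/ldr encodings below.  */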
9297 if (inst.instruction == T_MNEM_push)
9298 inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
9299 else
9300 inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
 9301 	  mask = ffs (mask) - 1;
9302 mask <<= 12;
9303 }
9304 else
9305 inst.instruction = THUMB_OP32 (inst.instruction);
9306 }
9307 else
9308 {
9309 inst.error = _("invalid register list to push/pop instruction");
9310 return;
9311 }
9312
9313 inst.instruction |= mask;
9314 }
9315
9316 static void
9317 do_t_rbit (void)
9318 {
9319 inst.instruction |= inst.operands[0].reg << 8;
9320 inst.instruction |= inst.operands[1].reg << 16;
9321 }
9322
9323 static void
9324 do_t_rev (void)
9325 {
9326 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9327 && inst.size_req != 4)
9328 {
9329 inst.instruction = THUMB_OP16 (inst.instruction);
9330 inst.instruction |= inst.operands[0].reg;
9331 inst.instruction |= inst.operands[1].reg << 3;
9332 }
9333 else if (unified_syntax)
9334 {
9335 inst.instruction = THUMB_OP32 (inst.instruction);
9336 inst.instruction |= inst.operands[0].reg << 8;
9337 inst.instruction |= inst.operands[1].reg << 16;
9338 inst.instruction |= inst.operands[1].reg;
9339 }
9340 else
9341 inst.error = BAD_HIREG;
9342 }
9343
9344 static void
9345 do_t_rsb (void)
9346 {
9347 int Rd, Rs;
9348
9349 Rd = inst.operands[0].reg;
9350 Rs = (inst.operands[1].present
9351 ? inst.operands[1].reg /* Rd, Rs, foo */
9352 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9353
9354 inst.instruction |= Rd << 8;
9355 inst.instruction |= Rs << 16;
9356 if (!inst.operands[2].isreg)
9357 {
9358 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9359 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9360 }
9361 else
9362 encode_thumb32_shifted_operand (2);
9363 }
9364
9365 static void
9366 do_t_setend (void)
9367 {
9368 constraint (current_it_mask, BAD_NOT_IT);
9369 if (inst.operands[0].imm)
9370 inst.instruction |= 0x8;
9371 }
9372
9373 static void
9374 do_t_shift (void)
9375 {
9376 if (!inst.operands[1].present)
9377 inst.operands[1].reg = inst.operands[0].reg;
9378
9379 if (unified_syntax)
9380 {
9381 bfd_boolean narrow;
9382 int shift_kind;
9383
9384 switch (inst.instruction)
9385 {
9386 case T_MNEM_asr:
9387 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
9388 case T_MNEM_lsl:
9389 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
9390 case T_MNEM_lsr:
9391 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
9392 case T_MNEM_ror:
9393 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
9394 default: abort ();
9395 }
9396
9397 if (THUMB_SETS_FLAGS (inst.instruction))
9398 narrow = (current_it_mask == 0);
9399 else
9400 narrow = (current_it_mask != 0);
9401 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9402 narrow = FALSE;
9403 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
9404 narrow = FALSE;
9405 if (inst.operands[2].isreg
9406 && (inst.operands[1].reg != inst.operands[0].reg
9407 || inst.operands[2].reg > 7))
9408 narrow = FALSE;
9409 if (inst.size_req == 4)
9410 narrow = FALSE;
9411
9412 if (!narrow)
9413 {
9414 if (inst.operands[2].isreg)
9415 {
9416 inst.instruction = THUMB_OP32 (inst.instruction);
9417 inst.instruction |= inst.operands[0].reg << 8;
9418 inst.instruction |= inst.operands[1].reg << 16;
9419 inst.instruction |= inst.operands[2].reg;
9420 }
9421 else
9422 {
9423 inst.operands[1].shifted = 1;
9424 inst.operands[1].shift_kind = shift_kind;
9425 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
9426 ? T_MNEM_movs : T_MNEM_mov);
9427 inst.instruction |= inst.operands[0].reg << 8;
9428 encode_thumb32_shifted_operand (1);
9429 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9430 inst.reloc.type = BFD_RELOC_UNUSED;
9431 }
9432 }
9433 else
9434 {
9435 if (inst.operands[2].isreg)
9436 {
9437 switch (shift_kind)
9438 {
9439 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
9440 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
9441 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
9442 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
9443 default: abort ();
9444 }
9445
9446 inst.instruction |= inst.operands[0].reg;
9447 inst.instruction |= inst.operands[2].reg << 3;
9448 }
9449 else
9450 {
9451 switch (shift_kind)
9452 {
9453 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9454 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9455 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9456 default: abort ();
9457 }
9458 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9459 inst.instruction |= inst.operands[0].reg;
9460 inst.instruction |= inst.operands[1].reg << 3;
9461 }
9462 }
9463 }
9464 else
9465 {
9466 constraint (inst.operands[0].reg > 7
9467 || inst.operands[1].reg > 7, BAD_HIREG);
9468 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9469
9470 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
9471 {
9472 constraint (inst.operands[2].reg > 7, BAD_HIREG);
9473 constraint (inst.operands[0].reg != inst.operands[1].reg,
9474 _("source1 and dest must be same register"));
9475
9476 switch (inst.instruction)
9477 {
9478 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
9479 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
9480 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
9481 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
9482 default: abort ();
9483 }
9484
9485 inst.instruction |= inst.operands[0].reg;
9486 inst.instruction |= inst.operands[2].reg << 3;
9487 }
9488 else
9489 {
9490 switch (inst.instruction)
9491 {
9492 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
9493 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
9494 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
9495 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
9496 default: abort ();
9497 }
9498 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9499 inst.instruction |= inst.operands[0].reg;
9500 inst.instruction |= inst.operands[1].reg << 3;
9501 }
9502 }
9503 }
9504
9505 static void
9506 do_t_simd (void)
9507 {
9508 inst.instruction |= inst.operands[0].reg << 8;
9509 inst.instruction |= inst.operands[1].reg << 16;
9510 inst.instruction |= inst.operands[2].reg;
9511 }
9512
9513 static void
9514 do_t_smc (void)
9515 {
9516 unsigned int value = inst.reloc.exp.X_add_number;
9517 constraint (inst.reloc.exp.X_op != O_constant,
9518 _("expression too complex"));
9519 inst.reloc.type = BFD_RELOC_UNUSED;
9520 inst.instruction |= (value & 0xf000) >> 12;
9521 inst.instruction |= (value & 0x0ff0);
9522 inst.instruction |= (value & 0x000f) << 16;
9523 }
9524
9525 static void
9526 do_t_ssat (void)
9527 {
9528 inst.instruction |= inst.operands[0].reg << 8;
9529 inst.instruction |= inst.operands[1].imm - 1;
9530 inst.instruction |= inst.operands[2].reg << 16;
9531
9532 if (inst.operands[3].present)
9533 {
9534 constraint (inst.reloc.exp.X_op != O_constant,
9535 _("expression too complex"));
9536
9537 if (inst.reloc.exp.X_add_number != 0)
9538 {
9539 if (inst.operands[3].shift_kind == SHIFT_ASR)
9540 inst.instruction |= 0x00200000; /* sh bit */
9541 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9542 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9543 }
9544 inst.reloc.type = BFD_RELOC_UNUSED;
9545 }
9546 }
9547
9548 static void
9549 do_t_ssat16 (void)
9550 {
9551 inst.instruction |= inst.operands[0].reg << 8;
9552 inst.instruction |= inst.operands[1].imm - 1;
9553 inst.instruction |= inst.operands[2].reg << 16;
9554 }
9555
9556 static void
9557 do_t_strex (void)
9558 {
9559 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9560 || inst.operands[2].postind || inst.operands[2].writeback
9561 || inst.operands[2].immisreg || inst.operands[2].shifted
9562 || inst.operands[2].negative,
9563 BAD_ADDR_MODE);
9564
9565 inst.instruction |= inst.operands[0].reg << 8;
9566 inst.instruction |= inst.operands[1].reg << 12;
9567 inst.instruction |= inst.operands[2].reg << 16;
9568 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9569 }
9570
9571 static void
9572 do_t_strexd (void)
9573 {
9574 if (!inst.operands[2].present)
9575 inst.operands[2].reg = inst.operands[1].reg + 1;
9576
9577 constraint (inst.operands[0].reg == inst.operands[1].reg
9578 || inst.operands[0].reg == inst.operands[2].reg
9579 || inst.operands[0].reg == inst.operands[3].reg
9580 || inst.operands[1].reg == inst.operands[2].reg,
9581 BAD_OVERLAP);
9582
9583 inst.instruction |= inst.operands[0].reg;
9584 inst.instruction |= inst.operands[1].reg << 12;
9585 inst.instruction |= inst.operands[2].reg << 8;
9586 inst.instruction |= inst.operands[3].reg << 16;
9587 }
9588
9589 static void
9590 do_t_sxtah (void)
9591 {
9592 inst.instruction |= inst.operands[0].reg << 8;
9593 inst.instruction |= inst.operands[1].reg << 16;
9594 inst.instruction |= inst.operands[2].reg;
9595 inst.instruction |= inst.operands[3].imm << 4;
9596 }
9597
9598 static void
9599 do_t_sxth (void)
9600 {
9601 if (inst.instruction <= 0xffff && inst.size_req != 4
9602 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9603 && (!inst.operands[2].present || inst.operands[2].imm == 0))
9604 {
9605 inst.instruction = THUMB_OP16 (inst.instruction);
9606 inst.instruction |= inst.operands[0].reg;
9607 inst.instruction |= inst.operands[1].reg << 3;
9608 }
9609 else if (unified_syntax)
9610 {
9611 if (inst.instruction <= 0xffff)
9612 inst.instruction = THUMB_OP32 (inst.instruction);
9613 inst.instruction |= inst.operands[0].reg << 8;
9614 inst.instruction |= inst.operands[1].reg;
9615 inst.instruction |= inst.operands[2].imm << 4;
9616 }
9617 else
9618 {
9619 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
9620 _("Thumb encoding does not support rotation"));
9621 constraint (1, BAD_HIREG);
9622 }
9623 }
9624
9625 static void
9626 do_t_swi (void)
9627 {
9628 inst.reloc.type = BFD_RELOC_ARM_SWI;
9629 }
9630
9631 static void
9632 do_t_tb (void)
9633 {
9634 int half;
9635
9636 half = (inst.instruction & 0x10) != 0;
9637 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9638 constraint (inst.operands[0].immisreg,
9639 _("instruction requires register index"));
9640 constraint (inst.operands[0].imm == 15,
9641 _("PC is not a valid index register"));
9642 constraint (!half && inst.operands[0].shifted,
9643 _("instruction does not allow shifted index"));
9644 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
9645 }
9646
9647 static void
9648 do_t_usat (void)
9649 {
9650 inst.instruction |= inst.operands[0].reg << 8;
9651 inst.instruction |= inst.operands[1].imm;
9652 inst.instruction |= inst.operands[2].reg << 16;
9653
9654 if (inst.operands[3].present)
9655 {
9656 constraint (inst.reloc.exp.X_op != O_constant,
9657 _("expression too complex"));
9658 if (inst.reloc.exp.X_add_number != 0)
9659 {
9660 if (inst.operands[3].shift_kind == SHIFT_ASR)
9661 inst.instruction |= 0x00200000; /* sh bit */
9662
9663 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9664 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9665 }
9666 inst.reloc.type = BFD_RELOC_UNUSED;
9667 }
9668 }
9669
9670 static void
9671 do_t_usat16 (void)
9672 {
9673 inst.instruction |= inst.operands[0].reg << 8;
9674 inst.instruction |= inst.operands[1].imm;
9675 inst.instruction |= inst.operands[2].reg << 16;
9676 }
9677
9678 /* Neon instruction encoder helpers. */
9679
9680 /* Encodings for the different types for various Neon opcodes. */
9681
9682 /* An "invalid" code for the following tables. */
9683 #define N_INV -1u
9684
9685 struct neon_tab_entry
9686 {
9687 unsigned integer;
9688 unsigned float_or_poly;
9689 unsigned scalar_or_imm;
9690 };
9691
9692 /* Map overloaded Neon opcodes to their respective encodings. */
9693 #define NEON_ENC_TAB \
9694 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9695 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9696 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9697 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9698 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9699 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9700 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9701 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9702 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9703 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9704 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9705 /* Register variants of the following two instructions are encoded as
9706 vcge / vcgt with the operands reversed. */ \
9707 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
9708 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
9709 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9710 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9711 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9712 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9713 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9714 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9715 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9716 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9717 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9718 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9719 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9720 X(vshl, 0x0000400, N_INV, 0x0800510), \
9721 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9722 X(vand, 0x0000110, N_INV, 0x0800030), \
9723 X(vbic, 0x0100110, N_INV, 0x0800030), \
9724 X(veor, 0x1000110, N_INV, N_INV), \
9725 X(vorn, 0x0300110, N_INV, 0x0800010), \
9726 X(vorr, 0x0200110, N_INV, 0x0800010), \
9727 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9728 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9729 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9730 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9731 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9732 X(vst1, 0x0000000, 0x0800000, N_INV), \
9733 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9734 X(vst2, 0x0000100, 0x0800100, N_INV), \
9735 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9736 X(vst3, 0x0000200, 0x0800200, N_INV), \
9737 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9738 X(vst4, 0x0000300, 0x0800300, N_INV), \
9739 X(vmovn, 0x1b20200, N_INV, N_INV), \
9740 X(vtrn, 0x1b20080, N_INV, N_INV), \
9741 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9742 X(vqmovun, 0x1b20240, N_INV, N_INV), \
9743 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
9744 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
9745 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
9746 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
9747 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
9748 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
9749 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
9750
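/* The table above is expanded twice through the X macro: once to create the
   N_MNEM_* enumerators below and once to fill neon_enc_tab[], so a mnemonic's
   enumerator doubles as its index into the table.  The three columns hold the
   base encodings of the integer, float/polynomial and scalar/immediate
   variants of each overloaded mnemonic (a few rows reuse the columns for
   other distinctions, as their comments note); N_INV marks a variant that
   does not exist.  */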
9751 enum neon_opc
9752 {
9753 #define X(OPC,I,F,S) N_MNEM_##OPC
9754 NEON_ENC_TAB
9755 #undef X
9756 };
9757
9758 static const struct neon_tab_entry neon_enc_tab[] =
9759 {
9760 #define X(OPC,I,F,S) { (I), (F), (S) }
9761 NEON_ENC_TAB
9762 #undef X
9763 };
9764
9765 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9766 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9767 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9768 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9769 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9770 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9771 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9772 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9773 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9774 #define NEON_ENC_SINGLE(X) \
9775 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
9776 #define NEON_ENC_DOUBLE(X) \
9777 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
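/* For example, a vadd that resolves to an integer type uses
   NEON_ENC_INTEGER (inst.instruction), which looks up row N_MNEM_vadd and
   yields the base value 0x0000800; register and size fields are OR'd in
   elsewhere.  The low 28 bits select the row; NEON_ENC_SINGLE and
   NEON_ENC_DOUBLE additionally preserve the top nibble of the original
   value (presumably the condition field used for the VFP encodings).  */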
9778
9779 /* Define shapes for instruction operands. The following mnemonic characters
9780 are used in this table:
9781
9782 F - VFP S<n> register
9783 D - Neon D<n> register
9784 Q - Neon Q<n> register
9785 I - Immediate
9786 S - Scalar
9787 R - ARM register
9788 L - D<n> register list
9789
9790 This table is used to generate various data:
9791 - enumerations of the form NS_DDR to be used as arguments to
9792 neon_select_shape.
9793 - a table classifying shapes into single, double, quad, mixed.
9794 - a table used to drive neon_select_shape.
9795 */
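/* For example, the entry X(3, (D, D, I), DOUBLE) below expands to the
   enumerator NS_DDI, to SC_DOUBLE in neon_shape_class[], and to
   { 3, { SE_D, SE_D, SE_I } } in neon_shape_tab[].  */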
9796
9797 #define NEON_SHAPE_DEF \
9798 X(3, (D, D, D), DOUBLE), \
9799 X(3, (Q, Q, Q), QUAD), \
9800 X(3, (D, D, I), DOUBLE), \
9801 X(3, (Q, Q, I), QUAD), \
9802 X(3, (D, D, S), DOUBLE), \
9803 X(3, (Q, Q, S), QUAD), \
9804 X(2, (D, D), DOUBLE), \
9805 X(2, (Q, Q), QUAD), \
9806 X(2, (D, S), DOUBLE), \
9807 X(2, (Q, S), QUAD), \
9808 X(2, (D, R), DOUBLE), \
9809 X(2, (Q, R), QUAD), \
9810 X(2, (D, I), DOUBLE), \
9811 X(2, (Q, I), QUAD), \
9812 X(3, (D, L, D), DOUBLE), \
9813 X(2, (D, Q), MIXED), \
9814 X(2, (Q, D), MIXED), \
9815 X(3, (D, Q, I), MIXED), \
9816 X(3, (Q, D, I), MIXED), \
9817 X(3, (Q, D, D), MIXED), \
9818 X(3, (D, Q, Q), MIXED), \
9819 X(3, (Q, Q, D), MIXED), \
9820 X(3, (Q, D, S), MIXED), \
9821 X(3, (D, Q, S), MIXED), \
9822 X(4, (D, D, D, I), DOUBLE), \
9823 X(4, (Q, Q, Q, I), QUAD), \
9824 X(2, (F, F), SINGLE), \
9825 X(3, (F, F, F), SINGLE), \
9826 X(2, (F, I), SINGLE), \
9827 X(2, (F, D), MIXED), \
9828 X(2, (D, F), MIXED), \
9829 X(3, (F, F, I), MIXED), \
9830 X(4, (R, R, F, F), SINGLE), \
9831 X(4, (F, F, R, R), SINGLE), \
9832 X(3, (D, R, R), DOUBLE), \
9833 X(3, (R, R, D), DOUBLE), \
9834 X(2, (S, R), SINGLE), \
9835 X(2, (R, S), SINGLE), \
9836 X(2, (F, R), SINGLE), \
9837 X(2, (R, F), SINGLE)
9838
9839 #define S2(A,B) NS_##A##B
9840 #define S3(A,B,C) NS_##A##B##C
9841 #define S4(A,B,C,D) NS_##A##B##C##D
9842
9843 #define X(N, L, C) S##N L
9844
9845 enum neon_shape
9846 {
9847 NEON_SHAPE_DEF,
9848 NS_NULL
9849 };
9850
9851 #undef X
9852 #undef S2
9853 #undef S3
9854 #undef S4
9855
9856 enum neon_shape_class
9857 {
9858 SC_SINGLE,
9859 SC_DOUBLE,
9860 SC_QUAD,
9861 SC_MIXED
9862 };
9863
9864 #define X(N, L, C) SC_##C
9865
9866 static enum neon_shape_class neon_shape_class[] =
9867 {
9868 NEON_SHAPE_DEF
9869 };
9870
9871 #undef X
9872
9873 enum neon_shape_el
9874 {
9875 SE_F,
9876 SE_D,
9877 SE_Q,
9878 SE_I,
9879 SE_S,
9880 SE_R,
9881 SE_L
9882 };
9883
9884 /* Register widths of above. */
9885 static unsigned neon_shape_el_size[] =
9886 {
9887 32,
9888 64,
9889 128,
9890 0,
9891 32,
9892 32,
9893 0
9894 };
9895
9896 struct neon_shape_info
9897 {
9898 unsigned els;
9899 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
9900 };
9901
9902 #define S2(A,B) { SE_##A, SE_##B }
9903 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
9904 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
9905
9906 #define X(N, L, C) { N, S##N L }
9907
9908 static struct neon_shape_info neon_shape_tab[] =
9909 {
9910 NEON_SHAPE_DEF
9911 };
9912
9913 #undef X
9914 #undef S2
9915 #undef S3
9916 #undef S4
9917
9918 /* Bit masks used in type checking given instructions.
9919 'N_EQK' means the type must be the same as (or based on in some way) the key
9920 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
9921 set, various other bits can be set as well in order to modify the meaning of
9922 the type constraint. */
9923
9924 enum neon_type_mask
9925 {
9926 N_S8 = 0x000001,
9927 N_S16 = 0x000002,
9928 N_S32 = 0x000004,
9929 N_S64 = 0x000008,
9930 N_U8 = 0x000010,
9931 N_U16 = 0x000020,
9932 N_U32 = 0x000040,
9933 N_U64 = 0x000080,
9934 N_I8 = 0x000100,
9935 N_I16 = 0x000200,
9936 N_I32 = 0x000400,
9937 N_I64 = 0x000800,
9938 N_8 = 0x001000,
9939 N_16 = 0x002000,
9940 N_32 = 0x004000,
9941 N_64 = 0x008000,
9942 N_P8 = 0x010000,
9943 N_P16 = 0x020000,
9944 N_F32 = 0x040000,
9945 N_F64 = 0x080000,
9946 N_KEY = 0x100000, /* key element (main type specifier). */
9947 N_EQK = 0x200000, /* given operand has the same type & size as the key. */
9948 N_VFP = 0x400000, /* VFP mode: operand size must match register width. */
9949 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
9950 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
9951 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
9952 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
9953 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
9954 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
9955 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
9956 N_UTYP = 0,
9957 N_MAX_NONSPECIAL = N_F64
9958 };
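/* The modifier bits N_DBL..N_SIZ reuse the numeric values of N_S8..N_U32 and
   are only interpreted when N_EQK is also set.  For example, N_EQK | N_HLF
   describes an operand with the same base type as the key operand but half
   its element size (see neon_modify_type_size below).  */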
9959
9960 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
9961
9962 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9963 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9964 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9965 #define N_SUF_32 (N_SU_32 | N_F32)
9966 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
9967 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
9968
9969 /* Pass this as the first type argument to neon_check_type to ignore types
9970 altogether. */
9971 #define N_IGNORE_TYPE (N_KEY | N_EQK)
9972
9973 /* Select a "shape" for the current instruction (describing register types or
9974 sizes) from a list of alternatives. Return NS_NULL if the current instruction
9975 doesn't fit. For non-polymorphic shapes, checking is usually done as a
9976 function of operand parsing, so this function doesn't need to be called.
9977 Shapes should be listed in order of decreasing length. */
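/* For illustration, a typical call might look like
     rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
   i.e. a list of candidate shapes terminated by NS_NULL, of which the first
   one matching the parsed operands is returned.  */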
9978
9979 static enum neon_shape
9980 neon_select_shape (enum neon_shape shape, ...)
9981 {
9982 va_list ap;
9983 enum neon_shape first_shape = shape;
9984
9985 /* Fix missing optional operands. FIXME: we don't know at this point how
9986 many arguments we should have, so this makes the assumption that we have
9987 > 1. This is true of all current Neon opcodes, I think, but may not be
9988 true in the future. */
9989 if (!inst.operands[1].present)
9990 inst.operands[1] = inst.operands[0];
9991
9992 va_start (ap, shape);
9993
9994 for (; shape != NS_NULL; shape = va_arg (ap, int))
9995 {
9996 unsigned j;
9997 int matches = 1;
9998
9999 for (j = 0; j < neon_shape_tab[shape].els; j++)
10000 {
10001 if (!inst.operands[j].present)
10002 {
10003 matches = 0;
10004 break;
10005 }
10006
10007 switch (neon_shape_tab[shape].el[j])
10008 {
10009 case SE_F:
10010 if (!(inst.operands[j].isreg
10011 && inst.operands[j].isvec
10012 && inst.operands[j].issingle
10013 && !inst.operands[j].isquad))
10014 matches = 0;
10015 break;
10016
10017 case SE_D:
10018 if (!(inst.operands[j].isreg
10019 && inst.operands[j].isvec
10020 && !inst.operands[j].isquad
10021 && !inst.operands[j].issingle))
10022 matches = 0;
10023 break;
10024
10025 case SE_R:
10026 if (!(inst.operands[j].isreg
10027 && !inst.operands[j].isvec))
10028 matches = 0;
10029 break;
10030
10031 case SE_Q:
10032 if (!(inst.operands[j].isreg
10033 && inst.operands[j].isvec
10034 && inst.operands[j].isquad
10035 && !inst.operands[j].issingle))
10036 matches = 0;
10037 break;
10038
10039 case SE_I:
10040 if (!(!inst.operands[j].isreg
10041 && !inst.operands[j].isscalar))
10042 matches = 0;
10043 break;
10044
10045 case SE_S:
10046 if (!(!inst.operands[j].isreg
10047 && inst.operands[j].isscalar))
10048 matches = 0;
10049 break;
10050
10051 case SE_L:
10052 break;
10053 }
10054 }
10055 if (matches)
10056 break;
10057 }
10058
10059 va_end (ap);
10060
10061 if (shape == NS_NULL && first_shape != NS_NULL)
10062 first_error (_("invalid instruction shape"));
10063
10064 return shape;
10065 }
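
/* Illustrative example (not exhaustive): a call such as
   neon_select_shape (NS_DDD, NS_QQQ, NS_NULL) returns NS_DDD when all three
   operands are D registers (e.g. "vadd.i32 d0, d1, d2") and NS_QQQ when they
   are all Q registers (e.g. "vadd.i32 q0, q1, q2"); if no listed shape
   matches, NS_NULL is returned and "invalid instruction shape" is reported.  */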
10066
10067 /* True if SHAPE is predominantly a quadword operation (most of the time, this
10068 means the Q bit should be set). */
10069
10070 static int
10071 neon_quad (enum neon_shape shape)
10072 {
10073 return neon_shape_class[shape] == SC_QUAD;
10074 }
10075
10076 static void
10077 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
10078 unsigned *g_size)
10079 {
10080 /* Allow modifications to a type which is constrained to match the key
10081 element, according to the bits set alongside N_EQK. */
10082 if ((typebits & N_EQK) != 0)
10083 {
10084 if ((typebits & N_HLF) != 0)
10085 *g_size /= 2;
10086 else if ((typebits & N_DBL) != 0)
10087 *g_size *= 2;
10088 if ((typebits & N_SGN) != 0)
10089 *g_type = NT_signed;
10090 else if ((typebits & N_UNS) != 0)
10091 *g_type = NT_unsigned;
10092 else if ((typebits & N_INT) != 0)
10093 *g_type = NT_integer;
10094 else if ((typebits & N_FLT) != 0)
10095 *g_type = NT_float;
10096 else if ((typebits & N_SIZ) != 0)
10097 *g_type = NT_untyped;
10098 }
10099 }
10100
10101 /* Return a copy of KEY promoted by the bits set in THISARG. KEY should be the
10102 "key" operand type, i.e. the single type specified in a Neon instruction when
10103 it is the only one given. */
10104
10105 static struct neon_type_el
10106 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
10107 {
10108 struct neon_type_el dest = *key;
10109
10110 assert ((thisarg & N_EQK) != 0);
10111
10112 neon_modify_type_size (thisarg, &dest.type, &dest.size);
10113
10114 return dest;
10115 }
10116
10117 /* Convert Neon type and size into compact bitmask representation. */
10118
10119 static enum neon_type_mask
10120 type_chk_of_el_type (enum neon_el_type type, unsigned size)
10121 {
10122 switch (type)
10123 {
10124 case NT_untyped:
10125 switch (size)
10126 {
10127 case 8: return N_8;
10128 case 16: return N_16;
10129 case 32: return N_32;
10130 case 64: return N_64;
10131 default: ;
10132 }
10133 break;
10134
10135 case NT_integer:
10136 switch (size)
10137 {
10138 case 8: return N_I8;
10139 case 16: return N_I16;
10140 case 32: return N_I32;
10141 case 64: return N_I64;
10142 default: ;
10143 }
10144 break;
10145
10146 case NT_float:
10147 switch (size)
10148 {
10149 case 32: return N_F32;
10150 case 64: return N_F64;
10151 default: ;
10152 }
10153 break;
10154
10155 case NT_poly:
10156 switch (size)
10157 {
10158 case 8: return N_P8;
10159 case 16: return N_P16;
10160 default: ;
10161 }
10162 break;
10163
10164 case NT_signed:
10165 switch (size)
10166 {
10167 case 8: return N_S8;
10168 case 16: return N_S16;
10169 case 32: return N_S32;
10170 case 64: return N_S64;
10171 default: ;
10172 }
10173 break;
10174
10175 case NT_unsigned:
10176 switch (size)
10177 {
10178 case 8: return N_U8;
10179 case 16: return N_U16;
10180 case 32: return N_U32;
10181 case 64: return N_U64;
10182 default: ;
10183 }
10184 break;
10185
10186 default: ;
10187 }
10188
10189 return N_UTYP;
10190 }
10191
10192 /* Convert compact Neon bitmask type representation to a type and size. Only
10193 handles the case where a single bit is set in the mask. */
10194
10195 static int
10196 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
10197 enum neon_type_mask mask)
10198 {
10199 if ((mask & N_EQK) != 0)
10200 return FAIL;
10201
10202 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
10203 *size = 8;
10204 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
10205 *size = 16;
10206 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
10207 *size = 32;
10208 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
10209 *size = 64;
10210 else
10211 return FAIL;
10212
10213 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
10214 *type = NT_signed;
10215 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
10216 *type = NT_unsigned;
10217 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
10218 *type = NT_integer;
10219 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
10220 *type = NT_untyped;
10221 else if ((mask & (N_P8 | N_P16)) != 0)
10222 *type = NT_poly;
10223 else if ((mask & (N_F32 | N_F64)) != 0)
10224 *type = NT_float;
10225 else
10226 return FAIL;
10227
10228 return SUCCESS;
10229 }
10230
10231 /* Modify a bitmask of allowed types. This is only needed for type
10232 relaxation. */
10233
10234 static unsigned
10235 modify_types_allowed (unsigned allowed, unsigned mods)
10236 {
10237 unsigned size;
10238 enum neon_el_type type;
10239 unsigned destmask;
10240 int i;
10241
10242 destmask = 0;
10243
10244 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
10245 {
10246 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
10247 {
10248 neon_modify_type_size (mods, &type, &size);
10249 destmask |= type_chk_of_el_type (type, size);
10250 }
10251 }
10252
10253 return destmask;
10254 }
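
/* For example (using the bit definitions above), relaxing N_SU_32 with
   mods = N_EQK | N_DBL doubles each element size, giving N_SU_16_64:
   {S8,S16,S32,U8,U16,U32} -> {S16,S32,S64,U16,U32,U64}.  */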
10255
10256 /* Check type and return type classification.
10257 The manual states (paraphrase): If one datatype is given, it indicates the
10258 type given in:
10259 - the second operand, if there is one
10260 - the operand, if there is no second operand
10261 - the result, if there are no operands.
10262 This isn't quite good enough though, so we use the concept of a "key"
10263 datatype, set on a per-instruction basis: when only one data type is
10264 written, the key operand is the one it applies to.
10265 Note: this function has side-effects (e.g. filling in missing operands). All
10266 Neon instructions should call it before performing bit encoding. */
10267
10268 static struct neon_type_el
10269 neon_check_type (unsigned els, enum neon_shape ns, ...)
10270 {
10271 va_list ap;
10272 unsigned i, pass, key_el = 0;
10273 unsigned types[NEON_MAX_TYPE_ELS];
10274 enum neon_el_type k_type = NT_invtype;
10275 unsigned k_size = -1u;
10276 struct neon_type_el badtype = {NT_invtype, -1};
10277 unsigned key_allowed = 0;
10278
10279 /* The optional register operand in a Neon instruction is always operand 1.
10280 If it was omitted, fill it in here by duplicating operand 0. */
10281 if (els > 1 && !inst.operands[1].present)
10282 inst.operands[1] = inst.operands[0];
10283
10284 /* Suck up all the varargs. */
10285 va_start (ap, ns);
10286 for (i = 0; i < els; i++)
10287 {
10288 unsigned thisarg = va_arg (ap, unsigned);
10289 if (thisarg == N_IGNORE_TYPE)
10290 {
10291 va_end (ap);
10292 return badtype;
10293 }
10294 types[i] = thisarg;
10295 if ((thisarg & N_KEY) != 0)
10296 key_el = i;
10297 }
10298 va_end (ap);
10299
10300 if (inst.vectype.elems > 0)
10301 for (i = 0; i < els; i++)
10302 if (inst.operands[i].vectype.type != NT_invtype)
10303 {
10304 first_error (_("types specified in both the mnemonic and operands"));
10305 return badtype;
10306 }
10307
10308 /* Duplicate inst.vectype elements here as necessary.
10309 FIXME: No idea if this is exactly the same as the ARM assembler,
10310 particularly when an insn takes one register and one non-register
10311 operand. */
10312 if (inst.vectype.elems == 1 && els > 1)
10313 {
10314 unsigned j;
10315 inst.vectype.elems = els;
10316 inst.vectype.el[key_el] = inst.vectype.el[0];
10317 for (j = 0; j < els; j++)
10318 if (j != key_el)
10319 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10320 types[j]);
10321 }
10322 else if (inst.vectype.elems == 0 && els > 0)
10323 {
10324 unsigned j;
10325 /* No types were given after the mnemonic, so look for types specified
10326 after each operand. We allow some flexibility here; as long as the
10327 "key" operand has a type, we can infer the others. */
10328 for (j = 0; j < els; j++)
10329 if (inst.operands[j].vectype.type != NT_invtype)
10330 inst.vectype.el[j] = inst.operands[j].vectype;
10331
10332 if (inst.operands[key_el].vectype.type != NT_invtype)
10333 {
10334 for (j = 0; j < els; j++)
10335 if (inst.operands[j].vectype.type == NT_invtype)
10336 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10337 types[j]);
10338 }
10339 else
10340 {
10341 first_error (_("operand types can't be inferred"));
10342 return badtype;
10343 }
10344 }
10345 else if (inst.vectype.elems != els)
10346 {
10347 first_error (_("type specifier has the wrong number of parts"));
10348 return badtype;
10349 }
10350
10351 for (pass = 0; pass < 2; pass++)
10352 {
10353 for (i = 0; i < els; i++)
10354 {
10355 unsigned thisarg = types[i];
10356 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
10357 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
10358 enum neon_el_type g_type = inst.vectype.el[i].type;
10359 unsigned g_size = inst.vectype.el[i].size;
10360
10361 /* Decay more-specific signed & unsigned types to sign-insensitive
10362 integer types if sign-specific variants are unavailable. */
10363 if ((g_type == NT_signed || g_type == NT_unsigned)
10364 && (types_allowed & N_SU_ALL) == 0)
10365 g_type = NT_integer;
10366
10367 /* If only untyped args are allowed, decay any more specific types to
10368 them. Some instructions only care about signs for some element
10369 sizes, so handle that properly. */
10370 if ((g_size == 8 && (types_allowed & N_8) != 0)
10371 || (g_size == 16 && (types_allowed & N_16) != 0)
10372 || (g_size == 32 && (types_allowed & N_32) != 0)
10373 || (g_size == 64 && (types_allowed & N_64) != 0))
10374 g_type = NT_untyped;
10375
10376 if (pass == 0)
10377 {
10378 if ((thisarg & N_KEY) != 0)
10379 {
10380 k_type = g_type;
10381 k_size = g_size;
10382 key_allowed = thisarg & ~N_KEY;
10383 }
10384 }
10385 else
10386 {
10387 if ((thisarg & N_VFP) != 0)
10388 {
10389 enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
10390 unsigned regwidth = neon_shape_el_size[regshape], match;
10391
10392 /* In VFP mode, operands must match register widths. If we
10393 have a key operand, use its width, else use the width of
10394 the current operand. */
10395 if (k_size != -1u)
10396 match = k_size;
10397 else
10398 match = g_size;
10399
10400 if (regwidth != match)
10401 {
10402 first_error (_("operand size must match register width"));
10403 return badtype;
10404 }
10405 }
10406
10407 if ((thisarg & N_EQK) == 0)
10408 {
10409 unsigned given_type = type_chk_of_el_type (g_type, g_size);
10410
10411 if ((given_type & types_allowed) == 0)
10412 {
10413 first_error (_("bad type in Neon instruction"));
10414 return badtype;
10415 }
10416 }
10417 else
10418 {
10419 enum neon_el_type mod_k_type = k_type;
10420 unsigned mod_k_size = k_size;
10421 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
10422 if (g_type != mod_k_type || g_size != mod_k_size)
10423 {
10424 first_error (_("inconsistent types in Neon instruction"));
10425 return badtype;
10426 }
10427 }
10428 }
10429 }
10430 }
10431
10432 return inst.vectype.el[key_el];
10433 }
10434
10435 /* Neon-style VFP instruction forwarding. */
10436
10437 /* Thumb VFP instructions have 0xE in the condition field. */
10438
10439 static void
10440 do_vfp_cond_or_thumb (void)
10441 {
10442 if (thumb_mode)
10443 inst.instruction |= 0xe0000000;
10444 else
10445 inst.instruction |= inst.cond << 28;
10446 }
10447
10448 /* Look up and encode a simple mnemonic, for use as a helper function for the
10449 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
10450 etc. It is assumed that operand parsing has already been done, and that the
10451 operands are in the form expected by the given opcode (this isn't necessarily
10452 the same as the form in which they were parsed, hence some massaging must
10453 take place before this function is called).
10454 Checks current arch version against that in the looked-up opcode. */
10455
10456 static void
10457 do_vfp_nsyn_opcode (const char *opname)
10458 {
10459 const struct asm_opcode *opcode;
10460
10461 opcode = hash_find (arm_ops_hsh, opname);
10462
10463 if (!opcode)
10464 abort ();
10465
10466 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
10467 thumb_mode ? *opcode->tvariant : *opcode->avariant),
10468 _(BAD_FPU));
10469
10470 if (thumb_mode)
10471 {
10472 inst.instruction = opcode->tvalue;
10473 opcode->tencode ();
10474 }
10475 else
10476 {
10477 inst.instruction = (inst.cond << 28) | opcode->avalue;
10478 opcode->aencode ();
10479 }
10480 }
10481
10482 static void
10483 do_vfp_nsyn_add_sub (enum neon_shape rs)
10484 {
10485 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
10486
10487 if (rs == NS_FFF)
10488 {
10489 if (is_add)
10490 do_vfp_nsyn_opcode ("fadds");
10491 else
10492 do_vfp_nsyn_opcode ("fsubs");
10493 }
10494 else
10495 {
10496 if (is_add)
10497 do_vfp_nsyn_opcode ("faddd");
10498 else
10499 do_vfp_nsyn_opcode ("fsubd");
10500 }
10501 }
10502
10503 /* Check operand types to see if this is a VFP instruction, and if so call
10504 PFN (). */
10505
10506 static int
10507 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
10508 {
10509 enum neon_shape rs;
10510 struct neon_type_el et;
10511
10512 switch (args)
10513 {
10514 case 2:
10515 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
10516 et = neon_check_type (2, rs,
10517 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
10518 break;
10519
10520 case 3:
10521 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
10522 et = neon_check_type (3, rs,
10523 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
10524 break;
10525
10526 default:
10527 abort ();
10528 }
10529
10530 if (et.type != NT_invtype)
10531 {
10532 pfn (rs);
10533 return SUCCESS;
10534 }
10535 else
10536 inst.error = NULL;
10537
10538 return FAIL;
10539 }
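
/* For instance, "vadd.f32 s0, s1, s2" matches NS_FFF with an F32 key and is
   encoded via do_vfp_nsyn_add_sub, whereas "vadd.f32 d0, d1, d2" fails the
   N_VFP width check for NS_DDD (a 32-bit key against 64-bit D registers), so
   FAIL is returned and the instruction falls through to the Neon encoders.  */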
10540
10541 static void
10542 do_vfp_nsyn_mla_mls (enum neon_shape rs)
10543 {
10544 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
10545
10546 if (rs == NS_FFF)
10547 {
10548 if (is_mla)
10549 do_vfp_nsyn_opcode ("fmacs");
10550 else
10551 do_vfp_nsyn_opcode ("fmscs");
10552 }
10553 else
10554 {
10555 if (is_mla)
10556 do_vfp_nsyn_opcode ("fmacd");
10557 else
10558 do_vfp_nsyn_opcode ("fmscd");
10559 }
10560 }
10561
10562 static void
10563 do_vfp_nsyn_mul (enum neon_shape rs)
10564 {
10565 if (rs == NS_FFF)
10566 do_vfp_nsyn_opcode ("fmuls");
10567 else
10568 do_vfp_nsyn_opcode ("fmuld");
10569 }
10570
10571 static void
10572 do_vfp_nsyn_abs_neg (enum neon_shape rs)
10573 {
10574 int is_neg = (inst.instruction & 0x80) != 0;
10575 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
10576
10577 if (rs == NS_FF)
10578 {
10579 if (is_neg)
10580 do_vfp_nsyn_opcode ("fnegs");
10581 else
10582 do_vfp_nsyn_opcode ("fabss");
10583 }
10584 else
10585 {
10586 if (is_neg)
10587 do_vfp_nsyn_opcode ("fnegd");
10588 else
10589 do_vfp_nsyn_opcode ("fabsd");
10590 }
10591 }
10592
10593 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
10594 insns belong to Neon, and are handled elsewhere. */
10595
10596 static void
10597 do_vfp_nsyn_ldm_stm (int is_dbmode)
10598 {
10599 int is_ldm = (inst.instruction & (1 << 20)) != 0;
10600 if (is_ldm)
10601 {
10602 if (is_dbmode)
10603 do_vfp_nsyn_opcode ("fldmdbs");
10604 else
10605 do_vfp_nsyn_opcode ("fldmias");
10606 }
10607 else
10608 {
10609 if (is_dbmode)
10610 do_vfp_nsyn_opcode ("fstmdbs");
10611 else
10612 do_vfp_nsyn_opcode ("fstmias");
10613 }
10614 }
10615
10616 static void
10617 do_vfp_nsyn_ldr_str (int is_ldr)
10618 {
10619 if (is_ldr)
10620 do_vfp_nsyn_opcode ("flds");
10621 else
10622 do_vfp_nsyn_opcode ("fsts");
10623 }
10624
10625 static void
10626 do_vfp_nsyn_sqrt (void)
10627 {
10628 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
10629 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
10630
10631 if (rs == NS_FF)
10632 do_vfp_nsyn_opcode ("fsqrts");
10633 else
10634 do_vfp_nsyn_opcode ("fsqrtd");
10635 }
10636
10637 static void
10638 do_vfp_nsyn_div (void)
10639 {
10640 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
10641 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
10642 N_F32 | N_F64 | N_KEY | N_VFP);
10643
10644 if (rs == NS_FFF)
10645 do_vfp_nsyn_opcode ("fdivs");
10646 else
10647 do_vfp_nsyn_opcode ("fdivd");
10648 }
10649
10650 static void
10651 do_vfp_nsyn_nmul (void)
10652 {
10653 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
10654 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
10655 N_F32 | N_F64 | N_KEY | N_VFP);
10656
10657 if (rs == NS_FFF)
10658 {
10659 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
10660 do_vfp_sp_dyadic ();
10661 }
10662 else
10663 {
10664 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
10665 do_vfp_dp_rd_rn_rm ();
10666 }
10667 do_vfp_cond_or_thumb ();
10668 }
10669
10670 static void
10671 do_vfp_nsyn_cmp (void)
10672 {
10673 if (inst.operands[1].isreg)
10674 {
10675 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
10676 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
10677
10678 if (rs == NS_FF)
10679 {
10680 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
10681 do_vfp_sp_monadic ();
10682 }
10683 else
10684 {
10685 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
10686 do_vfp_dp_rd_rm ();
10687 }
10688 }
10689 else
10690 {
10691 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
10692 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
10693
10694 switch (inst.instruction & 0x0fffffff)
10695 {
10696 case N_MNEM_vcmp:
10697 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
10698 break;
10699 case N_MNEM_vcmpe:
10700 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
10701 break;
10702 default:
10703 abort ();
10704 }
10705
10706 if (rs == NS_FI)
10707 {
10708 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
10709 do_vfp_sp_compare_z ();
10710 }
10711 else
10712 {
10713 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
10714 do_vfp_dp_rd ();
10715 }
10716 }
10717 do_vfp_cond_or_thumb ();
10718 }
10719
10720 static void
10721 nsyn_insert_sp (void)
10722 {
10723 inst.operands[1] = inst.operands[0];
10724 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
10725 inst.operands[0].reg = 13;
10726 inst.operands[0].isreg = 1;
10727 inst.operands[0].writeback = 1;
10728 inst.operands[0].present = 1;
10729 }
10730
10731 static void
10732 do_vfp_nsyn_push (void)
10733 {
10734 nsyn_insert_sp ();
10735 if (inst.operands[1].issingle)
10736 do_vfp_nsyn_opcode ("fstmdbs");
10737 else
10738 do_vfp_nsyn_opcode ("fstmdbd");
10739 }
10740
10741 static void
10742 do_vfp_nsyn_pop (void)
10743 {
10744 nsyn_insert_sp ();
10745 if (inst.operands[1].issingle)
10746 do_vfp_nsyn_opcode ("fldmdbs");
10747 else
10748 do_vfp_nsyn_opcode ("fldmdbd");
10749 }
10750
10751 /* Fix up Neon data-processing instructions, ORing in the correct mode bits and,
10752 in Thumb mode, moving the encoded U bit from bit 24 to bit 28. */
10753
10754 static unsigned
10755 neon_dp_fixup (unsigned i)
10756 {
10757 if (thumb_mode)
10758 {
10759 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
10760 if (i & (1 << 24))
10761 i |= 1 << 28;
10762
10763 i &= ~(1 << 24);
10764
10765 i |= 0xef000000;
10766 }
10767 else
10768 i |= 0xf2000000;
10769
10770 return i;
10771 }
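
/* Worked example: an encoding with the U bit set (bit 24), say i = 0x01000000,
   becomes 0xf3000000 in ARM state (0xf2000000 prefix, U still at bit 24) and
   0xff000000 in Thumb state (0xef000000 prefix, U moved to bit 28).  */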
10772
10773 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
10774 (0, 1, 2, 3). */
10775
10776 static unsigned
10777 neon_logbits (unsigned x)
10778 {
10779 return ffs (x) - 4;
10780 }
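
/* E.g. neon_logbits (8) == 0 and neon_logbits (32) == 2; the result is what
   gets written into the various size fields below.  */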
10781
10782 #define LOW4(R) ((R) & 0xf)
10783 #define HI1(R) (((R) >> 4) & 1)
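
/* These split a 5-bit register number across the encoding: e.g. for d17,
   LOW4 (17) == 1 goes in the 4-bit register field and HI1 (17) == 1 goes in
   the corresponding D/N/M extension bit.  */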
10784
10785 /* Encode insns with bit pattern:
10786
10787 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10788 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
10789
10790 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
10791 different meaning for some instructions. */
10792
10793 static void
10794 neon_three_same (int isquad, int ubit, int size)
10795 {
10796 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10797 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10798 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10799 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10800 inst.instruction |= LOW4 (inst.operands[2].reg);
10801 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
10802 inst.instruction |= (isquad != 0) << 6;
10803 inst.instruction |= (ubit != 0) << 24;
10804 if (size != -1)
10805 inst.instruction |= neon_logbits (size) << 20;
10806
10807 inst.instruction = neon_dp_fixup (inst.instruction);
10808 }
10809
10810 /* Encode instructions of the form:
10811
10812 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10813 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10814
10815 Don't write size if SIZE == -1. */
10816
10817 static void
10818 neon_two_same (int qbit, int ubit, int size)
10819 {
10820 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10821 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10822 inst.instruction |= LOW4 (inst.operands[1].reg);
10823 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10824 inst.instruction |= (qbit != 0) << 6;
10825 inst.instruction |= (ubit != 0) << 24;
10826
10827 if (size != -1)
10828 inst.instruction |= neon_logbits (size) << 18;
10829
10830 inst.instruction = neon_dp_fixup (inst.instruction);
10831 }
10832
10833 /* Neon instruction encoders, in approximate order of appearance. */
10834
10835 static void
10836 do_neon_dyadic_i_su (void)
10837 {
10838 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
10839 struct neon_type_el et = neon_check_type (3, rs,
10840 N_EQK, N_EQK, N_SU_32 | N_KEY);
10841 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
10842 }
10843
10844 static void
10845 do_neon_dyadic_i64_su (void)
10846 {
10847 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
10848 struct neon_type_el et = neon_check_type (3, rs,
10849 N_EQK, N_EQK, N_SU_ALL | N_KEY);
10850 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
10851 }
10852
10853 static void
10854 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
10855 unsigned immbits)
10856 {
10857 unsigned size = et.size >> 3;
10858 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10859 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10860 inst.instruction |= LOW4 (inst.operands[1].reg);
10861 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10862 inst.instruction |= (isquad != 0) << 6;
10863 inst.instruction |= immbits << 16;
10864 inst.instruction |= (size >> 3) << 7;
10865 inst.instruction |= (size & 0x7) << 19;
10866 if (write_ubit)
10867 inst.instruction |= (uval != 0) << 24;
10868
10869 inst.instruction = neon_dp_fixup (inst.instruction);
10870 }
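
/* Note that the element-size marker shares a field with the shift amount: e.g.
   for VSHL.I32 #3, et.size >> 3 == 4, so bit 21 (the 32-bit marker) is set and
   the shift amount 3 is ORed into bits [21:16] (imm6 in ARM ARM terms); for
   64-bit elements the marker is the L bit at bit 7 instead.  */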
10871
10872 static void
10873 do_neon_shl_imm (void)
10874 {
10875 if (!inst.operands[2].isreg)
10876 {
10877 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
10878 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
10879 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10880 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
10881 }
10882 else
10883 {
10884 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
10885 struct neon_type_el et = neon_check_type (3, rs,
10886 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10887 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10888 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
10889 }
10890 }
10891
10892 static void
10893 do_neon_qshl_imm (void)
10894 {
10895 if (!inst.operands[2].isreg)
10896 {
10897 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
10898 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
10899 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10900 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
10901 inst.operands[2].imm);
10902 }
10903 else
10904 {
10905 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
10906 struct neon_type_el et = neon_check_type (3, rs,
10907 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10908 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10909 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
10910 }
10911 }
10912
10913 static int
10914 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
10915 {
10916 /* Handle .I8 and .I64 as pseudo-instructions. */
10917 switch (size)
10918 {
10919 case 8:
10920 /* Unfortunately, this will make everything apart from zero out-of-range.
10921 FIXME: is this the intended semantics? There doesn't seem to be much point
10922 in accepting .I8 if so. */
10923 immediate |= immediate << 8;
10924 size = 16;
10925 break;
10926 case 64:
10927 /* Similarly, anything other than zero will be replicated in bits [63:32],
10928 which probably isn't what we want if we specified .I64. */
10929 if (immediate != 0)
10930 goto bad_immediate;
10931 size = 32;
10932 break;
10933 default: ;
10934 }
10935
10936 if (immediate == (immediate & 0x000000ff))
10937 {
10938 *immbits = immediate;
10939 return (size == 16) ? 0x9 : 0x1;
10940 }
10941 else if (immediate == (immediate & 0x0000ff00))
10942 {
10943 *immbits = immediate >> 8;
10944 return (size == 16) ? 0xb : 0x3;
10945 }
10946 else if (immediate == (immediate & 0x00ff0000))
10947 {
10948 *immbits = immediate >> 16;
10949 return 0x5;
10950 }
10951 else if (immediate == (immediate & 0xff000000))
10952 {
10953 *immbits = immediate >> 24;
10954 return 0x7;
10955 }
10956
10957 bad_immediate:
10958 first_error (_("immediate value out of range"));
10959 return FAIL;
10960 }
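
/* Example: for "vbic.i32 d0, #0xff00" the immediate matches the 0x0000ff00
   form, so *immbits is set to 0xff and the function returns cmode 0x3.  */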
10961
10962 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
10963 A, B, C, D. */
10964
10965 static int
10966 neon_bits_same_in_bytes (unsigned imm)
10967 {
10968 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
10969 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
10970 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
10971 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
10972 }
10973
10974 /* For immediate of above form, return 0bABCD. */
10975
10976 static unsigned
10977 neon_squash_bits (unsigned imm)
10978 {
10979 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
10980 | ((imm & 0x01000000) >> 21);
10981 }
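
/* E.g. 0x00ff00ff passes neon_bits_same_in_bytes and squashes to 0b0101.  */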
10982
10983 /* Compress quarter-float representation to 0b...000 abcdefgh. */
10984
10985 static unsigned
10986 neon_qfloat_bits (unsigned imm)
10987 {
10988 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
10989 }
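
/* E.g. 1.0f (0x3f800000) compresses to 0x70, the 8-bit immediate form used
   for the float variant of immediate VMOV (cmode 0xf below).  */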
10990
10991 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10992 the instruction. *OP is passed as the initial value of the op field, and
10993 may be set to a different value depending on the constant (e.g.
10994 "MOV I64, 0bAAAAAAAABBBB...", which uses OP = 1 despite being MOV not
10995 MVN). */
10996
10997 static int
10998 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
10999 int *op, int size, enum neon_el_type type)
11000 {
11001 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
11002 {
11003 if (size != 32 || *op == 1)
11004 return FAIL;
11005 *immbits = neon_qfloat_bits (immlo);
11006 return 0xf;
11007 }
11008 else if (size == 64 && neon_bits_same_in_bytes (immhi)
11009 && neon_bits_same_in_bytes (immlo))
11010 {
11011 /* Check this one first so we don't have to bother with immhi in later
11012 tests. */
11013 if (*op == 1)
11014 return FAIL;
11015 *immbits = (neon_squash_bits (immhi) << 4) | neon_squash_bits (immlo);
11016 *op = 1;
11017 return 0xe;
11018 }
11019 else if (immhi != 0)
11020 return FAIL;
11021 else if (immlo == (immlo & 0x000000ff))
11022 {
11023 /* 64-bit case was already handled. Don't allow MVN with 8-bit
11024 immediate. */
11025 if ((size != 8 && size != 16 && size != 32)
11026 || (size == 8 && *op == 1))
11027 return FAIL;
11028 *immbits = immlo;
11029 return (size == 8) ? 0xe : (size == 16) ? 0x8 : 0x0;
11030 }
11031 else if (immlo == (immlo & 0x0000ff00))
11032 {
11033 if (size != 16 && size != 32)
11034 return FAIL;
11035 *immbits = immlo >> 8;
11036 return (size == 16) ? 0xa : 0x2;
11037 }
11038 else if (immlo == (immlo & 0x00ff0000))
11039 {
11040 if (size != 32)
11041 return FAIL;
11042 *immbits = immlo >> 16;
11043 return 0x4;
11044 }
11045 else if (immlo == (immlo & 0xff000000))
11046 {
11047 if (size != 32)
11048 return FAIL;
11049 *immbits = immlo >> 24;
11050 return 0x6;
11051 }
11052 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
11053 {
11054 if (size != 32)
11055 return FAIL;
11056 *immbits = (immlo >> 8) & 0xff;
11057 return 0xc;
11058 }
11059 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
11060 {
11061 if (size != 32)
11062 return FAIL;
11063 *immbits = (immlo >> 16) & 0xff;
11064 return 0xd;
11065 }
11066
11067 return FAIL;
11068 }
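
/* Example: "vmov.i32 d0, #0x00ff0000" hits the (immlo & 0x00ff0000) case, so
   *immbits becomes 0xff and cmode 0x4 is returned with *op left untouched.  */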
11069
11070 /* Write immediate bits [7:0] to the following locations:
11071
11072 |28/24|23 19|18 16|15 4|3 0|
11073 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11074
11075 This function is used by VMOV/VMVN/VORR/VBIC. */
11076
11077 static void
11078 neon_write_immbits (unsigned immbits)
11079 {
11080 inst.instruction |= immbits & 0xf;
11081 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
11082 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
11083 }
11084
11085 /* Invert low-order SIZE bits of XHI:XLO. */
11086
11087 static void
11088 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
11089 {
11090 unsigned immlo = xlo ? *xlo : 0;
11091 unsigned immhi = xhi ? *xhi : 0;
11092
11093 switch (size)
11094 {
11095 case 8:
11096 immlo = (~immlo) & 0xff;
11097 break;
11098
11099 case 16:
11100 immlo = (~immlo) & 0xffff;
11101 break;
11102
11103 case 64:
11104 immhi = (~immhi) & 0xffffffff;
11105 /* fall through. */
11106
11107 case 32:
11108 immlo = (~immlo) & 0xffffffff;
11109 break;
11110
11111 default:
11112 abort ();
11113 }
11114
11115 if (xlo)
11116 *xlo = immlo;
11117
11118 if (xhi)
11119 *xhi = immhi;
11120 }
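
/* This is how the VAND/VORN immediate pseudo-instructions below are turned
   into VBIC/VORR: e.g. "vand.i32 d0, #0xffffff00" inverts to 0x000000ff,
   which is then a valid VBIC immediate.  */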
11121
11122 static void
11123 do_neon_logic (void)
11124 {
11125 if (inst.operands[2].present && inst.operands[2].isreg)
11126 {
11127 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11128 neon_check_type (3, rs, N_IGNORE_TYPE);
11129 /* U bit and size field were set as part of the bitmask. */
11130 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11131 neon_three_same (neon_quad (rs), 0, -1);
11132 }
11133 else
11134 {
11135 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
11136 struct neon_type_el et = neon_check_type (2, rs,
11137 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
11138 enum neon_opc opcode = inst.instruction & 0x0fffffff;
11139 unsigned immbits;
11140 int cmode;
11141
11142 if (et.type == NT_invtype)
11143 return;
11144
11145 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11146
11147 switch (opcode)
11148 {
11149 case N_MNEM_vbic:
11150 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
11151 et.size);
11152 break;
11153
11154 case N_MNEM_vorr:
11155 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
11156 et.size);
11157 break;
11158
11159 case N_MNEM_vand:
11160 /* Pseudo-instruction for VBIC. */
11161 immbits = inst.operands[1].imm;
11162 neon_invert_size (&immbits, 0, et.size);
11163 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11164 break;
11165
11166 case N_MNEM_vorn:
11167 /* Pseudo-instruction for VORR. */
11168 immbits = inst.operands[1].imm;
11169 neon_invert_size (&immbits, 0, et.size);
11170 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11171 break;
11172
11173 default:
11174 abort ();
11175 }
11176
11177 if (cmode == FAIL)
11178 return;
11179
11180 inst.instruction |= neon_quad (rs) << 6;
11181 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11182 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11183 inst.instruction |= cmode << 8;
11184 neon_write_immbits (immbits);
11185
11186 inst.instruction = neon_dp_fixup (inst.instruction);
11187 }
11188 }
11189
11190 static void
11191 do_neon_bitfield (void)
11192 {
11193 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11194 neon_check_type (3, rs, N_IGNORE_TYPE);
11195 neon_three_same (neon_quad (rs), 0, -1);
11196 }
11197
11198 static void
11199 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
11200 unsigned destbits)
11201 {
11202 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11203 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
11204 types | N_KEY);
11205 if (et.type == NT_float)
11206 {
11207 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
11208 neon_three_same (neon_quad (rs), 0, -1);
11209 }
11210 else
11211 {
11212 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11213 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
11214 }
11215 }
11216
11217 static void
11218 do_neon_dyadic_if_su (void)
11219 {
11220 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11221 }
11222
11223 static void
11224 do_neon_dyadic_if_su_d (void)
11225 {
11226 /* This version only allows D registers, but that constraint is enforced during
11227 operand parsing, so we don't need to do anything extra here. */
11228 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11229 }
11230
11231 static void
11232 do_neon_dyadic_if_i (void)
11233 {
11234 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
11235 }
11236
11237 static void
11238 do_neon_dyadic_if_i_d (void)
11239 {
11240 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
11241 }
11242
11243 enum vfp_or_neon_is_neon_bits
11244 {
11245 NEON_CHECK_CC = 1,
11246 NEON_CHECK_ARCH = 2
11247 };
11248
11249 /* Call this function if an instruction which may have belonged to the VFP or
11250 Neon instruction sets, but turned out to be a Neon instruction (due to the
11251 operand types involved, etc.). We have to check and/or fix-up a couple of
11252 things:
11253
11254 - Make sure the user hasn't attempted to make a Neon instruction
11255 conditional.
11256 - Alter the value in the condition code field if necessary.
11257 - Make sure that the arch supports Neon instructions.
11258
11259 Which of these operations take place depends on bits from enum
11260 vfp_or_neon_is_neon_bits.
11261
11262 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11263 current instruction's condition is COND_ALWAYS, the condition field is
11264 changed to inst.uncond_value. This is necessary because instructions shared
11265 between VFP and Neon may be conditional for the VFP variants only, and the
11266 unconditional Neon version must have, e.g., 0xF in the condition field. */
11267
11268 static int
11269 vfp_or_neon_is_neon (unsigned check)
11270 {
11271 /* Conditions are always legal in Thumb mode (IT blocks). */
11272 if (!thumb_mode && (check & NEON_CHECK_CC))
11273 {
11274 if (inst.cond != COND_ALWAYS)
11275 {
11276 first_error (_(BAD_COND));
11277 return FAIL;
11278 }
11279 if (inst.uncond_value != -1)
11280 inst.instruction |= inst.uncond_value << 28;
11281 }
11282
11283 if ((check & NEON_CHECK_ARCH)
11284 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
11285 {
11286 first_error (_(BAD_FPU));
11287 return FAIL;
11288 }
11289
11290 return SUCCESS;
11291 }
11292
11293 static void
11294 do_neon_addsub_if_i (void)
11295 {
11296 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
11297 return;
11298
11299 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11300 return;
11301
11302 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11303 affected if we specify unsigned args. */
11304 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
11305 }
11306
11307 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
11308 result to be:
11309 V<op> A,B (A is operand 0, B is operand 2)
11310 to mean:
11311 V<op> A,B,A
11312 not:
11313 V<op> A,B,B
11314 so handle that case specially. */
11315
11316 static void
11317 neon_exchange_operands (void)
11318 {
11319 void *scratch = alloca (sizeof (inst.operands[0]));
11320 if (inst.operands[1].present)
11321 {
11322 /* Swap operands[1] and operands[2]. */
11323 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
11324 inst.operands[1] = inst.operands[2];
11325 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
11326 }
11327 else
11328 {
11329 inst.operands[1] = inst.operands[2];
11330 inst.operands[2] = inst.operands[0];
11331 }
11332 }
11333
11334 static void
11335 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
11336 {
11337 if (inst.operands[2].isreg)
11338 {
11339 if (invert)
11340 neon_exchange_operands ();
11341 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
11342 }
11343 else
11344 {
11345 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11346 struct neon_type_el et = neon_check_type (2, rs,
11347 N_EQK | N_SIZ, immtypes | N_KEY);
11348
11349 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11350 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11351 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11352 inst.instruction |= LOW4 (inst.operands[1].reg);
11353 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11354 inst.instruction |= neon_quad (rs) << 6;
11355 inst.instruction |= (et.type == NT_float) << 10;
11356 inst.instruction |= neon_logbits (et.size) << 18;
11357
11358 inst.instruction = neon_dp_fixup (inst.instruction);
11359 }
11360 }
11361
11362 static void
11363 do_neon_cmp (void)
11364 {
11365 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
11366 }
11367
11368 static void
11369 do_neon_cmp_inv (void)
11370 {
11371 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
11372 }
11373
11374 static void
11375 do_neon_ceq (void)
11376 {
11377 neon_compare (N_IF_32, N_IF_32, FALSE);
11378 }
11379
11380 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
11381 scalars, which are encoded in 5 bits, M : Rm.
11382 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
11383 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
11384 index in M. */
11385
11386 static unsigned
11387 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
11388 {
11389 unsigned regno = NEON_SCALAR_REG (scalar);
11390 unsigned elno = NEON_SCALAR_INDEX (scalar);
11391
11392 switch (elsize)
11393 {
11394 case 16:
11395 if (regno > 7 || elno > 3)
11396 goto bad_scalar;
11397 return regno | (elno << 3);
11398
11399 case 32:
11400 if (regno > 15 || elno > 1)
11401 goto bad_scalar;
11402 return regno | (elno << 4);
11403
11404 default:
11405 bad_scalar:
11406 first_error (_("scalar out of range for multiply instruction"));
11407 }
11408
11409 return 0;
11410 }
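
/* E.g. a 16-bit scalar in register 1, element 2 encodes as 1 | (2 << 3), i.e.
   0x11: the register lands in Rm[2:0] and the index in M:Rm[3].  */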
11411
11412 /* Encode multiply / multiply-accumulate scalar instructions. */
11413
11414 static void
11415 neon_mul_mac (struct neon_type_el et, int ubit)
11416 {
11417 unsigned scalar;
11418
11419 /* Give a more helpful error message if we have an invalid type. */
11420 if (et.type == NT_invtype)
11421 return;
11422
11423 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
11424 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11425 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11426 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11427 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11428 inst.instruction |= LOW4 (scalar);
11429 inst.instruction |= HI1 (scalar) << 5;
11430 inst.instruction |= (et.type == NT_float) << 8;
11431 inst.instruction |= neon_logbits (et.size) << 20;
11432 inst.instruction |= (ubit != 0) << 24;
11433
11434 inst.instruction = neon_dp_fixup (inst.instruction);
11435 }
11436
11437 static void
11438 do_neon_mac_maybe_scalar (void)
11439 {
11440 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
11441 return;
11442
11443 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11444 return;
11445
11446 if (inst.operands[2].isscalar)
11447 {
11448 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
11449 struct neon_type_el et = neon_check_type (3, rs,
11450 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
11451 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11452 neon_mul_mac (et, neon_quad (rs));
11453 }
11454 else
11455 do_neon_dyadic_if_i ();
11456 }
11457
11458 static void
11459 do_neon_tst (void)
11460 {
11461 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11462 struct neon_type_el et = neon_check_type (3, rs,
11463 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
11464 neon_three_same (neon_quad (rs), 0, et.size);
11465 }
11466
11467 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
11468 same types as the MAC equivalents. The polynomial type for this instruction
11469 is encoded the same as the integer type. */
11470
11471 static void
11472 do_neon_mul (void)
11473 {
11474 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
11475 return;
11476
11477 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11478 return;
11479
11480 if (inst.operands[2].isscalar)
11481 do_neon_mac_maybe_scalar ();
11482 else
11483 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
11484 }
11485
11486 static void
11487 do_neon_qdmulh (void)
11488 {
11489 if (inst.operands[2].isscalar)
11490 {
11491 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
11492 struct neon_type_el et = neon_check_type (3, rs,
11493 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
11494 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11495 neon_mul_mac (et, neon_quad (rs));
11496 }
11497 else
11498 {
11499 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11500 struct neon_type_el et = neon_check_type (3, rs,
11501 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
11502 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11503 /* The U bit (rounding) comes from bit mask. */
11504 neon_three_same (neon_quad (rs), 0, et.size);
11505 }
11506 }
11507
11508 static void
11509 do_neon_fcmp_absolute (void)
11510 {
11511 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11512 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
11513 /* Size field comes from bit mask. */
11514 neon_three_same (neon_quad (rs), 1, -1);
11515 }
11516
11517 static void
11518 do_neon_fcmp_absolute_inv (void)
11519 {
11520 neon_exchange_operands ();
11521 do_neon_fcmp_absolute ();
11522 }
11523
11524 static void
11525 do_neon_step (void)
11526 {
11527 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11528 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
11529 neon_three_same (neon_quad (rs), 0, -1);
11530 }
11531
11532 static void
11533 do_neon_abs_neg (void)
11534 {
11535 enum neon_shape rs;
11536 struct neon_type_el et;
11537
11538 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
11539 return;
11540
11541 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11542 return;
11543
11544 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
11545 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
11546
11547 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11548 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11549 inst.instruction |= LOW4 (inst.operands[1].reg);
11550 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11551 inst.instruction |= neon_quad (rs) << 6;
11552 inst.instruction |= (et.type == NT_float) << 10;
11553 inst.instruction |= neon_logbits (et.size) << 18;
11554
11555 inst.instruction = neon_dp_fixup (inst.instruction);
11556 }
11557
11558 static void
11559 do_neon_sli (void)
11560 {
11561 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11562 struct neon_type_el et = neon_check_type (2, rs,
11563 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
11564 int imm = inst.operands[2].imm;
11565 constraint (imm < 0 || (unsigned)imm >= et.size,
11566 _("immediate out of range for insert"));
11567 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
11568 }
11569
11570 static void
11571 do_neon_sri (void)
11572 {
11573 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11574 struct neon_type_el et = neon_check_type (2, rs,
11575 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
11576 int imm = inst.operands[2].imm;
11577 constraint (imm < 1 || (unsigned)imm > et.size,
11578 _("immediate out of range for insert"));
11579 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
11580 }
11581
11582 static void
11583 do_neon_qshlu_imm (void)
11584 {
11585 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11586 struct neon_type_el et = neon_check_type (2, rs,
11587 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
11588 int imm = inst.operands[2].imm;
11589 constraint (imm < 0 || (unsigned)imm >= et.size,
11590 _("immediate out of range for shift"));
11591 /* Only encodes the 'U present' variant of the instruction.
11592 In this case, signed types have OP (bit 8) set to 0.
11593 Unsigned types have OP set to 1. */
11594 inst.instruction |= (et.type == NT_unsigned) << 8;
11595 /* The rest of the bits are the same as other immediate shifts. */
11596 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
11597 }
11598
11599 static void
11600 do_neon_qmovn (void)
11601 {
11602 struct neon_type_el et = neon_check_type (2, NS_DQ,
11603 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
11604 /* Saturating move where operands can be signed or unsigned, and the
11605 destination has the same signedness. */
11606 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11607 if (et.type == NT_unsigned)
11608 inst.instruction |= 0xc0;
11609 else
11610 inst.instruction |= 0x80;
11611 neon_two_same (0, 1, et.size / 2);
11612 }
11613
11614 static void
11615 do_neon_qmovun (void)
11616 {
11617 struct neon_type_el et = neon_check_type (2, NS_DQ,
11618 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
11619 /* Saturating move with unsigned results. Operands must be signed. */
11620 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11621 neon_two_same (0, 1, et.size / 2);
11622 }
11623
11624 static void
11625 do_neon_rshift_sat_narrow (void)
11626 {
11627 /* FIXME: Types for narrowing. If operands are signed, results can be signed
11628 or unsigned. If operands are unsigned, results must also be unsigned. */
11629 struct neon_type_el et = neon_check_type (2, NS_DQI,
11630 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
11631 int imm = inst.operands[2].imm;
11632 /* This gets the bounds check, size encoding and immediate bits calculation
11633 right. */
11634 et.size /= 2;
11635
11636 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
11637 VQMOVN.I<size> <Dd>, <Qm>. */
11638 if (imm == 0)
11639 {
11640 inst.operands[2].present = 0;
11641 inst.instruction = N_MNEM_vqmovn;
11642 do_neon_qmovn ();
11643 return;
11644 }
11645
11646 constraint (imm < 1 || (unsigned)imm > et.size,
11647 _("immediate out of range"));
11648 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
11649 }
11650
11651 static void
11652 do_neon_rshift_sat_narrow_u (void)
11653 {
11654 /* FIXME: Types for narrowing. If operands are signed, results can be signed
11655 or unsigned. If operands are unsigned, results must also be unsigned. */
11656 struct neon_type_el et = neon_check_type (2, NS_DQI,
11657 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
11658 int imm = inst.operands[2].imm;
11659 /* This gets the bounds check, size encoding and immediate bits calculation
11660 right. */
11661 et.size /= 2;
11662
11663 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
11664 VQMOVUN.I<size> <Dd>, <Qm>. */
11665 if (imm == 0)
11666 {
11667 inst.operands[2].present = 0;
11668 inst.instruction = N_MNEM_vqmovun;
11669 do_neon_qmovun ();
11670 return;
11671 }
11672
11673 constraint (imm < 1 || (unsigned)imm > et.size,
11674 _("immediate out of range"));
11675 /* FIXME: The manual is kind of unclear about what value U should have in
11676 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
11677 must be 1. */
11678 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
11679 }
11680
11681 static void
11682 do_neon_movn (void)
11683 {
11684 struct neon_type_el et = neon_check_type (2, NS_DQ,
11685 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
11686 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11687 neon_two_same (0, 1, et.size / 2);
11688 }
11689
11690 static void
11691 do_neon_rshift_narrow (void)
11692 {
11693 struct neon_type_el et = neon_check_type (2, NS_DQI,
11694 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
11695 int imm = inst.operands[2].imm;
11696 /* This gets the bounds check, size encoding and immediate bits calculation
11697 right. */
11698 et.size /= 2;
11699
11700 /* If the immediate is zero then this is a pseudo-instruction for
11701 VMOVN.I<size> <Dd>, <Qm>. */
11702 if (imm == 0)
11703 {
11704 inst.operands[2].present = 0;
11705 inst.instruction = N_MNEM_vmovn;
11706 do_neon_movn ();
11707 return;
11708 }
11709
11710 constraint (imm < 1 || (unsigned)imm > et.size,
11711 _("immediate out of range for narrowing operation"));
11712 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
11713 }
11714
11715 static void
11716 do_neon_shll (void)
11717 {
11718 /* FIXME: Type checking when lengthening. */
11719 struct neon_type_el et = neon_check_type (2, NS_QDI,
11720 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
11721 unsigned imm = inst.operands[2].imm;
11722
11723 if (imm == et.size)
11724 {
11725 /* Maximum shift variant. */
11726 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11727 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11728 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11729 inst.instruction |= LOW4 (inst.operands[1].reg);
11730 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11731 inst.instruction |= neon_logbits (et.size) << 18;
11732
11733 inst.instruction = neon_dp_fixup (inst.instruction);
11734 }
11735 else
11736 {
11737 /* A more-specific type check for non-max versions. */
11738 et = neon_check_type (2, NS_QDI,
11739 N_EQK | N_DBL, N_SU_32 | N_KEY);
11740 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11741 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
11742 }
11743 }
11744
11745 /* Check the various types for the VCVT instruction, and return which version
11746 the current instruction is. */
11747
11748 static int
11749 neon_cvt_flavour (enum neon_shape rs)
11750 {
11751 #define CVT_VAR(C,X,Y) \
11752 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
11753 if (et.type != NT_invtype) \
11754 { \
11755 inst.error = NULL; \
11756 return (C); \
11757 }
11758 struct neon_type_el et;
11759 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
11760 || rs == NS_FF) ? N_VFP : 0;
11761 /* The instruction versions which take an immediate take one register
11762 argument, which is extended to the width of the full register. Thus the
11763 "source" and "destination" registers must have the same width. Hack that
11764 here by making the size equal to the key (wider, in this case) operand. */
11765 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
11766
11767 CVT_VAR (0, N_S32, N_F32);
11768 CVT_VAR (1, N_U32, N_F32);
11769 CVT_VAR (2, N_F32, N_S32);
11770 CVT_VAR (3, N_F32, N_U32);
11771
11772 whole_reg = N_VFP;
11773
11774 /* VFP instructions. */
11775 CVT_VAR (4, N_F32, N_F64);
11776 CVT_VAR (5, N_F64, N_F32);
11777 CVT_VAR (6, N_S32, N_F64 | key);
11778 CVT_VAR (7, N_U32, N_F64 | key);
11779 CVT_VAR (8, N_F64 | key, N_S32);
11780 CVT_VAR (9, N_F64 | key, N_U32);
11781 /* VFP instructions with bitshift. */
11782 CVT_VAR (10, N_F32 | key, N_S16);
11783 CVT_VAR (11, N_F32 | key, N_U16);
11784 CVT_VAR (12, N_F64 | key, N_S16);
11785 CVT_VAR (13, N_F64 | key, N_U16);
11786 CVT_VAR (14, N_S16, N_F32 | key);
11787 CVT_VAR (15, N_U16, N_F32 | key);
11788 CVT_VAR (16, N_S16, N_F64 | key);
11789 CVT_VAR (17, N_U16, N_F64 | key);
11790
11791 return -1;
11792 #undef CVT_VAR
11793 }
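
/* For instance, "vcvt.s32.f32 d0, d1" comes out as flavour 0 (a Neon
   conversion), while "vcvt.f32.f64 s0, d1" matches CVT_VAR (4, ...) and is
   handled below as the VFP fcvtsd instruction.  */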
11794
11795 /* Neon-syntax VFP conversions. */
11796
11797 static void
11798 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
11799 {
11800 const char *opname = 0;
11801
11802 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
11803 {
11804 /* Conversions with immediate bitshift. */
11805 const char *enc[] =
11806 {
11807 "ftosls",
11808 "ftouls",
11809 "fsltos",
11810 "fultos",
11811 NULL,
11812 NULL,
11813 "ftosld",
11814 "ftould",
11815 "fsltod",
11816 "fultod",
11817 "fshtos",
11818 "fuhtos",
11819 "fshtod",
11820 "fuhtod",
11821 "ftoshs",
11822 "ftouhs",
11823 "ftoshd",
11824 "ftouhd"
11825 };
11826
11827 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
11828 {
11829 opname = enc[flavour];
11830 constraint (inst.operands[0].reg != inst.operands[1].reg,
11831 _("operands 0 and 1 must be the same register"));
11832 inst.operands[1] = inst.operands[2];
11833 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
11834 }
11835 }
11836 else
11837 {
11838 /* Conversions without bitshift. */
11839 const char *enc[] =
11840 {
11841 "ftosis",
11842 "ftouis",
11843 "fsitos",
11844 "fuitos",
11845 "fcvtsd",
11846 "fcvtds",
11847 "ftosid",
11848 "ftouid",
11849 "fsitod",
11850 "fuitod"
11851 };
11852
11853 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
11854 opname = enc[flavour];
11855 }
11856
11857 if (opname)
11858 do_vfp_nsyn_opcode (opname);
11859 }
11860
11861 static void
11862 do_vfp_nsyn_cvtz (void)
11863 {
11864 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
11865 int flavour = neon_cvt_flavour (rs);
11866 const char *enc[] =
11867 {
11868 "ftosizs",
11869 "ftouizs",
11870 NULL,
11871 NULL,
11872 NULL,
11873 NULL,
11874 "ftosizd",
11875 "ftouizd"
11876 };
11877
11878 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
11879 do_vfp_nsyn_opcode (enc[flavour]);
11880 }
11881
11882 static void
11883 do_neon_cvt (void)
11884 {
11885 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
11886 NS_FD, NS_DF, NS_FF, NS_NULL);
11887 int flavour = neon_cvt_flavour (rs);
11888
11889 /* VFP rather than Neon conversions. */
11890 if (flavour >= 4)
11891 {
11892 do_vfp_nsyn_cvt (rs, flavour);
11893 return;
11894 }
11895
11896 switch (rs)
11897 {
11898 case NS_DDI:
11899 case NS_QQI:
11900 {
11901 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11902 return;
11903
11904 /* Fixed-point conversion with #0 immediate is encoded as an
11905 integer conversion. */
11906 if (inst.operands[2].present && inst.operands[2].imm == 0)
11907 goto int_encode;
11908 unsigned immbits = 32 - inst.operands[2].imm;
11909 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
11910 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11911 if (flavour != -1)
11912 inst.instruction |= enctab[flavour];
11913 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11914 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11915 inst.instruction |= LOW4 (inst.operands[1].reg);
11916 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11917 inst.instruction |= neon_quad (rs) << 6;
11918 inst.instruction |= 1 << 21;
11919 inst.instruction |= immbits << 16;
11920
11921 inst.instruction = neon_dp_fixup (inst.instruction);
11922 }
11923 break;
11924
11925 case NS_DD:
11926 case NS_QQ:
11927 int_encode:
11928 {
11929 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
11930
11931 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11932
11933 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11934 return;
11935
11936 if (flavour != -1)
11937 inst.instruction |= enctab[flavour];
11938
11939 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11940 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11941 inst.instruction |= LOW4 (inst.operands[1].reg);
11942 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11943 inst.instruction |= neon_quad (rs) << 6;
11944 inst.instruction |= 2 << 18;
11945
11946 inst.instruction = neon_dp_fixup (inst.instruction);
11947 }
11948 break;
11949
11950 default:
11951 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
11952 do_vfp_nsyn_cvt (rs, flavour);
11953 }
11954 }
11955
11956 static void
11957 neon_move_immediate (void)
11958 {
11959 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
11960 struct neon_type_el et = neon_check_type (2, rs,
11961 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
11962 unsigned immlo, immhi = 0, immbits;
11963 int op, cmode;
11964
11965 constraint (et.type == NT_invtype,
11966 _("operand size must be specified for immediate VMOV"));
11967
11968 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
11969 op = (inst.instruction & (1 << 5)) != 0;
11970
11971 immlo = inst.operands[1].imm;
11972 if (inst.operands[1].regisimm)
11973 immhi = inst.operands[1].reg;
11974
11975 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
11976 _("immediate has bits set outside the operand size"));
11977
11978 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11979 et.size, et.type)) == FAIL)
11980 {
11981 /* Invert relevant bits only. */
11982 neon_invert_size (&immlo, &immhi, et.size);
11983 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
11984 with one or the other; those cases are caught by
11985 neon_cmode_for_move_imm. */
11986 op = !op;
11987 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11988 et.size, et.type)) == FAIL)
11989 {
11990 first_error (_("immediate out of range"));
11991 return;
11992 }
11993 }
11994
11995 inst.instruction &= ~(1 << 5);
11996 inst.instruction |= op << 5;
11997
11998 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11999 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12000 inst.instruction |= neon_quad (rs) << 6;
12001 inst.instruction |= cmode << 8;
12002
12003 neon_write_immbits (immbits);
12004 }
12005
12006 static void
12007 do_neon_mvn (void)
12008 {
12009 if (inst.operands[1].isreg)
12010 {
12011 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12012
12013 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12014 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12015 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12016 inst.instruction |= LOW4 (inst.operands[1].reg);
12017 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12018 inst.instruction |= neon_quad (rs) << 6;
12019 }
12020 else
12021 {
12022 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12023 neon_move_immediate ();
12024 }
12025
12026 inst.instruction = neon_dp_fixup (inst.instruction);
12027 }
12028
12029 /* Encode instructions of form:
12030
12031 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12032 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
12033
12034 */
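/* For instance, the mixed-length encoders below (do_neon_dyadic_long,
   do_neon_abal, do_neon_dyadic_wide, do_neon_dyadic_narrow, and the
   non-scalar path of do_neon_vmull) all fill in this layout via
   neon_mixed_length.  */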
12035
12036 static void
12037 neon_mixed_length (struct neon_type_el et, unsigned size)
12038 {
12039 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12040 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12041 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12042 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12043 inst.instruction |= LOW4 (inst.operands[2].reg);
12044 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12045 inst.instruction |= (et.type == NT_unsigned) << 24;
12046 inst.instruction |= neon_logbits (size) << 20;
12047
12048 inst.instruction = neon_dp_fixup (inst.instruction);
12049 }
12050
12051 static void
12052 do_neon_dyadic_long (void)
12053 {
12054 /* FIXME: Type checking for lengthening op. */
12055 struct neon_type_el et = neon_check_type (3, NS_QDD,
12056 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
12057 neon_mixed_length (et, et.size);
12058 }
12059
12060 static void
12061 do_neon_abal (void)
12062 {
12063 struct neon_type_el et = neon_check_type (3, NS_QDD,
12064 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
12065 neon_mixed_length (et, et.size);
12066 }
12067
12068 static void
12069 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
12070 {
12071 if (inst.operands[2].isscalar)
12072 {
12073 struct neon_type_el et = neon_check_type (3, NS_QDS,
12074 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
12075 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12076 neon_mul_mac (et, et.type == NT_unsigned);
12077 }
12078 else
12079 {
12080 struct neon_type_el et = neon_check_type (3, NS_QDD,
12081 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
12082 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12083 neon_mixed_length (et, et.size);
12084 }
12085 }
12086
12087 static void
12088 do_neon_mac_maybe_scalar_long (void)
12089 {
12090 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
12091 }
12092
12093 static void
12094 do_neon_dyadic_wide (void)
12095 {
12096 struct neon_type_el et = neon_check_type (3, NS_QQD,
12097 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
12098 neon_mixed_length (et, et.size);
12099 }
12100
12101 static void
12102 do_neon_dyadic_narrow (void)
12103 {
12104 struct neon_type_el et = neon_check_type (3, NS_QDD,
12105 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
12106 neon_mixed_length (et, et.size / 2);
12107 }
12108
12109 static void
12110 do_neon_mul_sat_scalar_long (void)
12111 {
12112 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
12113 }
12114
12115 static void
12116 do_neon_vmull (void)
12117 {
12118 if (inst.operands[2].isscalar)
12119 do_neon_mac_maybe_scalar_long ();
12120 else
12121 {
12122 struct neon_type_el et = neon_check_type (3, NS_QDD,
12123 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
12124 if (et.type == NT_poly)
12125 inst.instruction = NEON_ENC_POLY (inst.instruction);
12126 else
12127 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12128 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12129 zero. Should be OK as-is. */
12130 neon_mixed_length (et, et.size);
12131 }
12132 }
12133
12134 static void
12135 do_neon_ext (void)
12136 {
12137 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
12138 struct neon_type_el et = neon_check_type (3, rs,
12139 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12140 unsigned imm = (inst.operands[3].imm * et.size) / 8;
12141 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12142 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12143 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12144 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12145 inst.instruction |= LOW4 (inst.operands[2].reg);
12146 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12147 inst.instruction |= neon_quad (rs) << 6;
12148 inst.instruction |= imm << 8;
12149
12150 inst.instruction = neon_dp_fixup (inst.instruction);
12151 }
12152
12153 static void
12154 do_neon_rev (void)
12155 {
12156 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12157 struct neon_type_el et = neon_check_type (2, rs,
12158 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12159 unsigned op = (inst.instruction >> 7) & 3;
12160 /* N (width of reversed regions) is encoded as part of the bitmask. We
12161 extract it here to check that the elements to be reversed are smaller.
12162 Otherwise we'd get a reserved instruction. */
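  /* For example, VREV32.16 reverses 16-bit elements within each 32-bit
     region, so et.size (16) must be smaller than elsize (32); a VREV32.32
     would select a reserved encoding and is rejected by the constraint
     below.  */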
12163 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
12164 assert (elsize != 0);
12165 constraint (et.size >= elsize,
12166 _("elements must be smaller than reversal region"));
12167 neon_two_same (neon_quad (rs), 1, et.size);
12168 }
12169
12170 static void
12171 do_neon_dup (void)
12172 {
12173 if (inst.operands[1].isscalar)
12174 {
12175 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
12176 struct neon_type_el et = neon_check_type (2, rs,
12177 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12178 unsigned sizebits = et.size >> 3;
12179 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
12180 int logsize = neon_logbits (et.size);
12181 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
12182
12183 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
12184 return;
12185
12186 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12187 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12188 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12189 inst.instruction |= LOW4 (dm);
12190 inst.instruction |= HI1 (dm) << 5;
12191 inst.instruction |= neon_quad (rs) << 6;
12192 inst.instruction |= x << 17;
12193 inst.instruction |= sizebits << 16;
12194
12195 inst.instruction = neon_dp_fixup (inst.instruction);
12196 }
12197 else
12198 {
12199 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
12200 struct neon_type_el et = neon_check_type (2, rs,
12201 N_8 | N_16 | N_32 | N_KEY, N_EQK);
12202 /* Duplicate ARM register to lanes of vector. */
12203 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
12204 switch (et.size)
12205 {
12206 case 8: inst.instruction |= 0x400000; break;
12207 case 16: inst.instruction |= 0x000020; break;
12208 case 32: inst.instruction |= 0x000000; break;
12209 default: break;
12210 }
12211 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
12212 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
12213 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
12214 inst.instruction |= neon_quad (rs) << 21;
12215 /* The encoding for this instruction is identical for the ARM and Thumb
12216 variants, except for the condition field. */
12217 do_vfp_cond_or_thumb ();
12218 }
12219 }
12220
12221 /* VMOV has particularly many variations. It can be one of:
12222 0. VMOV<c><q> <Qd>, <Qm>
12223 1. VMOV<c><q> <Dd>, <Dm>
12224 (Register operations, which are VORR with Rm = Rn.)
12225 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12226 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12227 (Immediate loads.)
12228 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12229 (ARM register to scalar.)
12230 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12231 (Two ARM registers to vector.)
12232 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12233 (Scalar to ARM register.)
12234 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12235 (Vector to two ARM registers.)
12236 8. VMOV.F32 <Sd>, <Sm>
12237 9. VMOV.F64 <Dd>, <Dm>
12238 (VFP register moves.)
12239 10. VMOV.F32 <Sd>, #imm
12240 11. VMOV.F64 <Dd>, #imm
12241 (VFP float immediate load.)
12242 12. VMOV <Rd>, <Sm>
12243 (VFP single to ARM reg.)
12244 13. VMOV <Sd>, <Rm>
12245 (ARM reg to VFP single.)
12246 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12247 (Two ARM regs to two VFP singles.)
12248 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12249 (Two VFP singles to two ARM regs.)
12250
12251 These cases can be disambiguated using neon_select_shape, except cases 1/9
12252 and 3/11 which depend on the operand type too.
12253
12254 All the encoded bits are hardcoded by this function.
12255
12256 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12257 Cases 5, 7 may be used with VFPv2 and above.
12258
12259 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12260 can specify a type where it doesn't make sense to, and is ignored).
12261 */
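/* A few illustrative spellings (assuming UAL mnemonics) and the shapes
   neon_select_shape picks for them: "vmov q0, q1" is NS_QQ (case 0),
   "vmov.f64 d0, d1" is NS_DD and is emitted as fcpyd (case 9),
   "vmov d0, r0, r1" is NS_DRR (case 5, fmdrr), and "vmov s0, r0" is
   NS_FR (case 13, fmsr).  */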
12262
12263 static void
12264 do_neon_mov (void)
12265 {
12266 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
12267 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
12268 NS_NULL);
12269 struct neon_type_el et;
12270 const char *ldconst = 0;
12271
12272 switch (rs)
12273 {
12274 case NS_DD: /* case 1/9. */
12275 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
12276 /* It is not an error here if no type is given. */
12277 inst.error = NULL;
12278 if (et.type == NT_float && et.size == 64)
12279 {
12280 do_vfp_nsyn_opcode ("fcpyd");
12281 break;
12282 }
12283 /* fall through. */
12284
12285 case NS_QQ: /* case 0/1. */
12286 {
12287 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12288 return;
12289 /* The architecture manual I have doesn't explicitly state which
12290 value the U bit should have for register->register moves, but
12291 the equivalent VORR instruction has U = 0, so do that. */
12292 inst.instruction = 0x0200110;
12293 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12294 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12295 inst.instruction |= LOW4 (inst.operands[1].reg);
12296 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12297 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12298 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12299 inst.instruction |= neon_quad (rs) << 6;
12300
12301 inst.instruction = neon_dp_fixup (inst.instruction);
12302 }
12303 break;
12304
12305 case NS_DI: /* case 3/11. */
12306 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
12307 inst.error = NULL;
12308 if (et.type == NT_float && et.size == 64)
12309 {
12310 /* case 11 (fconstd). */
12311 ldconst = "fconstd";
12312 goto encode_fconstd;
12313 }
12314 /* fall through. */
12315
12316 case NS_QI: /* case 2/3. */
12317 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12318 return;
12319 inst.instruction = 0x0800010;
12320 neon_move_immediate ();
12321 inst.instruction = neon_dp_fixup (inst.instruction);
12322 break;
12323
12324 case NS_SR: /* case 4. */
12325 {
12326 unsigned bcdebits = 0;
12327 struct neon_type_el et = neon_check_type (2, NS_NULL,
12328 N_8 | N_16 | N_32 | N_KEY, N_EQK);
12329 int logsize = neon_logbits (et.size);
12330 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
12331 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
12332
12333 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
12334 _(BAD_FPU));
12335 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
12336 && et.size != 32, _(BAD_FPU));
12337 constraint (et.type == NT_invtype, _("bad type for scalar"));
12338 constraint (x >= 64 / et.size, _("scalar index out of range"));
12339
12340 switch (et.size)
12341 {
12342 case 8: bcdebits = 0x8; break;
12343 case 16: bcdebits = 0x1; break;
12344 case 32: bcdebits = 0x0; break;
12345 default: ;
12346 }
12347
12348 bcdebits |= x << logsize;
12349
12350 inst.instruction = 0xe000b10;
12351 do_vfp_cond_or_thumb ();
12352 inst.instruction |= LOW4 (dn) << 16;
12353 inst.instruction |= HI1 (dn) << 7;
12354 inst.instruction |= inst.operands[1].reg << 12;
12355 inst.instruction |= (bcdebits & 3) << 5;
12356 inst.instruction |= (bcdebits >> 2) << 21;
12357 }
12358 break;
12359
12360 case NS_DRR: /* case 5 (fmdrr). */
12361 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
12362 _(BAD_FPU));
12363
12364 inst.instruction = 0xc400b10;
12365 do_vfp_cond_or_thumb ();
12366 inst.instruction |= LOW4 (inst.operands[0].reg);
12367 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
12368 inst.instruction |= inst.operands[1].reg << 12;
12369 inst.instruction |= inst.operands[2].reg << 16;
12370 break;
12371
12372 case NS_RS: /* case 6. */
12373 {
12374 struct neon_type_el et = neon_check_type (2, NS_NULL,
12375 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
12376 unsigned logsize = neon_logbits (et.size);
12377 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
12378 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
12379 unsigned abcdebits = 0;
12380
12381 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
12382 _(BAD_FPU));
12383 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
12384 && et.size != 32, _(BAD_FPU));
12385 constraint (et.type == NT_invtype, _("bad type for scalar"));
12386 constraint (x >= 64 / et.size, _("scalar index out of range"));
12387
12388 switch (et.size)
12389 {
12390 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
12391 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
12392 case 32: abcdebits = 0x00; break;
12393 default: ;
12394 }
12395
12396 abcdebits |= x << logsize;
12397 inst.instruction = 0xe100b10;
12398 do_vfp_cond_or_thumb ();
12399 inst.instruction |= LOW4 (dn) << 16;
12400 inst.instruction |= HI1 (dn) << 7;
12401 inst.instruction |= inst.operands[0].reg << 12;
12402 inst.instruction |= (abcdebits & 3) << 5;
12403 inst.instruction |= (abcdebits >> 2) << 21;
12404 }
12405 break;
12406
12407 case NS_RRD: /* case 7 (fmrrd). */
12408 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
12409 _(BAD_FPU));
12410
12411 inst.instruction = 0xc500b10;
12412 do_vfp_cond_or_thumb ();
12413 inst.instruction |= inst.operands[0].reg << 12;
12414 inst.instruction |= inst.operands[1].reg << 16;
12415 inst.instruction |= LOW4 (inst.operands[2].reg);
12416 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12417 break;
12418
12419 case NS_FF: /* case 8 (fcpys). */
12420 do_vfp_nsyn_opcode ("fcpys");
12421 break;
12422
12423 case NS_FI: /* case 10 (fconsts). */
12424 ldconst = "fconsts";
12425 encode_fconstd:
12426 if (is_quarter_float (inst.operands[1].imm))
12427 {
12428 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
12429 do_vfp_nsyn_opcode (ldconst);
12430 }
12431 else
12432 first_error (_("immediate out of range"));
12433 break;
12434
12435 case NS_RF: /* case 12 (fmrs). */
12436 do_vfp_nsyn_opcode ("fmrs");
12437 break;
12438
12439 case NS_FR: /* case 13 (fmsr). */
12440 do_vfp_nsyn_opcode ("fmsr");
12441 break;
12442
12443 /* The encoders for the fmrrs and fmsrr instructions expect three operands
12444 (one of which is a list), but we have parsed four. Do some fiddling to
12445 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
12446 expect. */
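  /* For instance, "vmov r0, r1, s2, s3" parses as four operands; below we
     turn operand 2 into a two-register list (imm = 2) and drop operand 3 so
     the underlying encoder sees the Rd, Rn, register-list form it expects.  */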
12447 case NS_RRFF: /* case 14 (fmrrs). */
12448 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
12449 _("VFP registers must be adjacent"));
12450 inst.operands[2].imm = 2;
12451 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
12452 do_vfp_nsyn_opcode ("fmrrs");
12453 break;
12454
12455 case NS_FFRR: /* case 15 (fmsrr). */
12456 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
12457 _("VFP registers must be adjacent"));
12458 inst.operands[1] = inst.operands[2];
12459 inst.operands[2] = inst.operands[3];
12460 inst.operands[0].imm = 2;
12461 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
12462 do_vfp_nsyn_opcode ("fmsrr");
12463 break;
12464
12465 default:
12466 abort ();
12467 }
12468 }
12469
12470 static void
12471 do_neon_rshift_round_imm (void)
12472 {
12473 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12474 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
12475 int imm = inst.operands[2].imm;
12476
12477 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
12478 if (imm == 0)
12479 {
12480 inst.operands[2].present = 0;
12481 do_neon_mov ();
12482 return;
12483 }
12484
12485 constraint (imm < 1 || (unsigned)imm > et.size,
12486 _("immediate out of range for shift"));
12487 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
12488 et.size - imm);
12489 }
12490
12491 static void
12492 do_neon_movl (void)
12493 {
12494 struct neon_type_el et = neon_check_type (2, NS_QD,
12495 N_EQK | N_DBL, N_SU_32 | N_KEY);
12496 unsigned sizebits = et.size >> 3;
12497 inst.instruction |= sizebits << 19;
12498 neon_two_same (0, et.type == NT_unsigned, -1);
12499 }
12500
12501 static void
12502 do_neon_trn (void)
12503 {
12504 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12505 struct neon_type_el et = neon_check_type (2, rs,
12506 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12507 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12508 neon_two_same (neon_quad (rs), 1, et.size);
12509 }
12510
12511 static void
12512 do_neon_zip_uzp (void)
12513 {
12514 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12515 struct neon_type_el et = neon_check_type (2, rs,
12516 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12517 if (rs == NS_DD && et.size == 32)
12518 {
12519 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
12520 inst.instruction = N_MNEM_vtrn;
12521 do_neon_trn ();
12522 return;
12523 }
12524 neon_two_same (neon_quad (rs), 1, et.size);
12525 }
12526
12527 static void
12528 do_neon_sat_abs_neg (void)
12529 {
12530 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12531 struct neon_type_el et = neon_check_type (2, rs,
12532 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
12533 neon_two_same (neon_quad (rs), 1, et.size);
12534 }
12535
12536 static void
12537 do_neon_pair_long (void)
12538 {
12539 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12540 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
12541 /* Unsigned is encoded in OP field (bit 7) for these instructions. */
12542 inst.instruction |= (et.type == NT_unsigned) << 7;
12543 neon_two_same (neon_quad (rs), 1, et.size);
12544 }
12545
12546 static void
12547 do_neon_recip_est (void)
12548 {
12549 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12550 struct neon_type_el et = neon_check_type (2, rs,
12551 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
12552 inst.instruction |= (et.type == NT_float) << 8;
12553 neon_two_same (neon_quad (rs), 1, et.size);
12554 }
12555
12556 static void
12557 do_neon_cls (void)
12558 {
12559 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12560 struct neon_type_el et = neon_check_type (2, rs,
12561 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
12562 neon_two_same (neon_quad (rs), 1, et.size);
12563 }
12564
12565 static void
12566 do_neon_clz (void)
12567 {
12568 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12569 struct neon_type_el et = neon_check_type (2, rs,
12570 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
12571 neon_two_same (neon_quad (rs), 1, et.size);
12572 }
12573
12574 static void
12575 do_neon_cnt (void)
12576 {
12577 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12578 struct neon_type_el et = neon_check_type (2, rs,
12579 N_EQK | N_INT, N_8 | N_KEY);
12580 neon_two_same (neon_quad (rs), 1, et.size);
12581 }
12582
12583 static void
12584 do_neon_swp (void)
12585 {
12586 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12587 neon_two_same (neon_quad (rs), 1, -1);
12588 }
12589
12590 static void
12591 do_neon_tbl_tbx (void)
12592 {
12593 unsigned listlenbits;
12594 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
12595
12596 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
12597 {
12598 first_error (_("bad list length for table lookup"));
12599 return;
12600 }
12601
12602 listlenbits = inst.operands[1].imm - 1;
12603 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12604 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12605 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12606 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12607 inst.instruction |= LOW4 (inst.operands[2].reg);
12608 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12609 inst.instruction |= listlenbits << 8;
12610
12611 inst.instruction = neon_dp_fixup (inst.instruction);
12612 }
12613
12614 static void
12615 do_neon_ldm_stm (void)
12616 {
12617 /* P, U and L bits are part of bitmask. */
12618 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
12619 unsigned offsetbits = inst.operands[1].imm * 2;
12620
12621 if (inst.operands[1].issingle)
12622 {
12623 do_vfp_nsyn_ldm_stm (is_dbmode);
12624 return;
12625 }
12626
12627 constraint (is_dbmode && !inst.operands[0].writeback,
12628 _("writeback (!) must be used for VLDMDB and VSTMDB"));
12629
12630 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
12631 _("register list must contain at least 1 and at most 16 "
12632 "registers"));
12633
12634 inst.instruction |= inst.operands[0].reg << 16;
12635 inst.instruction |= inst.operands[0].writeback << 21;
12636 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
12637 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
12638
12639 inst.instruction |= offsetbits;
12640
12641 do_vfp_cond_or_thumb ();
12642 }
12643
12644 static void
12645 do_neon_ldr_str (void)
12646 {
12647 unsigned offsetbits;
12648 int offset_up = 1;
12649 int is_ldr = (inst.instruction & (1 << 20)) != 0;
12650
12651 if (inst.operands[0].issingle)
12652 {
12653 do_vfp_nsyn_ldr_str (is_ldr);
12654 return;
12655 }
12656
12657 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12658 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12659
12660 constraint (inst.reloc.pc_rel && !is_ldr,
12661 _("PC-relative addressing unavailable with VSTR"));
12662
12663 constraint (!inst.reloc.pc_rel && inst.reloc.exp.X_op != O_constant,
12664 _("Immediate value must be a constant"));
12665
12666 if (inst.reloc.exp.X_add_number < 0)
12667 {
12668 offset_up = 0;
12669 offsetbits = -inst.reloc.exp.X_add_number / 4;
12670 }
12671 else
12672 offsetbits = inst.reloc.exp.X_add_number / 4;
12673
12674 /* FIXME: Does this catch everything? */
12675 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
12676 || inst.operands[1].postind || inst.operands[1].writeback
12677 || inst.operands[1].immisreg || inst.operands[1].shifted,
12678 BAD_ADDR_MODE);
12679 constraint ((inst.operands[1].imm & 3) != 0,
12680 _("Offset must be a multiple of 4"));
12681 constraint (offsetbits != (offsetbits & 0xff),
12682 _("Immediate offset out of range"));
12683
12684 inst.instruction |= inst.operands[1].reg << 16;
12685 inst.instruction |= offsetbits & 0xff;
12686 inst.instruction |= offset_up << 23;
12687
12688 do_vfp_cond_or_thumb ();
12689
12690 if (inst.reloc.pc_rel)
12691 {
12692 if (thumb_mode)
12693 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
12694 else
12695 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
12696 }
12697 else
12698 inst.reloc.type = BFD_RELOC_UNUSED;
12699 }
12700
12701 /* "interleave" version also handles non-interleaving register VLD1/VST1
12702 instructions. */
12703
12704 static void
12705 do_neon_ld_st_interleave (void)
12706 {
12707 struct neon_type_el et = neon_check_type (1, NS_NULL,
12708 N_8 | N_16 | N_32 | N_64);
12709 unsigned alignbits = 0;
12710 unsigned idx;
12711 /* The bits in this table go:
12712 0: register stride of one (0) or two (1)
12713 1,2: register list length, minus one (1, 2, 3, 4).
12714 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
12715 We use -1 for invalid entries. */
12716 const int typetable[] =
12717 {
12718 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
12719 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
12720 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
12721 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
12722 };
12723 int typebits;
12724
12725 if (et.type == NT_invtype)
12726 return;
12727
12728 if (inst.operands[1].immisalign)
12729 switch (inst.operands[1].imm >> 8)
12730 {
12731 case 64: alignbits = 1; break;
12732 case 128:
12733 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
12734 goto bad_alignment;
12735 alignbits = 2;
12736 break;
12737 case 256:
12738 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
12739 goto bad_alignment;
12740 alignbits = 3;
12741 break;
12742 default:
12743 bad_alignment:
12744 first_error (_("bad alignment"));
12745 return;
12746 }
12747
12748 inst.instruction |= alignbits << 4;
12749 inst.instruction |= neon_logbits (et.size) << 6;
12750
12751 /* Bits [4:6] of the immediate in a list specifier encode register stride
12752 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
12753 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
12754 up the right value for "type" in a table based on this value and the given
12755 list style, then stick it back. */
12756 idx = ((inst.operands[0].imm >> 4) & 7)
12757 | (((inst.instruction >> 8) & 3) << 3);
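  /* As a rough worked example: a two-register, stride-one list used with
     VLD1/VST1 gives idx == 2 (n-1 = 0, length-1 = 1, stride bit 0), and
     typetable[2] == 0xa, the two-register VLD1/VST1 "type" value.  */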
12758
12759 typebits = typetable[idx];
12760
12761 constraint (typebits == -1, _("bad list type for instruction"));
12762
12763 inst.instruction &= ~0xf00;
12764 inst.instruction |= typebits << 8;
12765 }
12766
12767 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
12768 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
12769 otherwise. The variable arguments are a list of pairs of legal (size, align)
12770 values, terminated with -1. */
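/* For example, the VLD1/VST1 lane case below passes the pairs (16, 16) and
   (32, 32), meaning a 16-bit element may be given 16-bit alignment and a
   32-bit element 32-bit alignment; any other (size, align) combination is
   rejected with the "unsupported alignment" error.  */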
12771
12772 static int
12773 neon_alignment_bit (int size, int align, int *do_align, ...)
12774 {
12775 va_list ap;
12776 int result = FAIL, thissize, thisalign;
12777
12778 if (!inst.operands[1].immisalign)
12779 {
12780 *do_align = 0;
12781 return SUCCESS;
12782 }
12783
12784 va_start (ap, do_align);
12785
12786 do
12787 {
12788 thissize = va_arg (ap, int);
12789 if (thissize == -1)
12790 break;
12791 thisalign = va_arg (ap, int);
12792
12793 if (size == thissize && align == thisalign)
12794 result = SUCCESS;
12795 }
12796 while (result != SUCCESS);
12797
12798 va_end (ap);
12799
12800 if (result == SUCCESS)
12801 *do_align = 1;
12802 else
12803 first_error (_("unsupported alignment for instruction"));
12804
12805 return result;
12806 }
12807
12808 static void
12809 do_neon_ld_st_lane (void)
12810 {
12811 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
12812 int align_good, do_align = 0;
12813 int logsize = neon_logbits (et.size);
12814 int align = inst.operands[1].imm >> 8;
12815 int n = (inst.instruction >> 8) & 3;
12816 int max_el = 64 / et.size;
12817
12818 if (et.type == NT_invtype)
12819 return;
12820
12821 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
12822 _("bad list length"));
12823 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
12824 _("scalar index out of range"));
12825 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
12826 && et.size == 8,
12827 _("stride of 2 unavailable when element size is 8"));
12828
12829 switch (n)
12830 {
12831 case 0: /* VLD1 / VST1. */
12832 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
12833 32, 32, -1);
12834 if (align_good == FAIL)
12835 return;
12836 if (do_align)
12837 {
12838 unsigned alignbits = 0;
12839 switch (et.size)
12840 {
12841 case 16: alignbits = 0x1; break;
12842 case 32: alignbits = 0x3; break;
12843 default: ;
12844 }
12845 inst.instruction |= alignbits << 4;
12846 }
12847 break;
12848
12849 case 1: /* VLD2 / VST2. */
12850 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
12851 32, 64, -1);
12852 if (align_good == FAIL)
12853 return;
12854 if (do_align)
12855 inst.instruction |= 1 << 4;
12856 break;
12857
12858 case 2: /* VLD3 / VST3. */
12859 constraint (inst.operands[1].immisalign,
12860 _("can't use alignment with this instruction"));
12861 break;
12862
12863 case 3: /* VLD4 / VST4. */
12864 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
12865 16, 64, 32, 64, 32, 128, -1);
12866 if (align_good == FAIL)
12867 return;
12868 if (do_align)
12869 {
12870 unsigned alignbits = 0;
12871 switch (et.size)
12872 {
12873 case 8: alignbits = 0x1; break;
12874 case 16: alignbits = 0x1; break;
12875 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
12876 default: ;
12877 }
12878 inst.instruction |= alignbits << 4;
12879 }
12880 break;
12881
12882 default: ;
12883 }
12884
12885 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
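  /* For instance, with 32-bit elements neon_logbits gives 2, so the stride
     bit set below is 1 << 6.  */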
12886 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
12887 inst.instruction |= 1 << (4 + logsize);
12888
12889 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
12890 inst.instruction |= logsize << 10;
12891 }
12892
12893 /* Encode single n-element structure to all lanes VLD<n> instructions. */
12894
12895 static void
12896 do_neon_ld_dup (void)
12897 {
12898 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
12899 int align_good, do_align = 0;
12900
12901 if (et.type == NT_invtype)
12902 return;
12903
12904 switch ((inst.instruction >> 8) & 3)
12905 {
12906 case 0: /* VLD1. */
12907 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
12908 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
12909 &do_align, 16, 16, 32, 32, -1);
12910 if (align_good == FAIL)
12911 return;
12912 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
12913 {
12914 case 1: break;
12915 case 2: inst.instruction |= 1 << 5; break;
12916 default: first_error (_("bad list length")); return;
12917 }
12918 inst.instruction |= neon_logbits (et.size) << 6;
12919 break;
12920
12921 case 1: /* VLD2. */
12922 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
12923 &do_align, 8, 16, 16, 32, 32, 64, -1);
12924 if (align_good == FAIL)
12925 return;
12926 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
12927 _("bad list length"));
12928 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
12929 inst.instruction |= 1 << 5;
12930 inst.instruction |= neon_logbits (et.size) << 6;
12931 break;
12932
12933 case 2: /* VLD3. */
12934 constraint (inst.operands[1].immisalign,
12935 _("can't use alignment with this instruction"));
12936 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
12937 _("bad list length"));
12938 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
12939 inst.instruction |= 1 << 5;
12940 inst.instruction |= neon_logbits (et.size) << 6;
12941 break;
12942
12943 case 3: /* VLD4. */
12944 {
12945 int align = inst.operands[1].imm >> 8;
12946 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
12947 16, 64, 32, 64, 32, 128, -1);
12948 if (align_good == FAIL)
12949 return;
12950 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
12951 _("bad list length"));
12952 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
12953 inst.instruction |= 1 << 5;
12954 if (et.size == 32 && align == 128)
12955 inst.instruction |= 0x3 << 6;
12956 else
12957 inst.instruction |= neon_logbits (et.size) << 6;
12958 }
12959 break;
12960
12961 default: ;
12962 }
12963
12964 inst.instruction |= do_align << 4;
12965 }
12966
12967 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
12968 apart from bits [11:4]). */
12969
12970 static void
12971 do_neon_ldx_stx (void)
12972 {
12973 switch (NEON_LANE (inst.operands[0].imm))
12974 {
12975 case NEON_INTERLEAVE_LANES:
12976 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
12977 do_neon_ld_st_interleave ();
12978 break;
12979
12980 case NEON_ALL_LANES:
12981 inst.instruction = NEON_ENC_DUP (inst.instruction);
12982 do_neon_ld_dup ();
12983 break;
12984
12985 default:
12986 inst.instruction = NEON_ENC_LANE (inst.instruction);
12987 do_neon_ld_st_lane ();
12988 }
12989
12990 /* L bit comes from bit mask. */
12991 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12992 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12993 inst.instruction |= inst.operands[1].reg << 16;
12994
12995 if (inst.operands[1].postind)
12996 {
12997 int postreg = inst.operands[1].imm & 0xf;
12998 constraint (!inst.operands[1].immisreg,
12999 _("post-index must be a register"));
13000 constraint (postreg == 0xd || postreg == 0xf,
13001 _("bad register for post-index"));
13002 inst.instruction |= postreg;
13003 }
13004 else if (inst.operands[1].writeback)
13005 {
13006 inst.instruction |= 0xd;
13007 }
13008 else
13009 inst.instruction |= 0xf;
13010
13011 if (thumb_mode)
13012 inst.instruction |= 0xf9000000;
13013 else
13014 inst.instruction |= 0xf4000000;
13015 }
13016
13017 \f
13018 /* Overall per-instruction processing. */
13019
13020 /* We need to be able to fix up arbitrary expressions in some statements.
13021 This is so that we can handle symbols that are an arbitrary distance from
13022 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13023 which returns part of an address in a form which will be valid for
13024 a data instruction. We do this by pushing the expression into a symbol
13025 in the expr_section, and creating a fix for that. */
13026
13027 static void
13028 fix_new_arm (fragS * frag,
13029 int where,
13030 short int size,
13031 expressionS * exp,
13032 int pc_rel,
13033 int reloc)
13034 {
13035 fixS * new_fix;
13036
13037 switch (exp->X_op)
13038 {
13039 case O_constant:
13040 case O_symbol:
13041 case O_add:
13042 case O_subtract:
13043 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
13044 break;
13045
13046 default:
13047 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
13048 pc_rel, reloc);
13049 break;
13050 }
13051
13052 /* Mark whether the fix is to a THUMB instruction, or an ARM
13053 instruction. */
13054 new_fix->tc_fix_data = thumb_mode;
13055 }
13056
13057 /* Create a frag for an instruction requiring relaxation. */
13058 static void
13059 output_relax_insn (void)
13060 {
13061 char * to;
13062 symbolS *sym;
13063 int offset;
13064
13065 #ifdef OBJ_ELF
13066 /* The size of the instruction is unknown, so tie the debug info to the
13067 start of the instruction. */
13068 dwarf2_emit_insn (0);
13069 #endif
13070
13071 switch (inst.reloc.exp.X_op)
13072 {
13073 case O_symbol:
13074 sym = inst.reloc.exp.X_add_symbol;
13075 offset = inst.reloc.exp.X_add_number;
13076 break;
13077 case O_constant:
13078 sym = NULL;
13079 offset = inst.reloc.exp.X_add_number;
13080 break;
13081 default:
13082 sym = make_expr_symbol (&inst.reloc.exp);
13083 offset = 0;
13084 break;
13085 }
13086 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
13087 inst.relax, sym, offset, NULL/*offset, opcode*/);
13088 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
13089 }
13090
13091 /* Write a 32-bit thumb instruction to buf. */
13092 static void
13093 put_thumb32_insn (char * buf, unsigned long insn)
13094 {
13095 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
13096 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
13097 }
13098
13099 static void
13100 output_inst (const char * str)
13101 {
13102 char * to = NULL;
13103
13104 if (inst.error)
13105 {
13106 as_bad ("%s -- `%s'", inst.error, str);
13107 return;
13108 }
13109 if (inst.relax) {
13110 output_relax_insn();
13111 return;
13112 }
13113 if (inst.size == 0)
13114 return;
13115
13116 to = frag_more (inst.size);
13117
13118 if (thumb_mode && (inst.size > THUMB_SIZE))
13119 {
13120 assert (inst.size == (2 * THUMB_SIZE));
13121 put_thumb32_insn (to, inst.instruction);
13122 }
13123 else if (inst.size > INSN_SIZE)
13124 {
13125 assert (inst.size == (2 * INSN_SIZE));
13126 md_number_to_chars (to, inst.instruction, INSN_SIZE);
13127 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
13128 }
13129 else
13130 md_number_to_chars (to, inst.instruction, inst.size);
13131
13132 if (inst.reloc.type != BFD_RELOC_UNUSED)
13133 fix_new_arm (frag_now, to - frag_now->fr_literal,
13134 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
13135 inst.reloc.type);
13136
13137 #ifdef OBJ_ELF
13138 dwarf2_emit_insn (inst.size);
13139 #endif
13140 }
13141
13142 /* Tag values used in struct asm_opcode's tag field. */
13143 enum opcode_tag
13144 {
13145 OT_unconditional, /* Instruction cannot be conditionalized.
13146 The ARM condition field is still 0xE. */
13147 OT_unconditionalF, /* Instruction cannot be conditionalized
13148 and carries 0xF in its ARM condition field. */
13149 OT_csuffix, /* Instruction takes a conditional suffix. */
13150 OT_csuffixF, /* Some forms of the instruction take a conditional
13151 suffix, others place 0xF where the condition field
13152 would be. */
13153 OT_cinfix3, /* Instruction takes a conditional infix,
13154 beginning at character index 3. (In
13155 unified mode, it becomes a suffix.) */
13156 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
13157 tsts, cmps, cmns, and teqs. */
13158 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
13159 character index 3, even in unified mode. Used for
13160 legacy instructions where suffix and infix forms
13161 may be ambiguous. */
13162 OT_csuf_or_in3, /* Instruction takes either a conditional
13163 suffix or an infix at character index 3. */
13164 OT_odd_infix_unc, /* This is the unconditional variant of an
13165 instruction that takes a conditional infix
13166 at an unusual position. In unified mode,
13167 this variant will accept a suffix. */
13168 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
13169 are the conditional variants of instructions that
13170 take conditional infixes in unusual positions.
13171 The infix appears at character index
13172 (tag - OT_odd_infix_0). These are not accepted
13173 in unified mode. */
13174 };
13175
13176 /* Subroutine of md_assemble, responsible for looking up the primary
13177 opcode from the mnemonic the user wrote. STR points to the
13178 beginning of the mnemonic.
13179
13180 This is not simply a hash table lookup, because of conditional
13181 variants. Most instructions have conditional variants, which are
13182 expressed with a _conditional affix_ to the mnemonic. If we were
13183 to encode each conditional variant as a literal string in the opcode
13184 table, it would have approximately 20,000 entries.
13185
13186 Most mnemonics take this affix as a suffix, and in unified syntax,
13187 'most' is upgraded to 'all'. However, in the divided syntax, some
13188 instructions take the affix as an infix, notably the s-variants of
13189 the arithmetic instructions. Of those instructions, all but six
13190 have the infix appear after the third character of the mnemonic.
13191
13192 Accordingly, the algorithm for looking up primary opcodes given
13193 an identifier is:
13194
13195 1. Look up the identifier in the opcode table.
13196 If we find a match, go to step U.
13197
13198 2. Look up the last two characters of the identifier in the
13199 conditions table. If we find a match, look up the first N-2
13200 characters of the identifier in the opcode table. If we
13201 find a match, go to step CE.
13202
13203 3. Look up the fourth and fifth characters of the identifier in
13204 the conditions table. If we find a match, extract those
13205 characters from the identifier, and look up the remaining
13206 characters in the opcode table. If we find a match, go
13207 to step CM.
13208
13209 4. Fail.
13210
13211 U. Examine the tag field of the opcode structure, in case this is
13212 one of the six instructions with its conditional infix in an
13213 unusual place. If it is, the tag tells us where to find the
13214 infix; look it up in the conditions table and set inst.cond
13215 accordingly. Otherwise, this is an unconditional instruction.
13216 Again set inst.cond accordingly. Return the opcode structure.
13217
13218 CE. Examine the tag field to make sure this is an instruction that
13219 should receive a conditional suffix. If it is not, fail.
13220 Otherwise, set inst.cond from the suffix we already looked up,
13221 and return the opcode structure.
13222
13223 CM. Examine the tag field to make sure this is an instruction that
13224 should receive a conditional infix after the third character.
13225 If it is not, fail. Otherwise, undo the edits to the current
13226 line of input and proceed as for case CE. */
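/* To illustrate with the usual spellings: plain "add" matches at step 1 and
   is handled by step U; unified-syntax "addseq" matches "adds" plus the "eq"
   suffix at step 2 (case CE); divided-syntax "addeqs" only matches once the
   "eq" infix after the third character is stripped at step 3 (case CM).  */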
13227
13228 static const struct asm_opcode *
13229 opcode_lookup (char **str)
13230 {
13231 char *end, *base;
13232 char *affix;
13233 const struct asm_opcode *opcode;
13234 const struct asm_cond *cond;
13235 char save[2];
13236
13237 /* Scan up to the end of the mnemonic, which must end in white space,
13238 '.' (in unified mode only), or end of string. */
13239 for (base = end = *str; *end != '\0'; end++)
13240 if (*end == ' ' || (unified_syntax && *end == '.'))
13241 break;
13242
13243 if (end == base)
13244 return 0;
13245
13246 /* Handle a possible width suffix and/or Neon type suffix. */
13247 if (end[0] == '.')
13248 {
13249 int offset = 2;
13250
13251 if (end[1] == 'w')
13252 inst.size_req = 4;
13253 else if (end[1] == 'n')
13254 inst.size_req = 2;
13255 else
13256 offset = 0;
13257
13258 inst.vectype.elems = 0;
13259
13260 *str = end + offset;
13261
13262 if (end[offset] == '.')
13263 {
13264 /* See if we have a Neon type suffix. */
13265 if (parse_neon_type (&inst.vectype, str) == FAIL)
13266 return 0;
13267 }
13268 else if (end[offset] != '\0' && end[offset] != ' ')
13269 return 0;
13270 }
13271 else
13272 *str = end;
13273
13274 /* Look for unaffixed or special-case affixed mnemonic. */
13275 opcode = hash_find_n (arm_ops_hsh, base, end - base);
13276 if (opcode)
13277 {
13278 /* step U */
13279 if (opcode->tag < OT_odd_infix_0)
13280 {
13281 inst.cond = COND_ALWAYS;
13282 return opcode;
13283 }
13284
13285 if (unified_syntax)
13286 as_warn (_("conditional infixes are deprecated in unified syntax"));
13287 affix = base + (opcode->tag - OT_odd_infix_0);
13288 cond = hash_find_n (arm_cond_hsh, affix, 2);
13289 assert (cond);
13290
13291 inst.cond = cond->value;
13292 return opcode;
13293 }
13294
13295 /* Cannot have a conditional suffix on a mnemonic of less than two
13296 characters. */
13297 if (end - base < 3)
13298 return 0;
13299
13300 /* Look for suffixed mnemonic. */
13301 affix = end - 2;
13302 cond = hash_find_n (arm_cond_hsh, affix, 2);
13303 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
13304 if (opcode && cond)
13305 {
13306 /* step CE */
13307 switch (opcode->tag)
13308 {
13309 case OT_cinfix3_legacy:
13310 /* Ignore conditional suffixes matched on infix only mnemonics. */
13311 break;
13312
13313 case OT_cinfix3:
13314 case OT_cinfix3_deprecated:
13315 case OT_odd_infix_unc:
13316 if (!unified_syntax)
13317 return 0;
13318 /* else fall through */
13319
13320 case OT_csuffix:
13321 case OT_csuffixF:
13322 case OT_csuf_or_in3:
13323 inst.cond = cond->value;
13324 return opcode;
13325
13326 case OT_unconditional:
13327 case OT_unconditionalF:
13328 if (thumb_mode)
13329 {
13330 inst.cond = cond->value;
13331 }
13332 else
13333 {
13334 /* delayed diagnostic */
13335 inst.error = BAD_COND;
13336 inst.cond = COND_ALWAYS;
13337 }
13338 return opcode;
13339
13340 default:
13341 return 0;
13342 }
13343 }
13344
13345 /* Cannot have a usual-position infix on a mnemonic of less than
13346 six characters (five would be a suffix). */
13347 if (end - base < 6)
13348 return 0;
13349
13350 /* Look for infixed mnemonic in the usual position. */
13351 affix = base + 3;
13352 cond = hash_find_n (arm_cond_hsh, affix, 2);
13353 if (!cond)
13354 return 0;
13355
13356 memcpy (save, affix, 2);
13357 memmove (affix, affix + 2, (end - affix) - 2);
13358 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
13359 memmove (affix + 2, affix, (end - affix) - 2);
13360 memcpy (affix, save, 2);
13361
13362 if (opcode
13363 && (opcode->tag == OT_cinfix3
13364 || opcode->tag == OT_cinfix3_deprecated
13365 || opcode->tag == OT_csuf_or_in3
13366 || opcode->tag == OT_cinfix3_legacy))
13367 {
13368 /* step CM */
13369 if (unified_syntax
13370 && (opcode->tag == OT_cinfix3
13371 || opcode->tag == OT_cinfix3_deprecated))
13372 as_warn (_("conditional infixes are deprecated in unified syntax"));
13373
13374 inst.cond = cond->value;
13375 return opcode;
13376 }
13377
13378 return 0;
13379 }
13380
13381 void
13382 md_assemble (char *str)
13383 {
13384 char *p = str;
13385 const struct asm_opcode * opcode;
13386
13387 /* Align the previous label if needed. */
13388 if (last_label_seen != NULL)
13389 {
13390 symbol_set_frag (last_label_seen, frag_now);
13391 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
13392 S_SET_SEGMENT (last_label_seen, now_seg);
13393 }
13394
13395 memset (&inst, '\0', sizeof (inst));
13396 inst.reloc.type = BFD_RELOC_UNUSED;
13397
13398 opcode = opcode_lookup (&p);
13399 if (!opcode)
13400 {
13401 /* It wasn't an instruction, but it might be a register alias of
13402 the form alias .req reg, or a Neon .dn/.qn directive. */
13403 if (!create_register_alias (str, p)
13404 && !create_neon_reg_alias (str, p))
13405 as_bad (_("bad instruction `%s'"), str);
13406
13407 return;
13408 }
13409
13410 if (opcode->tag == OT_cinfix3_deprecated)
13411 as_warn (_("s suffix on comparison instruction is deprecated"));
13412
13413 /* The value which unconditional instructions should have in place of the
13414 condition field. */
13415 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
13416
13417 if (thumb_mode)
13418 {
13419 arm_feature_set variant;
13420
13421 variant = cpu_variant;
13422 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
13423 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
13424 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
13425 /* Check that this instruction is supported for this CPU. */
13426 if (!opcode->tvariant
13427 || (thumb_mode == 1
13428 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
13429 {
13430 as_bad (_("selected processor does not support `%s'"), str);
13431 return;
13432 }
13433 if (inst.cond != COND_ALWAYS && !unified_syntax
13434 && opcode->tencode != do_t_branch)
13435 {
13436 as_bad (_("Thumb does not support conditional execution"));
13437 return;
13438 }
13439
13440 /* Check conditional suffixes. */
13441 if (current_it_mask)
13442 {
13443 int cond;
13444 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
13445 current_it_mask <<= 1;
13446 current_it_mask &= 0x1f;
13447 /* The BKPT instruction is unconditional even in an IT block. */
13448 if (!inst.error
13449 && cond != inst.cond && opcode->tencode != do_t_bkpt)
13450 {
13451 as_bad (_("incorrect condition in IT block"));
13452 return;
13453 }
13454 }
13455 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
13456 {
13457 as_bad (_("Thumb conditional instruction not in IT block"));
13458 return;
13459 }
13460
13461 mapping_state (MAP_THUMB);
13462 inst.instruction = opcode->tvalue;
13463
13464 if (!parse_operands (p, opcode->operands))
13465 opcode->tencode ();
13466
13467 /* Clear current_it_mask at the end of an IT block. */
13468 if (current_it_mask == 0x10)
13469 current_it_mask = 0;
13470
13471 if (!(inst.error || inst.relax))
13472 {
13473 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
13474 inst.size = (inst.instruction > 0xffff ? 4 : 2);
13475 if (inst.size_req && inst.size_req != inst.size)
13476 {
13477 as_bad (_("cannot honor width suffix -- `%s'"), str);
13478 return;
13479 }
13480 }
13481 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
13482 *opcode->tvariant);
13483 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
13484 set those bits when Thumb-2 32-bit instructions are seen, i.e.
13485 anything other than bl/blx.
13486 This is overly pessimistic for relaxable instructions. */
13487 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
13488 || inst.relax)
13489 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
13490 arm_ext_v6t2);
13491 }
13492 else
13493 {
13494 /* Check that this instruction is supported for this CPU. */
13495 if (!opcode->avariant ||
13496 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
13497 {
13498 as_bad (_("selected processor does not support `%s'"), str);
13499 return;
13500 }
13501 if (inst.size_req)
13502 {
13503 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
13504 return;
13505 }
13506
13507 mapping_state (MAP_ARM);
13508 inst.instruction = opcode->avalue;
13509 if (opcode->tag == OT_unconditionalF)
13510 inst.instruction |= 0xF << 28;
13511 else
13512 inst.instruction |= inst.cond << 28;
13513 inst.size = INSN_SIZE;
13514 if (!parse_operands (p, opcode->operands))
13515 opcode->aencode ();
13516 /* Arm mode bx is marked as both v4T and v5 because it's still required
13517 on a hypothetical non-thumb v5 core. */
13518 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
13519 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
13520 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
13521 else
13522 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
13523 *opcode->avariant);
13524 }
13525 output_inst (str);
13526 }
13527
13528 /* Various frobbings of labels and their addresses. */
13529
13530 void
13531 arm_start_line_hook (void)
13532 {
13533 last_label_seen = NULL;
13534 }
13535
13536 void
13537 arm_frob_label (symbolS * sym)
13538 {
13539 last_label_seen = sym;
13540
13541 ARM_SET_THUMB (sym, thumb_mode);
13542
13543 #if defined OBJ_COFF || defined OBJ_ELF
13544 ARM_SET_INTERWORK (sym, support_interwork);
13545 #endif
13546
13547 /* Note - do not allow local symbols (.Lxxx) to be labeled
13548 as Thumb functions. This is because these labels, whilst
13549 they exist inside Thumb code, are not the entry points for
13550 possible ARM->Thumb calls. Also, these labels can be used
13551 as part of a computed goto or switch statement, e.g. gcc
13552 can generate code that looks like this:
13553
13554 ldr r2, [pc, .Laaa]
13555 lsl r3, r3, #2
13556 ldr r2, [r3, r2]
13557 mov pc, r2
13558
13559 .Lbbb: .word .Lxxx
13560 .Lccc: .word .Lyyy
13561 ..etc...
13562 .Laaa: .word .Lbbb
13563
13564 The first instruction loads the address of the jump table.
13565 The second instruction converts a table index into a byte offset.
13566 The third instruction gets the jump address out of the table.
13567 The fourth instruction performs the jump.
13568
13569 If the address stored at .Laaa is that of a symbol which has the
13570 Thumb_Func bit set, then the linker will arrange for this address
13571 to have the bottom bit set, which in turn would mean that the
13572 address computation performed by the third instruction would end
13573 up with the bottom bit set. Since the ARM is capable of unaligned
13574 word loads, the instruction would then load the incorrect address
13575 out of the jump table, and chaos would ensue. */
13576 if (label_is_thumb_function_name
13577 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
13578 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
13579 {
13580 /* When the address of a Thumb function is taken the bottom
13581 bit of that address should be set. This will allow
13582 interworking between Arm and Thumb functions to work
13583 correctly. */
13584
13585 THUMB_SET_FUNC (sym, 1);
13586
13587 label_is_thumb_function_name = FALSE;
13588 }
13589
13590 #ifdef OBJ_ELF
13591 dwarf2_emit_label (sym);
13592 #endif
13593 }
13594
13595 int
13596 arm_data_in_code (void)
13597 {
13598 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
13599 {
13600 *input_line_pointer = '/';
13601 input_line_pointer += 5;
13602 *input_line_pointer = 0;
13603 return 1;
13604 }
13605
13606 return 0;
13607 }
13608
13609 char *
13610 arm_canonicalize_symbol_name (char * name)
13611 {
13612 int len;
13613
13614 if (thumb_mode && (len = strlen (name)) > 5
13615 && streq (name + len - 5, "/data"))
13616 *(name + len - 5) = 0;
13617
13618 return name;
13619 }
13620 \f
13621 /* Table of all register names defined by default. The user can
13622 define additional names with .req. Note that all register names
13623 should appear in both upper and lowercase variants. Some registers
13624 also have mixed-case names. */
13625
13626 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
13627 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
13628 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
13629 #define REGSET(p,t) \
13630 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
13631 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
13632 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
13633 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
13634 #define REGSETH(p,t) \
13635 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
13636 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
13637 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
13638 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
13639 #define REGSET2(p,t) \
13640 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
13641 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
13642 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
13643 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
13644
13645 static const struct reg_entry reg_names[] =
13646 {
13647 /* ARM integer registers. */
13648 REGSET(r, RN), REGSET(R, RN),
13649
13650 /* ATPCS synonyms. */
13651 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
13652 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
13653 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
13654
13655 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
13656 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
13657 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
13658
13659 /* Well-known aliases. */
13660 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
13661 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
13662
13663 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
13664 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
13665
13666 /* Coprocessor numbers. */
13667 REGSET(p, CP), REGSET(P, CP),
13668
13669 /* Coprocessor register numbers. The "cr" variants are for backward
13670 compatibility. */
13671 REGSET(c, CN), REGSET(C, CN),
13672 REGSET(cr, CN), REGSET(CR, CN),
13673
13674 /* FPA registers. */
13675 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
13676 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
13677
13678 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
13679 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
13680
13681 /* VFP SP registers. */
13682 REGSET(s,VFS), REGSET(S,VFS),
13683 REGSETH(s,VFS), REGSETH(S,VFS),
13684
13685 /* VFP DP Registers. */
13686 REGSET(d,VFD), REGSET(D,VFD),
13687 /* Extra Neon DP registers. */
13688 REGSETH(d,VFD), REGSETH(D,VFD),
13689
13690 /* Neon QP registers. */
13691 REGSET2(q,NQ), REGSET2(Q,NQ),
13692
13693 /* VFP control registers. */
13694 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
13695 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
13696
13697 /* Maverick DSP coprocessor registers. */
13698 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
13699 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
13700
13701 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
13702 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
13703 REGDEF(dspsc,0,DSPSC),
13704
13705 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
13706 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
13707 REGDEF(DSPSC,0,DSPSC),
13708
13709 /* iWMMXt data registers - p0, c0-15. */
13710 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
13711
13712 /* iWMMXt control registers - p1, c0-3. */
13713 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
13714 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
13715 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
13716 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
13717
13718 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
13719 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
13720 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
13721 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
13722 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
13723
13724 /* XScale accumulator registers. */
13725 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
13726 };
13727 #undef REGDEF
13728 #undef REGNUM
13729 #undef REGSET
13730
13731 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
13732 within psr_required_here. */
13733 static const struct asm_psr psrs[] =
13734 {
13735 /* Backward compatibility notation. Note that "all" is no longer
13736 truly all possible PSR bits. */
13737 {"all", PSR_c | PSR_f},
13738 {"flg", PSR_f},
13739 {"ctl", PSR_c},
13740
13741 /* Individual flags. */
13742 {"f", PSR_f},
13743 {"c", PSR_c},
13744 {"x", PSR_x},
13745 {"s", PSR_s},
13746 /* Combinations of flags. */
13747 {"fs", PSR_f | PSR_s},
13748 {"fx", PSR_f | PSR_x},
13749 {"fc", PSR_f | PSR_c},
13750 {"sf", PSR_s | PSR_f},
13751 {"sx", PSR_s | PSR_x},
13752 {"sc", PSR_s | PSR_c},
13753 {"xf", PSR_x | PSR_f},
13754 {"xs", PSR_x | PSR_s},
13755 {"xc", PSR_x | PSR_c},
13756 {"cf", PSR_c | PSR_f},
13757 {"cs", PSR_c | PSR_s},
13758 {"cx", PSR_c | PSR_x},
13759 {"fsx", PSR_f | PSR_s | PSR_x},
13760 {"fsc", PSR_f | PSR_s | PSR_c},
13761 {"fxs", PSR_f | PSR_x | PSR_s},
13762 {"fxc", PSR_f | PSR_x | PSR_c},
13763 {"fcs", PSR_f | PSR_c | PSR_s},
13764 {"fcx", PSR_f | PSR_c | PSR_x},
13765 {"sfx", PSR_s | PSR_f | PSR_x},
13766 {"sfc", PSR_s | PSR_f | PSR_c},
13767 {"sxf", PSR_s | PSR_x | PSR_f},
13768 {"sxc", PSR_s | PSR_x | PSR_c},
13769 {"scf", PSR_s | PSR_c | PSR_f},
13770 {"scx", PSR_s | PSR_c | PSR_x},
13771 {"xfs", PSR_x | PSR_f | PSR_s},
13772 {"xfc", PSR_x | PSR_f | PSR_c},
13773 {"xsf", PSR_x | PSR_s | PSR_f},
13774 {"xsc", PSR_x | PSR_s | PSR_c},
13775 {"xcf", PSR_x | PSR_c | PSR_f},
13776 {"xcs", PSR_x | PSR_c | PSR_s},
13777 {"cfs", PSR_c | PSR_f | PSR_s},
13778 {"cfx", PSR_c | PSR_f | PSR_x},
13779 {"csf", PSR_c | PSR_s | PSR_f},
13780 {"csx", PSR_c | PSR_s | PSR_x},
13781 {"cxf", PSR_c | PSR_x | PSR_f},
13782 {"cxs", PSR_c | PSR_x | PSR_s},
13783 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
13784 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
13785 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
13786 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
13787 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
13788 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
13789 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
13790 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
13791 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
13792 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
13793 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
13794 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
13795 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
13796 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
13797 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
13798 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
13799 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
13800 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
13801 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
13802 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
13803 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
13804 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
13805 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
13806 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
13807 };
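/* Typical use is as an MSR field specifier, e.g. "msr CPSR_fc, r0"
   writes only the flag and control fields.  */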
13808
13809 /* Table of V7M psr names. */
13810 static const struct asm_psr v7m_psrs[] =
13811 {
13812 {"apsr", 0 },
13813 {"iapsr", 1 },
13814 {"eapsr", 2 },
13815 {"psr", 3 },
13816 {"ipsr", 5 },
13817 {"epsr", 6 },
13818 {"iepsr", 7 },
13819 {"msp", 8 },
13820 {"psp", 9 },
13821 {"primask", 16},
13822 {"basepri", 17},
13823 {"basepri_max", 18},
13824 {"faultmask", 19},
13825 {"control", 20}
13826 };
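/* These names are used with MRS/MSR on V7M, e.g. "mrs r0, primask"
   or "msr basepri, r1".  */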
13827
13828 /* Table of all shift-in-operand names. */
13829 static const struct asm_shift_name shift_names [] =
13830 {
13831 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
13832 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
13833 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
13834 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
13835 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
13836 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
13837 };
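/* A shift name appears in the last operand of a shifted register,
   e.g. "mov r0, r1, lsl #2"; RRX takes no shift amount.  */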
13838
13839 /* Table of all explicit relocation names. */
13840 #ifdef OBJ_ELF
13841 static struct reloc_entry reloc_names[] =
13842 {
13843 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
13844 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
13845 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
13846 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
13847 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
13848 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
13849 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
13850 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
13851 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
13852 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
13853 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
13854 };
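/* An explicit relocation is typically written in parentheses after the
   symbol, e.g. ".word sym(GOTOFF)" or "bl func(PLT)".  */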
13855 #endif
13856
13857 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
13858 static const struct asm_cond conds[] =
13859 {
13860 {"eq", 0x0},
13861 {"ne", 0x1},
13862 {"cs", 0x2}, {"hs", 0x2},
13863 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
13864 {"mi", 0x4},
13865 {"pl", 0x5},
13866 {"vs", 0x6},
13867 {"vc", 0x7},
13868 {"hi", 0x8},
13869 {"ls", 0x9},
13870 {"ge", 0xa},
13871 {"lt", 0xb},
13872 {"gt", 0xc},
13873 {"le", 0xd},
13874 {"al", 0xe}
13875 };
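/* A condition is suffixed to (or, for some mnemonics, infixed into) an
   instruction, e.g. "addeq r0, r1, r2" or "bne label"; "hs" and "lo"
   are synonyms for "cs" and "cc".  */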
13876
13877 static struct asm_barrier_opt barrier_opt_names[] =
13878 {
13879 { "sy", 0xf },
13880 { "un", 0x7 },
13881 { "st", 0xe },
13882 { "unst", 0x6 }
13883 };
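/* Barrier options are used with DMB/DSB (and ISB), e.g. "dmb sy" for
   the full system or "dsb st" for stores only.  */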
13884
13885 /* Table of ARM-format instructions. */
13886
13887 /* Macros for gluing together operand strings. N.B. In all cases
13888 other than OPS0, the trailing OP_stop comes from default
13889 zero-initialization of the unspecified elements of the array. */
13890 #define OPS0() { OP_stop, }
13891 #define OPS1(a) { OP_##a, }
13892 #define OPS2(a,b) { OP_##a,OP_##b, }
13893 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
13894 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
13895 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
13896 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
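/* For example, OPS2 (RR, SH) expands to { OP_RR, OP_SH, }; the
   remaining elements, including the terminating OP_stop, come from
   zero-initialization as noted above.  */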
13897
13898 /* These macros abstract out the exact format of the mnemonic table and
13899 save some repeated characters. */
13900
13901 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
13902 #define TxCE(mnem, op, top, nops, ops, ae, te) \
13903 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
13904 THUMB_VARIANT, do_##ae, do_##te }
13905
13906 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
13907 a T_MNEM_xyz enumerator. */
13908 #define TCE(mnem, aop, top, nops, ops, ae, te) \
13909 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
13910 #define tCE(mnem, aop, top, nops, ops, ae, te) \
13911 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
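/* As a sketch, once ARM_VARIANT and THUMB_VARIANT are defined,
     TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz)
   expands to roughly
     { "clz", { OP_RRnpc, OP_RRnpc, }, OT_csuffix, 0x16f0f10, 0xfab0f080,
       ARM_VARIANT, THUMB_VARIANT, do_rd_rm, do_t_clz }.  */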
13912
13913 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
13914 infix after the third character. */
13915 #define TxC3(mnem, op, top, nops, ops, ae, te) \
13916 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
13917 THUMB_VARIANT, do_##ae, do_##te }
13918 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
13919 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
13920 THUMB_VARIANT, do_##ae, do_##te }
13921 #define TC3(mnem, aop, top, nops, ops, ae, te) \
13922 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
13923 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
13924 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
13925 #define tC3(mnem, aop, top, nops, ops, ae, te) \
13926 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
13927 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
13928 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
13929
13930 /* Mnemonic with a conditional infix in an unusual place.  Each conditional
13931    variant has to appear explicitly in the opcode table. */
13932 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
13933 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
13934 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
13935
13936 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
13937 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
13938 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
13939 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
13940 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
13941 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
13942 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
13943 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
13944 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
13945 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
13946 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
13947 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
13948 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
13949 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
13950 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
13951 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
13952 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
13953 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
13954 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
13955 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
13956
13957 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
13958 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
13959 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
13960 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
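/* For example, tCM(ld,sh, ...) generates "ldsh", "ldeqsh", "ldnesh",
   ..., "ldalsh" -- one table entry per condition, with the condition
   infixed after "ld".  */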
13961
13962 /* Mnemonic that cannot be conditionalized.  The ARM condition-code
13963    field is still 0xE.  Many of the Thumb variants can be executed
13964    conditionally (inside an IT block), so that case is checked separately. */
13965 #define TUE(mnem, op, top, nops, ops, ae, te) \
13966 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
13967 THUMB_VARIANT, do_##ae, do_##te }
13968
13969 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
13970 condition code field. */
13971 #define TUF(mnem, op, top, nops, ops, ae, te) \
13972 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
13973 THUMB_VARIANT, do_##ae, do_##te }
13974
13975 /* ARM-only variants of all the above. */
13976 #define CE(mnem, op, nops, ops, ae) \
13977 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13978
13979 #define C3(mnem, op, nops, ops, ae) \
13980 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13981
13982 /* Legacy mnemonics that always have conditional infix after the third
13983 character. */
13984 #define CL(mnem, op, nops, ops, ae) \
13985 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
13986 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13987
13988 /* Coprocessor instructions.  Isomorphic between ARM and Thumb-2. */
13989 #define cCE(mnem, op, nops, ops, ae) \
13990 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13991
13992 /* Legacy coprocessor instructions where conditional infix and conditional
13993 suffix are ambiguous. For consistency this includes all FPA instructions,
13994 not just the potentially ambiguous ones. */
13995 #define cCL(mnem, op, nops, ops, ae) \
13996 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
13997 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13998
13999 /* Coprocessor, takes either a suffix or a position-3 infix
14000 (for an FPA corner case). */
14001 #define C3E(mnem, op, nops, ops, ae) \
14002 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
14003 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14004
14005 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
14006 { #m1 #m2 #m3, OPS##nops ops, \
14007 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14008 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14009
14010 #define CM(m1, m2, op, nops, ops, ae) \
14011 xCM_(m1, , m2, op, nops, ops, ae), \
14012 xCM_(m1, eq, m2, op, nops, ops, ae), \
14013 xCM_(m1, ne, m2, op, nops, ops, ae), \
14014 xCM_(m1, cs, m2, op, nops, ops, ae), \
14015 xCM_(m1, hs, m2, op, nops, ops, ae), \
14016 xCM_(m1, cc, m2, op, nops, ops, ae), \
14017 xCM_(m1, ul, m2, op, nops, ops, ae), \
14018 xCM_(m1, lo, m2, op, nops, ops, ae), \
14019 xCM_(m1, mi, m2, op, nops, ops, ae), \
14020 xCM_(m1, pl, m2, op, nops, ops, ae), \
14021 xCM_(m1, vs, m2, op, nops, ops, ae), \
14022 xCM_(m1, vc, m2, op, nops, ops, ae), \
14023 xCM_(m1, hi, m2, op, nops, ops, ae), \
14024 xCM_(m1, ls, m2, op, nops, ops, ae), \
14025 xCM_(m1, ge, m2, op, nops, ops, ae), \
14026 xCM_(m1, lt, m2, op, nops, ops, ae), \
14027 xCM_(m1, gt, m2, op, nops, ops, ae), \
14028 xCM_(m1, le, m2, op, nops, ops, ae), \
14029 xCM_(m1, al, m2, op, nops, ops, ae)
14030
14031 #define UE(mnem, op, nops, ops, ae) \
14032 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14033
14034 #define UF(mnem, op, nops, ops, ae) \
14035 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14036
14037 /* Neon data-processing.  ARM versions are unconditional with cond=0xf.
14038    The Thumb and ARM encodings match in bits 0-23 and differ only in the
14039    placement of one bit (24 in ARM, 28 in Thumb), so we use the same encoding function for each. */
14040 #define NUF(mnem, op, nops, ops, enc) \
14041 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
14042 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14043
14044 /* Neon data processing, version which indirects through neon_enc_tab for
14045 the various overloaded versions of opcodes. */
14046 #define nUF(mnem, op, nops, ops, enc) \
14047 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
14048 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14049
14050 /* Neon insn with conditional suffix for the ARM version, non-overloaded
14051 version. */
14052 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
14053 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
14054 THUMB_VARIANT, do_##enc, do_##enc }
14055
14056 #define NCE(mnem, op, nops, ops, enc) \
14057 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14058
14059 #define NCEF(mnem, op, nops, ops, enc) \
14060 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14061
14062 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
14063 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
14064 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
14065 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14066
14067 #define nCE(mnem, op, nops, ops, enc) \
14068 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14069
14070 #define nCEF(mnem, op, nops, ops, enc) \
14071 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14072
14073 #define do_0 0
14074
14075 /* Thumb-only, unconditional. */
14076 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
14077
14078 static const struct asm_opcode insns[] =
14079 {
14080 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14081 #define THUMB_VARIANT &arm_ext_v4t
14082 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
14083 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
14084 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
14085 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
14086 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
14087 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
14088 tCE(add, 0800000, add, 3, (RR, oRR, SH), arit, t_add_sub),
14089 tC3(adds, 0900000, adds, 3, (RR, oRR, SH), arit, t_add_sub),
14090 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
14091 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
14092 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
14093 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
14094 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
14095 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
14096 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
14097 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
14098
14099 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14100 for setting PSR flag bits. They are obsolete in V6 and do not
14101 have Thumb equivalents. */
14102 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14103 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14104 CL(tstp, 110f000, 2, (RR, SH), cmp),
14105 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14106 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14107 CL(cmpp, 150f000, 2, (RR, SH), cmp),
14108 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14109 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14110 CL(cmnp, 170f000, 2, (RR, SH), cmp),
14111
14112 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
14113 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
14114 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
14115 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
14116
14117 tCE(ldr, 4100000, ldr, 2, (RR, ADDR), ldst, t_ldst),
14118 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDR), ldst, t_ldst),
14119 tCE(str, 4000000, str, 2, (RR, ADDR), ldst, t_ldst),
14120 tC3(strb, 4400000, strb, 2, (RR, ADDR), ldst, t_ldst),
14121
14122 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14123 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14124 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14125 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14126 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14127 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14128
14129 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
14130 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
14131 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
14132 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
14133
14134 /* Pseudo ops. */
14135 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
14136 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
14137 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
14138
14139 /* Thumb-compatibility pseudo ops. */
14140 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
14141 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
14142 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
14143 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
14144 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
14145 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
14146 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
14147 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
14148 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
14149 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
14150 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
14151 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
14152
14153 #undef THUMB_VARIANT
14154 #define THUMB_VARIANT &arm_ext_v6
14155 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
14156
14157 /* V1 instructions with no Thumb analogue prior to V6T2. */
14158 #undef THUMB_VARIANT
14159 #define THUMB_VARIANT &arm_ext_v6t2
14160 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
14161 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
14162 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14163 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14164 CL(teqp, 130f000, 2, (RR, SH), cmp),
14165
14166 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
14167 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
14168 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
14169 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
14170
14171 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14172 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14173
14174 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14175 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14176
14177 /* V1 instructions with no Thumb analogue at all. */
14178 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
14179 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
14180
14181 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
14182 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
14183 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
14184 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
14185 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
14186 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
14187 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
14188 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
14189
14190 #undef ARM_VARIANT
14191 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14192 #undef THUMB_VARIANT
14193 #define THUMB_VARIANT &arm_ext_v4t
14194 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14195 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14196
14197 #undef THUMB_VARIANT
14198 #define THUMB_VARIANT &arm_ext_v6t2
14199 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14200 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
14201
14202 /* Generic coprocessor instructions. */
14203 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14204 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDR), lstc, lstc),
14205 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDR), lstc, lstc),
14206 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDR), lstc, lstc),
14207 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDR), lstc, lstc),
14208 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14209 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14210
14211 #undef ARM_VARIANT
14212 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14213 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14214 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14215
14216 #undef ARM_VARIANT
14217 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14218 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
14219 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
14220
14221 #undef ARM_VARIANT
14222 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14223 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14224 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14225 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14226 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14227 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14228 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14229 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14230 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14231
14232 #undef ARM_VARIANT
14233 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14234 #undef THUMB_VARIANT
14235 #define THUMB_VARIANT &arm_ext_v4t
14236 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDR), ldstv4, t_ldst),
14237 tC3(strh, 00000b0, strh, 2, (RR, ADDR), ldstv4, t_ldst),
14238 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
14239 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
14240 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
14241 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
14242
14243 #undef ARM_VARIANT
14244 #define ARM_VARIANT &arm_ext_v4t_5
14245 /* ARM Architecture 4T. */
14246 /* Note: bx (and blx) are required on V5, even if the processor does
14247 not support Thumb. */
14248 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
14249
14250 #undef ARM_VARIANT
14251 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14252 #undef THUMB_VARIANT
14253 #define THUMB_VARIANT &arm_ext_v5t
14254 /* Note: blx has 2 variants; the .value coded here is for
14255 BLX(2). Only this variant has conditional execution. */
14256 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
14257 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
14258
14259 #undef THUMB_VARIANT
14260 #define THUMB_VARIANT &arm_ext_v6t2
14261 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
14262 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDR), lstc, lstc),
14263 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDR), lstc, lstc),
14264 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDR), lstc, lstc),
14265 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDR), lstc, lstc),
14266 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14267 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14268 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14269
14270 #undef ARM_VARIANT
14271 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14272 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14273 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14274 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14275 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14276
14277 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14278 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14279
14280 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14281 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14282 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14283 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14284
14285 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14286 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14287 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14288 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14289
14290 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14291 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14292
14293 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14294 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14295 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14296 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14297
14298 #undef ARM_VARIANT
14299 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
14300 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
14301 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
14302 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
14303
14304 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14305 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14306
14307 #undef ARM_VARIANT
14308 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
14309 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
14310
14311 #undef ARM_VARIANT
14312 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
14313 #undef THUMB_VARIANT
14314 #define THUMB_VARIANT &arm_ext_v6
14315 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
14316 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
14317 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14318 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14319 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14320 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14321 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14322 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14323 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14324 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
14325
14326 #undef THUMB_VARIANT
14327 #define THUMB_VARIANT &arm_ext_v6t2
14328 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
14329 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14330 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14331
14332 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
14333 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
14334
14335 /* ARM V6 not included in V7M (e.g. integer SIMD). */
14336 #undef THUMB_VARIANT
14337 #define THUMB_VARIANT &arm_ext_v6_notm
14338 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
14339 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
14340 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
14341 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14342 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14343 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14344 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14345 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14346 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14347 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14348 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14349 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14350 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14351 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14352 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14353 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14354 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14355 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14356 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14357 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14358 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14359 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14360 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14361 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14362 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14363 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14364 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14365 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14366 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14367 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14368 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14369 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14370 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14371 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14372 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14373 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14374 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14375 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14376 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14377 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14378 UF(rfeib, 9900a00, 1, (RRw), rfe),
14379 UF(rfeda, 8100a00, 1, (RRw), rfe),
14380 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14381 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14382 UF(rfefa, 9900a00, 1, (RRw), rfe),
14383 UF(rfeea, 8100a00, 1, (RRw), rfe),
14384 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14385 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14386 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14387 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14388 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14389 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14390 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14391 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14392 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14393 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14394 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14395 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14396 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14397 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14398 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14399 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14400 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14401 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14402 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14403 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14404 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14405 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14406 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14407 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14408 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14409 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14410 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14411 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14412 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
14413 UF(srsib, 9cd0500, 1, (I31w), srs),
14414 UF(srsda, 84d0500, 1, (I31w), srs),
14415 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
14416 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
14417 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
14418 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
14419 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14420 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14421 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
14422
14423 #undef ARM_VARIANT
14424 #define ARM_VARIANT &arm_ext_v6k
14425 #undef THUMB_VARIANT
14426 #define THUMB_VARIANT &arm_ext_v6k
14427 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
14428 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
14429 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
14430 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
14431
14432 #undef THUMB_VARIANT
14433 #define THUMB_VARIANT &arm_ext_v6_notm
14434 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
14435 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
14436
14437 #undef THUMB_VARIANT
14438 #define THUMB_VARIANT &arm_ext_v6t2
14439 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
14440 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
14441 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
14442 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
14443 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
14444
14445 #undef ARM_VARIANT
14446 #define ARM_VARIANT &arm_ext_v6z
14447 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
14448
14449 #undef ARM_VARIANT
14450 #define ARM_VARIANT &arm_ext_v6t2
14451 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
14452 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
14453 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
14454 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
14455
14456 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14457 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
14458 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
14459 TCE(rbit, 3ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
14460
14461 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14462 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14463 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14464 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14465
14466 UT(cbnz, b900, 2, (RR, EXP), t_czb),
14467 UT(cbz, b100, 2, (RR, EXP), t_czb),
14468 /* ARM does not really have an IT instruction. */
14469 TUE(it, 0, bf08, 1, (COND), it, t_it),
14470 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
14471 TUE(ite, 0, bf04, 1, (COND), it, t_it),
14472 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
14473 TUE(itet, 0, bf06, 1, (COND), it, t_it),
14474 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
14475 TUE(itee, 0, bf02, 1, (COND), it, t_it),
14476 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
14477 TUE(itett, 0, bf07, 1, (COND), it, t_it),
14478 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
14479 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
14480 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
14481 TUE(itete, 0, bf05, 1, (COND), it, t_it),
14482 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
14483 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
14484
14485 /* Thumb-2 only instructions. */
14486 #undef ARM_VARIANT
14487 #define ARM_VARIANT NULL
14488
14489 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
14490 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
14491 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
14492 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
14493
14494 /* Thumb-2 hardware division instructions (R and M profiles only). */
14495 #undef THUMB_VARIANT
14496 #define THUMB_VARIANT &arm_ext_div
14497 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
14498 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
14499
14500 /* ARM V7 instructions. */
14501 #undef ARM_VARIANT
14502 #define ARM_VARIANT &arm_ext_v7
14503 #undef THUMB_VARIANT
14504 #define THUMB_VARIANT &arm_ext_v7
14505 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
14506 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
14507 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
14508 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
14509 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
14510
14511 #undef ARM_VARIANT
14512 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
14513 cCE(wfs, e200110, 1, (RR), rd),
14514 cCE(rfs, e300110, 1, (RR), rd),
14515 cCE(wfc, e400110, 1, (RR), rd),
14516 cCE(rfc, e500110, 1, (RR), rd),
14517
14518 cCL(ldfs, c100100, 2, (RF, ADDR), rd_cpaddr),
14519 cCL(ldfd, c108100, 2, (RF, ADDR), rd_cpaddr),
14520 cCL(ldfe, c500100, 2, (RF, ADDR), rd_cpaddr),
14521 cCL(ldfp, c508100, 2, (RF, ADDR), rd_cpaddr),
14522
14523 cCL(stfs, c000100, 2, (RF, ADDR), rd_cpaddr),
14524 cCL(stfd, c008100, 2, (RF, ADDR), rd_cpaddr),
14525 cCL(stfe, c400100, 2, (RF, ADDR), rd_cpaddr),
14526 cCL(stfp, c408100, 2, (RF, ADDR), rd_cpaddr),
14527
14528 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
14529 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
14530 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
14531 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
14532 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
14533 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
14534 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
14535 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
14536 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
14537 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
14538 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
14539 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
14540
14541 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
14542 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
14543 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
14544 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
14545 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
14546 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
14547 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
14548 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
14549 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
14550 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
14551 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
14552 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
14553
14554 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
14555 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
14556 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
14557 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
14558 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
14559 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
14560 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
14561 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
14562 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
14563 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
14564 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
14565 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
14566
14567 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
14568 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
14569 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
14570 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
14571 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
14572 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
14573 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
14574 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
14575 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
14576 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
14577 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
14578 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
14579
14580 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
14581 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
14582 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
14583 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
14584 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
14585 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
14586 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
14587 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
14588 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
14589 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
14590 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
14591 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
14592
14593 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
14594 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
14595 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
14596 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
14597 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
14598 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
14599 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
14600 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
14601 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
14602 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
14603 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
14604 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
14605
14606 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
14607 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
14608 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
14609 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
14610 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
14611 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
14612 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
14613 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
14614 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
14615 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
14616 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
14617 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
14618
14619 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
14620 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
14621 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
14622 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
14623 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
14624 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
14625 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
14626 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
14627 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
14628 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
14629 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
14630 cCL(expez, e788160, 2, (RF, RF_IF), rd_rm),
14631
14632 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
14633 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
14634 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
14635 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
14636 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
14637 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
14638 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
14639 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
14640 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
14641 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
14642 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
14643 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
14644
14645 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
14646 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
14647 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
14648 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
14649 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
14650 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
14651 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
14652 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
14653 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
14654 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
14655 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
14656 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
14657
14658 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
14659 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
14660 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
14661 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
14662 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
14663 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
14664 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
14665 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
14666 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
14667 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
14668 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
14669 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
14670
14671 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
14672 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
14673 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
14674 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
14675 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
14676 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
14677 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
14678 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
14679 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
14680 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
14681 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
14682 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
14683
14684 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
14685 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
14686 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
14687 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
14688 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
14689 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
14690 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
14691 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
14692 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
14693 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
14694 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
14695 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
14696
14697 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
14698 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
14699 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
14700 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
14701 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
14702 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
14703 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
14704 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
14705 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
14706 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
14707 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
14708 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
14709
14710 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
14711 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
14712 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
14713 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
14714 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
14715 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
14716 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
14717 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
14718 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
14719 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
14720 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
14721 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
14722
14723 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
14724 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
14725 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
14726 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
14727 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
14728 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
14729 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
14730 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
14731 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
14732 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
14733 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
14734 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
14735
14736 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
14737 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
14738 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
14739 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
14740 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
14741 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14742 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14743 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14744 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
14745 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
14746 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
14747 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
14748
14749 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
14750 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
14751 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
14752 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
14753 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
14754 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14755 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14756 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14757 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
14758 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
14759 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
14760 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
14761
14762 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
14763 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
14764 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
14765 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
14766 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
14767 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14768 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14769 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14770 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
14771 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
14772 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
14773 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
14774
14775 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
14776 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
14777 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
14778 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
14779 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
14780 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14781 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14782 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14783 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
14784 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
14785 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
14786 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
14787
14788 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
14789 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
14790 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
14791 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
14792 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
14793 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14794 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14795 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14796 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
14797 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
14798 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
14799 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
14800
14801 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
14802 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
14803 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
14804 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
14805 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
14806 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14807 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14808 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14809 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
14810 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
14811 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
14812 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
14813
14814 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
14815 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
14816 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
14817 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
14818 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
14819 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14820 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14821 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14822 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
14823 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
14824 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
14825 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
14826
14827 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
14828 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
14829 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
14830 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
14831 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
14832 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14833 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14834 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14835 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
14836 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
14837 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
14838 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
14839
14840 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
14841 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
14842 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
14843 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
14844 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
14845 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14846 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14847 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14848 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
14849 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
14850 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
14851 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
14852
14853 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
14854 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
14855 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
14856 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
14857 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
14858 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14859 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14860 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14861 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
14862 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
14863 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
14864 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
14865
14866 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
14867 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
14868 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
14869 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
14870 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
14871 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14872 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14873 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14874 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
14875 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
14876 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
14877 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
14878
14879 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
14880 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
14881 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
14882 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
14883 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
14884 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14885 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14886 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14887 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
14888 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
14889 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
14890 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
14891
14892 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
14893 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
14894 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
14895 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
14896 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
14897 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
14898 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
14899 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
14900 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
14901 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
14902 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
14903 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
14904
14905 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
14906 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
14907 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
14908 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
14909
14910 cCL(flts, e000110, 2, (RF, RR), rn_rd),
14911 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
14912 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
14913 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
14914 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
14915 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
14916 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
14917 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
14918 cCL(flte, e080110, 2, (RF, RR), rn_rd),
14919 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
14920 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
14921 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
14922
14923 /* The implementation of the FIX instruction is broken on some
14924 assemblers, in that it accepts a precision specifier as well as a
14925 rounding specifier, despite the fact that this is meaningless.
14926 To be more compatible, we accept it as well, though of course it
14927 does not set any bits. */
14928 cCE(fix, e100110, 2, (RR, RF), rd_rm),
14929 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
14930 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
14931 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
14932 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
14933 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
14934 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
14935 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
14936 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
14937 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
14938 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
14939 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
14940 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
14941
14942 /* Instructions that were new with the real FPA, call them V2. */
14943 #undef ARM_VARIANT
14944 #define ARM_VARIANT &fpu_fpa_ext_v2
14945 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
14946 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
14947 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
14948 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
14949 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
14950 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
14951
14952 #undef ARM_VARIANT
14953 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
14954 /* Moves and type conversions. */
14955 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
14956 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
14957 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
14958 cCE(fmstat, ef1fa10, 0, (), noargs),
14959 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
14960 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
14961 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
14962 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
14963 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
14964 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
14965 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
14966 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
14967
14968 /* Memory operations. */
14969 cCE(flds, d100a00, 2, (RVS, ADDR), vfp_sp_ldst),
14970 cCE(fsts, d000a00, 2, (RVS, ADDR), vfp_sp_ldst),
14971 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14972 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14973 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14974 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14975 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14976 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14977 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14978 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14979 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14980 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14981 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14982 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14983 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14984 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14985 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14986 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14987
14988 /* Monadic operations. */
14989 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
14990 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
14991 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
14992
14993 /* Dyadic operations. */
14994 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14995 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14996 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14997 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14998 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14999 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15000 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15001 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15002 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15003
15004 /* Comparisons. */
15005 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
15006 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
15007 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
15008 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
15009
15010 #undef ARM_VARIANT
15011 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15012 /* Moves and type conversions. */
15013 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15014 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15015 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15016 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
15017 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
15018 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
15019 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
15020 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15021 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
15022 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15023 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15024 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15025 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15026
15027 /* Memory operations. */
15028 cCE(fldd, d100b00, 2, (RVD, ADDR), vfp_dp_ldst),
15029 cCE(fstd, d000b00, 2, (RVD, ADDR), vfp_dp_ldst),
15030 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15031 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15032 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15033 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15034 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15035 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15036 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15037 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15038
15039 /* Monadic operations. */
15040 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15041 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15042 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15043
15044 /* Dyadic operations. */
15045 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15046 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15047 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15048 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15049 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15050 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15051 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15052 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15053 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15054
15055 /* Comparisons. */
15056 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15057 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
15058 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15059 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
15060
15061 #undef ARM_VARIANT
15062 #define ARM_VARIANT &fpu_vfp_ext_v2
15063 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
15064 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
15065 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
15066 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
15067
15068 /* Instructions which may belong to either the Neon or VFP instruction sets.
15069 Individual encoder functions perform additional architecture checks. */
15070 #undef ARM_VARIANT
15071 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15072 #undef THUMB_VARIANT
15073 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15074 /* These mnemonics are unique to VFP. */
15075 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
15076 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
15077 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15078 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15079 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15080 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15081 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15082 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
15083 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
15084 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
15085
15086 /* Mnemonics shared by Neon and VFP. */
15087 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
15088 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15089 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15090
15091 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15092 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15093
15094 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15095 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15096
15097 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15098 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15099 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15100 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15101 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15102 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15103 NCE(vldr, d100b00, 2, (RVSD, ADDR), neon_ldr_str),
15104 NCE(vstr, d000b00, 2, (RVSD, ADDR), neon_ldr_str),
15105
15106 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
15107
15108 /* NOTE: All VMOV encoding is special-cased! */
15109 NCE(vmov, 0, 1, (VMOV), neon_mov),
15110 NCE(vmovq, 0, 1, (VMOV), neon_mov),
15111
15112 #undef THUMB_VARIANT
15113 #define THUMB_VARIANT &fpu_neon_ext_v1
15114 #undef ARM_VARIANT
15115 #define ARM_VARIANT &fpu_neon_ext_v1
15116 /* Data processing with three registers of the same length. */
15117 /* Integer ops, valid types S8 S16 S32 U8 U16 U32. */
15118 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
15119 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
15120 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15121 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15122 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15123 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15124 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15125 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15126 /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
15127 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15128 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15129 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15130 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15131 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15132 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15133 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15134 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15135 /* If not immediate, fall back to neon_dyadic_i64_su.
15136 shl_imm should accept I8 I16 I32 I64,
15137 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
15138 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
15139 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
15140 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
15141 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
15142 /* Logic ops, types optional & ignored. */
15143 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
15144 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
15145 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
15146 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
15147 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
15148 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
15149 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
15150 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
15151 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
15152 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
15153 /* Bitfield ops, untyped. */
15154 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15155 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15156 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15157 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15158 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15159 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15160 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15161 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15162 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15163 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15164 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15165 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15166 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15167 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15168 back to neon_dyadic_if_su. */
15169 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15170 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15171 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15172 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15173 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15174 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15175 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15176 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15177 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
15178 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
15179 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
15180 /* As above, D registers only. */
15181 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15182 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15183 /* Int and float variants, signedness unimportant. */
15184 /* If not scalar, fall back to neon_dyadic_if_i. */
15185 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15186 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15187 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
15188 /* Add/sub take types I8 I16 I32 I64 F32. */
15189 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15190 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15191 /* vtst takes sizes 8, 16, 32. */
15192 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
15193 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
15194 /* VMUL takes I8 I16 I32 F32 P8. */
15195 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
15196 /* VQD{R}MULH takes S16 S32. */
15197 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15198 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15199 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15200 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15201 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15202 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15203 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15204 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15205 NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15206 NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15207 NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15208 NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15209 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15210 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15211 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15212 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15213
15214 /* Two address, int/float. Types S8 S16 S32 F32. */
15215 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
15216 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
15217
15218 /* Data processing with two registers and a shift amount. */
15219 /* Right shifts, and variants with rounding.
15220 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15221 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15222 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15223 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15224 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15225 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15226 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15227 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15228 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15229 /* Shift and insert. Sizes accepted 8 16 32 64. */
15230 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
15231 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
15232 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
15233 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
15234 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15235 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
15236 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
15237 /* Right shift immediate, saturating & narrowing, with rounding variants.
15238 Types accepted S16 S32 S64 U16 U32 U64. */
15239 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15240 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15241 /* As above, unsigned. Types accepted S16 S32 S64. */
15242 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15243 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15244 /* Right shift narrowing. Types accepted I16 I32 I64. */
15245 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15246 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15247 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15248 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
15249 /* CVT with optional immediate for fixed-point variant. */
15250 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
15251
15252 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
15253 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
15254
15255 /* Data processing, three registers of different lengths. */
15256 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15257 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
15258 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
15259 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
15260 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
15261 /* If not scalar, fall back to neon_dyadic_long.
15262 Vector types as above, scalar types S16 S32 U16 U32. */
15263 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15264 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15265 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15266 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15267 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15268 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15269 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15270 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15271 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15272 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15273 /* Saturating doubling multiplies. Types S16 S32. */
15274 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15275 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15276 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15277 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15278 S16 S32 U16 U32. */
15279 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
15280
15281 /* Extract. Size 8. */
15282 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
15283 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
15284
15285 /* Two registers, miscellaneous. */
15286 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15287 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
15288 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
15289 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
15290 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
15291 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
15292 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
15293 /* Vector replicate. Sizes 8 16 32. */
15294 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
15295 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
15296 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15297 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
15298 /* VMOVN. Types I16 I32 I64. */
15299 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
15300 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15301 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
15302 /* VQMOVUN. Types S16 S32 S64. */
15303 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
15304 /* VZIP / VUZP. Sizes 8 16 32. */
15305 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
15306 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
15307 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
15308 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
15309 /* VQABS / VQNEG. Types S8 S16 S32. */
15310 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15311 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
15312 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15313 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
15314 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
15315 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
15316 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
15317 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
15318 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
15319 /* Reciprocal estimates. Types U32 F32. */
15320 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
15321 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
15322 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
15323 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
15324 /* VCLS. Types S8 S16 S32. */
15325 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
15326 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
15327 /* VCLZ. Types I8 I16 I32. */
15328 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
15329 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
15330 /* VCNT. Size 8. */
15331 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
15332 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
15333 /* Two address, untyped. */
15334 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
15335 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
15336 /* VTRN. Sizes 8 16 32. */
15337 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
15338 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
15339
15340 /* Table lookup. Size 8. */
15341 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15342 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15343
15344 #undef THUMB_VARIANT
15345 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
15346 #undef ARM_VARIANT
15347 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
15348 /* Neon element/structure load/store. */
15349 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15350 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15351 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15352 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15353 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15354 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15355 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15356 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15357
15358 #undef THUMB_VARIANT
15359 #define THUMB_VARIANT &fpu_vfp_ext_v3
15360 #undef ARM_VARIANT
15361 #define ARM_VARIANT &fpu_vfp_ext_v3
15362 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
15363 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
15364 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15365 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15366 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15367 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15368 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15369 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15370 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15371 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15372 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15373 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15374 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15375 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15376 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15377 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15378 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15379 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15380
15381 #undef THUMB_VARIANT
15382 #undef ARM_VARIANT
15383 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
15384 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15385 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15386 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15387 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15388 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15389 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15390 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
15391 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
15392
15393 #undef ARM_VARIANT
15394 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
15395 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
15396 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
15397 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
15398 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
15399 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
15400 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
15401 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
15402 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
15403 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
15404 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15405 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15406 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15407 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15408 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15409 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15410 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15411 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15412 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15413 cCE(tmcr, e000110, 2, (RIWC, RR), rn_rd),
15414 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
15415 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15416 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15417 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15418 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15419 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15420 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15421 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
15422 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
15423 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
15424 cCE(tmrc, e100110, 2, (RR, RIWC), rd_rn),
15425 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
15426 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
15427 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
15428 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
15429 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
15430 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
15431 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
15432 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15433 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15434 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15435 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15436 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15437 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15438 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15439 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15440 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15441 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
15442 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15443 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15444 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15445 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15446 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15447 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15448 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15449 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15450 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15451 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15452 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15453 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15454 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15455 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15456 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15457 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15458 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15459 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15460 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15461 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15462 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15463 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
15464 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
15465 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15466 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15467 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15468 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15469 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15470 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15471 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15472 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15473 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15474 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15475 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15476 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15477 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15478 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15479 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15480 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15481 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15482 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15483 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
15484 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15485 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15486 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15487 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15488 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15489 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15490 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15491 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15492 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15493 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15494 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15495 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15496 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15497 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15498 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15499 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15500 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15501 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15502 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15503 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15504 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15505 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
15506 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15507 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15508 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15509 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15510 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15511 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15512 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15513 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15514 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15515 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15516 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15517 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15518 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15519 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15520 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15521 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15522 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15523 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
15524 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15525 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15526 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
15527 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
15528 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15529 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15530 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15531 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15532 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15533 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15534 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15535 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15536 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15537 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
15538 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
15539 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
15540 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
15541 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
15542 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
15543 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15544 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15545 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15546 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
15547 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
15548 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
15549 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
15550 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
15551 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
15552 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15553 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15554 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15555 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15556 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
15557
15558 #undef ARM_VARIANT
15559 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
15560 cCE(cfldrs, c100400, 2, (RMF, ADDR), rd_cpaddr),
15561 cCE(cfldrd, c500400, 2, (RMD, ADDR), rd_cpaddr),
15562 cCE(cfldr32, c100500, 2, (RMFX, ADDR), rd_cpaddr),
15563 cCE(cfldr64, c500500, 2, (RMDX, ADDR), rd_cpaddr),
15564 cCE(cfstrs, c000400, 2, (RMF, ADDR), rd_cpaddr),
15565 cCE(cfstrd, c400400, 2, (RMD, ADDR), rd_cpaddr),
15566 cCE(cfstr32, c000500, 2, (RMFX, ADDR), rd_cpaddr),
15567 cCE(cfstr64, c400500, 2, (RMDX, ADDR), rd_cpaddr),
15568 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
15569 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
15570 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
15571 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
15572 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
15573 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
15574 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
15575 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
15576 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
15577 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
15578 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
15579 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
15580 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
15581 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
15582 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
15583 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
15584 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
15585 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
15586 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
15587 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
15588 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
15589 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
15590 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
15591 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
15592 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
15593 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
15594 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
15595 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
15596 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
15597 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
15598 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
15599 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
15600 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
15601 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
15602 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
15603 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
15604 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
15605 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
15606 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
15607 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
15608 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
15609 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
15610 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
15611 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
15612 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
15613 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
15614 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
15615 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
15616 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
15617 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
15618 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
15619 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
15620 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
15621 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
15622 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
15623 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
15624 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
15625 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
15626 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
15627 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
15628 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
15629 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
15630 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
15631 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
15632 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
15633 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
15634 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
15635 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
15636 };
15637 #undef ARM_VARIANT
15638 #undef THUMB_VARIANT
15639 #undef TCE
15640 #undef TCM
15641 #undef TUE
15642 #undef TUF
15643 #undef TCC
15644 #undef cCE
15645 #undef cCL
15646 #undef C3E
15647 #undef CE
15648 #undef CM
15649 #undef UE
15650 #undef UF
15651 #undef UT
15652 #undef NUF
15653 #undef nUF
15654 #undef NCE
15655 #undef nCE
15656 #undef OPS0
15657 #undef OPS1
15658 #undef OPS2
15659 #undef OPS3
15660 #undef OPS4
15661 #undef OPS5
15662 #undef OPS6
15663 #undef do_0
15664 \f
15665 /* MD interface: bits in the object file. */
15666
15667 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
15668    for use in the a.out file, and store them in the array pointed to by buf.
15669    This knows about the endianness of the target machine and does
15670    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
15671    2 (short) and 4 (long).  Floating point numbers are put out as a series
15672    of LITTLENUMS (shorts, here at least).  */
15673
15674 void
15675 md_number_to_chars (char * buf, valueT val, int n)
15676 {
15677 if (target_big_endian)
15678 number_to_chars_bigendian (buf, val, n);
15679 else
15680 number_to_chars_littleendian (buf, val, n);
15681 }
15682
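/* Read an N-byte value from BUF in the target's endianness; the inverse of
   md_number_to_chars above.  For example, with n == 2 the bytes
   { 0x34, 0x12 } yield 0x1234 on a little-endian target and 0x3412 on a
   big-endian one.  */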
15683 static valueT
15684 md_chars_to_number (char * buf, int n)
15685 {
15686 valueT result = 0;
15687 unsigned char * where = (unsigned char *) buf;
15688
15689 if (target_big_endian)
15690 {
15691 while (n--)
15692 {
15693 result <<= 8;
15694 result |= (*where++ & 255);
15695 }
15696 }
15697 else
15698 {
15699 while (n--)
15700 {
15701 result <<= 8;
15702 result |= (where[n] & 255);
15703 }
15704 }
15705
15706 return result;
15707 }
15708
15709 /* MD interface: Sections. */
15710
15711 /* Estimate the size of a frag before relaxing. Assume everything fits in
15712 2 bytes. */
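/* arm_relax_frag below grows the frag to 4 bytes whenever the 16-bit
   encoding turns out not to be usable.  */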
15713
15714 int
15715 md_estimate_size_before_relax (fragS * fragp,
15716 segT segtype ATTRIBUTE_UNUSED)
15717 {
15718 fragp->fr_var = 2;
15719 return 2;
15720 }
15721
15722 /* Convert a machine dependent frag. */
15723
15724 void
15725 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
15726 {
15727 unsigned long insn;
15728 unsigned long old_op;
15729 char *buf;
15730 expressionS exp;
15731 fixS *fixp;
15732 int reloc_type;
15733 int pc_rel;
15734 int opcode;
15735
15736 buf = fragp->fr_literal + fragp->fr_fix;
15737
15738 old_op = bfd_get_16(abfd, buf);
15739 if (fragp->fr_symbol) {
15740 exp.X_op = O_symbol;
15741 exp.X_add_symbol = fragp->fr_symbol;
15742 } else {
15743 exp.X_op = O_constant;
15744 }
15745 exp.X_add_number = fragp->fr_offset;
15746 opcode = fragp->fr_subtype;
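/* fr_var is 4 if the frag was relaxed to the 32-bit Thumb-2 encoding,
   or 2 if the original 16-bit encoding was kept; in the 4-byte case the
   insn is rewritten here, otherwise only the fixup emitted below differs.  */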
15747 switch (opcode)
15748 {
15749 case T_MNEM_ldr_pc:
15750 case T_MNEM_ldr_pc2:
15751 case T_MNEM_ldr_sp:
15752 case T_MNEM_str_sp:
15753 case T_MNEM_ldr:
15754 case T_MNEM_ldrb:
15755 case T_MNEM_ldrh:
15756 case T_MNEM_str:
15757 case T_MNEM_strb:
15758 case T_MNEM_strh:
15759 if (fragp->fr_var == 4)
15760 {
15761 insn = THUMB_OP32(opcode);
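/* The narrow PC-relative (LDR literal) and SP-relative forms have 0x4 or
   0x9 in bits [15:12] and keep Rt in bits [10:8]; the other narrow
   load/store forms keep Rt in bits [2:0] and Rn in bits [5:3].  Move
   those fields into the corresponding positions of the wide encoding.  */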
15762 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
15763 {
15764 insn |= (old_op & 0x700) << 4;
15765 }
15766 else
15767 {
15768 insn |= (old_op & 7) << 12;
15769 insn |= (old_op & 0x38) << 13;
15770 }
15771 insn |= 0x00000c00;
15772 put_thumb32_insn (buf, insn);
15773 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
15774 }
15775 else
15776 {
15777 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
15778 }
15779 pc_rel = (opcode == T_MNEM_ldr_pc2);
15780 break;
15781 case T_MNEM_adr:
15782 if (fragp->fr_var == 4)
15783 {
15784 insn = THUMB_OP32 (opcode);
15785 insn |= (old_op & 0xf0) << 4;
15786 put_thumb32_insn (buf, insn);
15787 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
15788 }
15789 else
15790 {
15791 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
15792 exp.X_add_number -= 4;
15793 }
15794 pc_rel = 1;
15795 break;
15796 case T_MNEM_mov:
15797 case T_MNEM_movs:
15798 case T_MNEM_cmp:
15799 case T_MNEM_cmn:
15800 if (fragp->fr_var == 4)
15801 {
15802 int r0off = (opcode == T_MNEM_mov
15803 || opcode == T_MNEM_movs) ? 0 : 8;
15804 insn = THUMB_OP32 (opcode);
15805 insn = (insn & 0xe1ffffff) | 0x10000000;
15806 insn |= (old_op & 0x700) << r0off;
15807 put_thumb32_insn (buf, insn);
15808 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
15809 }
15810 else
15811 {
15812 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
15813 }
15814 pc_rel = 0;
15815 break;
15816 case T_MNEM_b:
15817 if (fragp->fr_var == 4)
15818 {
15819 insn = THUMB_OP32(opcode);
15820 put_thumb32_insn (buf, insn);
15821 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
15822 }
15823 else
15824 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
15825 pc_rel = 1;
15826 break;
15827 case T_MNEM_bcond:
15828 if (fragp->fr_var == 4)
15829 {
15830 insn = THUMB_OP32(opcode);
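/* Copy the condition from bits [11:8] of the narrow B<cond> into
   bits [25:22] of the wide encoding.  */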
15831 insn |= (old_op & 0xf00) << 14;
15832 put_thumb32_insn (buf, insn);
15833 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
15834 }
15835 else
15836 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
15837 pc_rel = 1;
15838 break;
15839 case T_MNEM_add_sp:
15840 case T_MNEM_add_pc:
15841 case T_MNEM_inc_sp:
15842 case T_MNEM_dec_sp:
15843 if (fragp->fr_var == 4)
15844 {
15845 /* ??? Choose between add and addw. */
15846 insn = THUMB_OP32 (opcode);
15847 insn |= (old_op & 0xf0) << 4;
15848 put_thumb32_insn (buf, insn);
15849 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
15850 }
15851 else
15852 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
15853 pc_rel = 0;
15854 break;
15855
15856 case T_MNEM_addi:
15857 case T_MNEM_addis:
15858 case T_MNEM_subi:
15859 case T_MNEM_subis:
15860 if (fragp->fr_var == 4)
15861 {
15862 insn = THUMB_OP32 (opcode);
15863 insn |= (old_op & 0xf0) << 4;
15864 insn |= (old_op & 0xf) << 16;
15865 put_thumb32_insn (buf, insn);
15866 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
15867 }
15868 else
15869 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
15870 pc_rel = 0;
15871 break;
15872 default:
15873 abort();
15874 }
15875 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
15876 reloc_type);
15877 fixp->fx_file = fragp->fr_file;
15878 fixp->fx_line = fragp->fr_line;
15879 fragp->fr_fix += fragp->fr_var;
15880 }
15881
15882 /* Return the size of a relaxable immediate operand instruction.
15883 SHIFT and SIZE specify the form of the allowable immediate. */
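/* For example, the Thumb-1 LDR/STR word forms use SIZE == 5 and SHIFT == 2,
   so only word-aligned offsets in the range 0..124 keep the 16-bit
   encoding; anything else needs the 32-bit form.  */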
15884 static int
15885 relax_immediate (fragS *fragp, int size, int shift)
15886 {
15887 offsetT offset;
15888 offsetT mask;
15889 offsetT low;
15890
15891 /* ??? Should be able to do better than this. */
15892 if (fragp->fr_symbol)
15893 return 4;
15894
15895 low = (1 << shift) - 1;
15896 mask = (1 << (shift + size)) - (1 << shift);
15897 offset = fragp->fr_offset;
15898 /* Force misaligned offsets to the 32-bit variant. */
15899 if (offset & low)
15900 return -4;
15901 if (offset & ~mask)
15902 return 4;
15903 return 2;
15904 }
15905
15906 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
15907 load. */
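/* The narrow forms encode a word-aligned offset of 0..1020 bytes from the
   PC, where the PC is taken as the address of this instruction plus 4,
   rounded down to a multiple of 4.  */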
15908 static int
15909 relax_adr (fragS *fragp, asection *sec)
15910 {
15911 addressT addr;
15912 offsetT val;
15913
15914 /* Assume worst case for symbols not known to be in the same section. */
15915 if (!S_IS_DEFINED(fragp->fr_symbol)
15916 || sec != S_GET_SEGMENT (fragp->fr_symbol))
15917 return 4;
15918
15919 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
15920 addr = fragp->fr_address + fragp->fr_fix;
15921 addr = (addr + 4) & ~3;
15922 /* Force the insn to the 4-byte version if the target address is not
15923    sufficiently aligned.  This prevents an infinite loop when two
15924    instructions have contradictory range/alignment requirements.  */
15925 if (val & 3)
15926 return -4;
15927 val -= addr;
15928 if (val < 0 || val > 1020)
15929 return 4;
15930 return 2;
15931 }
15932
15933 /* Return the size of a relaxable add/sub immediate instruction. */
15934 static int
15935 relax_addsub (fragS *fragp, asection *sec)
15936 {
15937 char *buf;
15938 int op;
15939
15940 buf = fragp->fr_literal + fragp->fr_fix;
15941 op = bfd_get_16(sec->owner, buf);
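/* The placeholder opcode for a relaxable add/sub stores Rd in bits [7:4]
   and Rn in bits [3:0] (see the T_MNEM_addi case in md_convert_frag); the
   8-bit immediate form is only available when Rd and Rn are the same
   register, otherwise only the 3-bit immediate fits.  */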
15942 if ((op & 0xf) == ((op >> 4) & 0xf))
15943 return relax_immediate (fragp, 8, 0);
15944 else
15945 return relax_immediate (fragp, 3, 0);
15946 }
15947
15948
15949 /* Return the size of a relaxable branch instruction. BITS is the
15950 size of the offset field in the narrow instruction. */
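/* With BITS == 11 (unconditional B) the narrow encoding reaches roughly
   +/-2KB; with BITS == 8 (conditional branches) roughly +/-256 bytes.
   Anything further away needs the 32-bit encoding.  */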
15951
15952 static int
15953 relax_branch (fragS *fragp, asection *sec, int bits)
15954 {
15955 addressT addr;
15956 offsetT val;
15957 offsetT limit;
15958
15959 /* Assume worst case for symbols not known to be in the same section. */
15960 if (!S_IS_DEFINED(fragp->fr_symbol)
15961 || sec != S_GET_SEGMENT (fragp->fr_symbol))
15962 return 4;
15963
15964 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
15965 addr = fragp->fr_address + fragp->fr_fix + 4;
15966 val -= addr;
15967
15968 /* The encoded offset is a signed value, implicitly multiplied by 2. */
15969 limit = 1 << bits;
15970 if (val >= limit || val < -limit)
15971 return 4;
15972 return 2;
15973 }
15974
15975
15976 /* Relax a machine dependent frag. This returns the amount by which
15977 the current size of the frag should change. */
15978
15979 int
15980 arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
15981 {
15982 int oldsize;
15983 int newsize;
15984
15985 oldsize = fragp->fr_var;
15986 switch (fragp->fr_subtype)
15987 {
15988 case T_MNEM_ldr_pc2:
15989 newsize = relax_adr(fragp, sec);
15990 break;
15991 case T_MNEM_ldr_pc:
15992 case T_MNEM_ldr_sp:
15993 case T_MNEM_str_sp:
15994 newsize = relax_immediate(fragp, 8, 2);
15995 break;
15996 case T_MNEM_ldr:
15997 case T_MNEM_str:
15998 newsize = relax_immediate(fragp, 5, 2);
15999 break;
16000 case T_MNEM_ldrh:
16001 case T_MNEM_strh:
16002 newsize = relax_immediate(fragp, 5, 1);
16003 break;
16004 case T_MNEM_ldrb:
16005 case T_MNEM_strb:
16006 newsize = relax_immediate(fragp, 5, 0);
16007 break;
16008 case T_MNEM_adr:
16009 newsize = relax_adr(fragp, sec);
16010 break;
16011 case T_MNEM_mov:
16012 case T_MNEM_movs:
16013 case T_MNEM_cmp:
16014 case T_MNEM_cmn:
16015 newsize = relax_immediate(fragp, 8, 0);
16016 break;
16017 case T_MNEM_b:
16018 newsize = relax_branch(fragp, sec, 11);
16019 break;
16020 case T_MNEM_bcond:
16021 newsize = relax_branch(fragp, sec, 8);
16022 break;
16023 case T_MNEM_add_sp:
16024 case T_MNEM_add_pc:
16025 newsize = relax_immediate (fragp, 8, 2);
16026 break;
16027 case T_MNEM_inc_sp:
16028 case T_MNEM_dec_sp:
16029 newsize = relax_immediate (fragp, 7, 2);
16030 break;
16031 case T_MNEM_addi:
16032 case T_MNEM_addis:
16033 case T_MNEM_subi:
16034 case T_MNEM_subis:
16035 newsize = relax_addsub (fragp, sec);
16036 break;
16037 default:
16038 abort();
16039 }
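/* A negative size from the helpers means "use the 32-bit form and do not
   relax this frag again": convert it immediately and wane the frag so it
   cannot change size any more.  */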
16040 if (newsize < 0)
16041 {
16042 fragp->fr_var = -newsize;
16043 md_convert_frag (sec->owner, sec, fragp);
16044 frag_wane(fragp);
16045 return -(newsize + oldsize);
16046 }
16047 fragp->fr_var = newsize;
16048 return newsize - oldsize;
16049 }
16050
16051 /* Round up a section size to the appropriate boundary. */
16052
16053 valueT
16054 md_section_align (segT segment ATTRIBUTE_UNUSED,
16055 valueT size)
16056 {
16057 #ifdef OBJ_ELF
16058 return size;
16059 #else
16060 /* Round all sections to a multiple of 4. */
16061 return (size + 3) & ~3;
16062 #endif
16063 }
16064
16065 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16066 of an rs_align_code fragment. */
16067
16068 void
16069 arm_handle_align (fragS * fragP)
16070 {
16071 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
16072 static char const thumb_noop[2] = { 0xc0, 0x46 };
16073 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
16074 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
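/* The padding patterns are MOV r0, r0 (0xe1a00000) for ARM and
   MOV r8, r8 (0x46c0) for Thumb, stored for both byte orders.  */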
16075
16076 int bytes, fix, noop_size;
16077 char * p;
16078 const char * noop;
16079
16080 if (fragP->fr_type != rs_align_code)
16081 return;
16082
16083 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
16084 p = fragP->fr_literal + fragP->fr_fix;
16085 fix = 0;
16086
16087 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
16088 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
16089
16090 if (fragP->tc_frag_data)
16091 {
16092 if (target_big_endian)
16093 noop = thumb_bigend_noop;
16094 else
16095 noop = thumb_noop;
16096 noop_size = sizeof (thumb_noop);
16097 }
16098 else
16099 {
16100 if (target_big_endian)
16101 noop = arm_bigend_noop;
16102 else
16103 noop = arm_noop;
16104 noop_size = sizeof (arm_noop);
16105 }
16106
16107 if (bytes & (noop_size - 1))
16108 {
16109 fix = bytes & (noop_size - 1);
16110 memset (p, 0, fix);
16111 p += fix;
16112 bytes -= fix;
16113 }
16114
16115 while (bytes >= noop_size)
16116 {
16117 memcpy (p, noop, noop_size);
16118 p += noop_size;
16119 bytes -= noop_size;
16120 fix += noop_size;
16121 }
16122
16123 fragP->fr_fix += fix;
16124 fragP->fr_var = noop_size;
16125 }
16126
16127 /* Called from md_do_align. Used to create an alignment
16128 frag in a code section. */
16129
16130 void
16131 arm_frag_align_code (int n, int max)
16132 {
16133 char * p;
16134
16135 /* We assume that there will never be a requirement
16136 to support alignments greater than 32 bytes. */
16137 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
16138 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16139
16140 p = frag_var (rs_align_code,
16141 MAX_MEM_FOR_RS_ALIGN_CODE,
16142 1,
16143 (relax_substateT) max,
16144 (symbolS *) NULL,
16145 (offsetT) n,
16146 (char *) NULL);
16147 *p = 0;
16148 }
16149
16150 /* Perform target specific initialisation of a frag. */
16151
16152 void
16153 arm_init_frag (fragS * fragP)
16154 {
16155 /* Record whether this frag is in an ARM or a THUMB area. */
16156 fragP->tc_frag_data = thumb_mode;
16157 }
16158
16159 #ifdef OBJ_ELF
16160 /* When we change sections we need to issue a new mapping symbol. */
16161
16162 void
16163 arm_elf_change_section (void)
16164 {
16165 flagword flags;
16166 segment_info_type *seginfo;
16167
16168 /* Link an unlinked unwind index table section to the .text section. */
16169 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
16170 && elf_linked_to_section (now_seg) == NULL)
16171 elf_linked_to_section (now_seg) = text_section;
16172
16173 if (!SEG_NORMAL (now_seg))
16174 return;
16175
16176 flags = bfd_get_section_flags (stdoutput, now_seg);
16177
16178 /* We can ignore sections that only contain debug info. */
16179 if ((flags & SEC_ALLOC) == 0)
16180 return;
16181
16182 seginfo = seg_info (now_seg);
16183 mapstate = seginfo->tc_segment_info_data.mapstate;
16184 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
16185 }
16186
16187 int
16188 arm_elf_section_type (const char * str, size_t len)
16189 {
16190 if (len == 5 && strncmp (str, "exidx", 5) == 0)
16191 return SHT_ARM_EXIDX;
16192
16193 return -1;
16194 }
16195 \f
16196 /* Code to deal with unwinding tables. */
16197
16198 static void add_unwind_adjustsp (offsetT);
16199
16200 /* Generate any deferred unwind frame offset. */
16201
16202 static void
16203 flush_pending_unwind (void)
16204 {
16205 offsetT offset;
16206
16207 offset = unwind.pending_offset;
16208 unwind.pending_offset = 0;
16209 if (offset != 0)
16210 add_unwind_adjustsp (offset);
16211 }
16212
16213 /* Add an opcode to this list for this function. Two-byte opcodes should
16214 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
16215 order. */
16216
16217 static void
16218 add_unwind_opcode (valueT op, int length)
16219 {
16220 /* Add any deferred stack adjustment. */
16221 if (unwind.pending_offset)
16222 flush_pending_unwind ();
16223
16224 unwind.sp_restored = 0;
16225
16226 if (unwind.opcode_count + length > unwind.opcode_alloc)
16227 {
16228 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
16229 if (unwind.opcodes)
16230 unwind.opcodes = xrealloc (unwind.opcodes,
16231 unwind.opcode_alloc);
16232 else
16233 unwind.opcodes = xmalloc (unwind.opcode_alloc);
16234 }
16235 while (length > 0)
16236 {
16237 length--;
16238 unwind.opcodes[unwind.opcode_count] = op & 0xff;
16239 op >>= 8;
16240 unwind.opcode_count++;
16241 }
16242 }
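
/* For example, add_unwind_opcode (0xab12, 2) stores 0x12 and then 0xab in
   the reverse-ordered list; when create_unwind_entry later packs the list
   MSB first, the byte stream reads 0xab 0x12, i.e. op[0] before op[1]. */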
16243
16244 /* Add unwind opcodes to adjust the stack pointer. */
16245
16246 static void
16247 add_unwind_adjustsp (offsetT offset)
16248 {
16249 valueT op;
16250
16251 if (offset > 0x200)
16252 {
16253 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
16254 char bytes[5];
16255 int n;
16256 valueT o;
16257
16258 /* Long form: 0xb2, uleb128. */
16259 /* This might not fit in a word so add the individual bytes,
16260 remembering the list is built in reverse order. */
16261 o = (valueT) ((offset - 0x204) >> 2);
16262 if (o == 0)
16263 add_unwind_opcode (0, 1);
16264
16265 /* Calculate the uleb128 encoding of the offset. */
16266 n = 0;
16267 while (o)
16268 {
16269 bytes[n] = o & 0x7f;
16270 o >>= 7;
16271 if (o)
16272 bytes[n] |= 0x80;
16273 n++;
16274 }
16275 /* Add the insn. */
16276 for (; n; n--)
16277 add_unwind_opcode (bytes[n - 1], 1);
16278 add_unwind_opcode (0xb2, 1);
16279 }
16280 else if (offset > 0x100)
16281 {
16282 /* Two short opcodes. */
16283 add_unwind_opcode (0x3f, 1);
16284 op = (offset - 0x104) >> 2;
16285 add_unwind_opcode (op, 1);
16286 }
16287 else if (offset > 0)
16288 {
16289 /* Short opcode. */
16290 op = (offset - 4) >> 2;
16291 add_unwind_opcode (op, 1);
16292 }
16293 else if (offset < 0)
16294 {
16295 offset = -offset;
16296 while (offset > 0x100)
16297 {
16298 add_unwind_opcode (0x7f, 1);
16299 offset -= 0x100;
16300 }
16301 op = ((offset - 4) >> 2) | 0x40;
16302 add_unwind_opcode (op, 1);
16303 }
16304 }
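
/* Worked examples of the EHABI encodings used above: an adjustment of 0x10
   uses the short form, opcode (0x10 - 4) >> 2 = 0x03; an adjustment of
   0x300 takes the long form, o = (0x300 - 0x204) >> 2 = 0x3f, giving the
   byte sequence 0xb2 0x3f once the reverse-ordered list is flushed. */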
16305
16306 /* Finish the list of unwind opcodes for this function. */
16307 static void
16308 finish_unwind_opcodes (void)
16309 {
16310 valueT op;
16311
16312 if (unwind.fp_used)
16313 {
16314 /* Adjust sp as necessary. */
16315 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
16316 flush_pending_unwind ();
16317
16318 /* Restore sp from the frame pointer register. */
16319 op = 0x90 | unwind.fp_reg;
16320 add_unwind_opcode (op, 1);
16321 }
16322 else
16323 flush_pending_unwind ();
16324 }
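
/* For instance, with a frame pointer in r11 the opcode emitted here is
   0x90 | 11 = 0x9b, the EHABI "set vsp from r11" operation; the preceding
   pending_offset arithmetic folds in the difference between fp_offset and
   frame_size recorded when the frame pointer was established. */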
16325
16326
16327 /* Start an exception table entry. If idx is nonzero this is an index table
16328 entry. */
16329
16330 static void
16331 start_unwind_section (const segT text_seg, int idx)
16332 {
16333 const char * text_name;
16334 const char * prefix;
16335 const char * prefix_once;
16336 const char * group_name;
16337 size_t prefix_len;
16338 size_t text_len;
16339 char * sec_name;
16340 size_t sec_name_len;
16341 int type;
16342 int flags;
16343 int linkonce;
16344
16345 if (idx)
16346 {
16347 prefix = ELF_STRING_ARM_unwind;
16348 prefix_once = ELF_STRING_ARM_unwind_once;
16349 type = SHT_ARM_EXIDX;
16350 }
16351 else
16352 {
16353 prefix = ELF_STRING_ARM_unwind_info;
16354 prefix_once = ELF_STRING_ARM_unwind_info_once;
16355 type = SHT_PROGBITS;
16356 }
16357
16358 text_name = segment_name (text_seg);
16359 if (streq (text_name, ".text"))
16360 text_name = "";
16361
16362 if (strncmp (text_name, ".gnu.linkonce.t.",
16363 strlen (".gnu.linkonce.t.")) == 0)
16364 {
16365 prefix = prefix_once;
16366 text_name += strlen (".gnu.linkonce.t.");
16367 }
16368
16369 prefix_len = strlen (prefix);
16370 text_len = strlen (text_name);
16371 sec_name_len = prefix_len + text_len;
16372 sec_name = xmalloc (sec_name_len + 1);
16373 memcpy (sec_name, prefix, prefix_len);
16374 memcpy (sec_name + prefix_len, text_name, text_len);
16375 sec_name[prefix_len + text_len] = '\0';
16376
16377 flags = SHF_ALLOC;
16378 linkonce = 0;
16379 group_name = 0;
16380
16381 /* Handle COMDAT group. */
16382 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
16383 {
16384 group_name = elf_group_name (text_seg);
16385 if (group_name == NULL)
16386 {
16387 as_bad (_("Group section `%s' has no group signature"),
16388 segment_name (text_seg));
16389 ignore_rest_of_line ();
16390 return;
16391 }
16392 flags |= SHF_GROUP;
16393 linkonce = 1;
16394 }
16395
16396 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
16397
16398 /* Set the section link for index tables. */
16399 if (idx)
16400 elf_linked_to_section (now_seg) = text_seg;
16401 }
16402
16403
16404 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
16405 personality routine data. Returns zero, or the index table value for
16406 an inline entry. */
16407
16408 static valueT
16409 create_unwind_entry (int have_data)
16410 {
16411 int size;
16412 addressT where;
16413 char *ptr;
16414 /* The current word of data. */
16415 valueT data;
16416 /* The number of bytes left in this word. */
16417 int n;
16418
16419 finish_unwind_opcodes ();
16420
16421 /* Remember the current text section. */
16422 unwind.saved_seg = now_seg;
16423 unwind.saved_subseg = now_subseg;
16424
16425 start_unwind_section (now_seg, 0);
16426
16427 if (unwind.personality_routine == NULL)
16428 {
16429 if (unwind.personality_index == -2)
16430 {
16431 if (have_data)
16432 as_bad (_("handlerdata in cantunwind frame"));
16433 return 1; /* EXIDX_CANTUNWIND. */
16434 }
16435
16436 /* Use a default personality routine if none is specified. */
16437 if (unwind.personality_index == -1)
16438 {
16439 if (unwind.opcode_count > 3)
16440 unwind.personality_index = 1;
16441 else
16442 unwind.personality_index = 0;
16443 }
16444
16445 /* Space for the personality routine entry. */
16446 if (unwind.personality_index == 0)
16447 {
16448 if (unwind.opcode_count > 3)
16449 as_bad (_("too many unwind opcodes for personality routine 0"));
16450
16451 if (!have_data)
16452 {
16453 /* All the data is inline in the index table. */
16454 data = 0x80;
16455 n = 3;
16456 while (unwind.opcode_count > 0)
16457 {
16458 unwind.opcode_count--;
16459 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
16460 n--;
16461 }
16462
16463 /* Pad with "finish" opcodes. */
16464 while (n--)
16465 data = (data << 8) | 0xb0;
16466
16467 return data;
16468 }
16469 size = 0;
16470 }
16471 else
16472 /* We get two opcodes "free" in the first word. */
16473 size = unwind.opcode_count - 2;
16474 }
16475 else
16476 /* An extra byte is required for the opcode count. */
16477 size = unwind.opcode_count + 1;
16478
16479 size = (size + 3) >> 2;
16480 if (size > 0xff)
16481 as_bad (_("too many unwind opcodes"));
16482
16483 frag_align (2, 0, 0);
16484 record_alignment (now_seg, 2);
16485 unwind.table_entry = expr_build_dot ();
16486
16487 /* Allocate the table entry. */
16488 ptr = frag_more ((size << 2) + 4);
16489 where = frag_now_fix () - ((size << 2) + 4);
16490
16491 switch (unwind.personality_index)
16492 {
16493 case -1:
16494 /* ??? Should this be a PLT generating relocation? */
16495 /* Custom personality routine. */
16496 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
16497 BFD_RELOC_ARM_PREL31);
16498
16499 where += 4;
16500 ptr += 4;
16501
16502 /* Set the first byte to the number of additional words. */
16503 data = size - 1;
16504 n = 3;
16505 break;
16506
16507 /* ABI defined personality routines. */
16508 case 0:
16509 /* Three opcode bytes are packed into the first word. */
16510 data = 0x80;
16511 n = 3;
16512 break;
16513
16514 case 1:
16515 case 2:
16516 /* The size and first two opcode bytes go in the first word. */
16517 data = ((0x80 + unwind.personality_index) << 8) | size;
16518 n = 2;
16519 break;
16520
16521 default:
16522 /* Should never happen. */
16523 abort ();
16524 }
16525
16526 /* Pack the opcodes into words (MSB first), reversing the list at the same
16527 time. */
16528 while (unwind.opcode_count > 0)
16529 {
16530 if (n == 0)
16531 {
16532 md_number_to_chars (ptr, data, 4);
16533 ptr += 4;
16534 n = 4;
16535 data = 0;
16536 }
16537 unwind.opcode_count--;
16538 n--;
16539 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
16540 }
16541
16542 /* Finish off the last word. */
16543 if (n < 4)
16544 {
16545 /* Pad with "finish" opcodes. */
16546 while (n--)
16547 data = (data << 8) | 0xb0;
16548
16549 md_number_to_chars (ptr, data, 4);
16550 }
16551
16552 if (!have_data)
16553 {
16554 /* Add an empty descriptor if there is no user-specified data. */
16555 ptr = frag_more (4);
16556 md_number_to_chars (ptr, 0, 4);
16557 }
16558
16559 return 0;
16560 }
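
/* As an illustration, a frame with no unwind opcodes at all and no handler
   data selects personality routine 0 and is described entirely inline: the
   word returned is 0x80b0b0b0 (index 0 padded with "finish" opcodes), so no
   separate extab entry is needed for it. */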
16561
16562 /* Convert REGNAME to a DWARF-2 register number. */
16563
16564 int
16565 tc_arm_regname_to_dw2regnum (char *regname)
16566 {
16567 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
16568
16569 if (reg == FAIL)
16570 return -1;
16571
16572 return reg;
16573 }
16574
16575 /* Initialize the DWARF-2 unwind information for this procedure. */
16576
16577 void
16578 tc_arm_frame_initial_instructions (void)
16579 {
16580 cfi_add_CFA_def_cfa (REG_SP, 0);
16581 }
16582 #endif /* OBJ_ELF */
16583
16584
16585 /* MD interface: Symbol and relocation handling. */
16586
16587 /* Return the address within the segment that a PC-relative fixup is
16588 relative to. For ARM, PC-relative fixups applied to instructions
16589 are generally relative to the location of the fixup plus 8 bytes.
16590 Thumb branches are offset by 4, and Thumb loads relative to PC
16591 require special handling. */
16592
16593 long
16594 md_pcrel_from_section (fixS * fixP, segT seg)
16595 {
16596 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
16597
16598 /* If this is pc-relative and we are going to emit a relocation
16599 then we just want to put out any pipeline compensation that the linker
16600 will need. Otherwise we want to use the calculated base.
16601 For WinCE we skip the bias for externals as well, since this
16602 is how the MS ARM-CE assembler behaves and we want to be compatible. */
16603 if (fixP->fx_pcrel
16604 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
16605 || (arm_force_relocation (fixP)
16606 #ifdef TE_WINCE
16607 && !S_IS_EXTERNAL (fixP->fx_addsy)
16608 #endif
16609 )))
16610 base = 0;
16611
16612 switch (fixP->fx_r_type)
16613 {
16614 /* PC relative addressing on the Thumb is slightly odd as the
16615 bottom two bits of the PC are forced to zero for the
16616 calculation. This happens *after* application of the
16617 pipeline offset. However, Thumb adrl already adjusts for
16618 this, so we need not do it again. */
16619 case BFD_RELOC_ARM_THUMB_ADD:
16620 return base & ~3;
16621
16622 case BFD_RELOC_ARM_THUMB_OFFSET:
16623 case BFD_RELOC_ARM_T32_OFFSET_IMM:
16624 case BFD_RELOC_ARM_T32_ADD_PC12:
16625 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
16626 return (base + 4) & ~3;
16627
16628 /* Thumb branches are simply offset by +4. */
16629 case BFD_RELOC_THUMB_PCREL_BRANCH7:
16630 case BFD_RELOC_THUMB_PCREL_BRANCH9:
16631 case BFD_RELOC_THUMB_PCREL_BRANCH12:
16632 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16633 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16634 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16635 case BFD_RELOC_THUMB_PCREL_BLX:
16636 return base + 4;
16637
16638 /* ARM mode branches are offset by +8. However, the Windows CE
16639 loader expects the relocation not to take this into account. */
16640 case BFD_RELOC_ARM_PCREL_BRANCH:
16641 case BFD_RELOC_ARM_PCREL_CALL:
16642 case BFD_RELOC_ARM_PCREL_JUMP:
16643 case BFD_RELOC_ARM_PCREL_BLX:
16644 case BFD_RELOC_ARM_PLT32:
16645 #ifdef TE_WINCE
16646 /* When a fixup is handled immediately, because we have already
16647 discovered the value of the symbol or the address of the frag involved,
16648 we must account for the +8 offset ourselves, as the OS loader will
16649 never see the reloc; see fixup_segment() in write.c.
16650 The S_IS_EXTERNAL test handles the case of global symbols, which need
16651 the calculated base, not just the pipeline compensation the linker will apply. */
16652 if (fixP->fx_pcrel
16653 && fixP->fx_addsy != NULL
16654 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
16655 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
16656 return base + 8;
16657 return base;
16658 #else
16659 return base + 8;
16660 #endif
16661
16662 /* ARM mode loads relative to PC are also offset by +8. Unlike
16663 branches, the Windows CE loader *does* expect the relocation
16664 to take this into account. */
16665 case BFD_RELOC_ARM_OFFSET_IMM:
16666 case BFD_RELOC_ARM_OFFSET_IMM8:
16667 case BFD_RELOC_ARM_HWLITERAL:
16668 case BFD_RELOC_ARM_LITERAL:
16669 case BFD_RELOC_ARM_CP_OFF_IMM:
16670 return base + 8;
16671
16672
16673 /* Other PC-relative relocations are un-offset. */
16674 default:
16675 return base;
16676 }
16677 }
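
/* Example of the ARM-mode bias: for "b ." the fixup value becomes
   target - (fixup address + 8) = -8, which md_apply_fix stores as
   (-8 >> 2) & 0x00ffffff = 0xfffffe, giving the familiar 0xeafffffe
   encoding of a branch to itself. */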
16678
16679 /* Under ELF we need to provide a default value for _GLOBAL_OFFSET_TABLE_.
16680 Otherwise we have no need to provide default values for symbols. */
16681
16682 symbolS *
16683 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
16684 {
16685 #ifdef OBJ_ELF
16686 if (name[0] == '_' && name[1] == 'G'
16687 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
16688 {
16689 if (!GOT_symbol)
16690 {
16691 if (symbol_find (name))
16692 as_bad (_("GOT already in the symbol table"));
16693
16694 GOT_symbol = symbol_new (name, undefined_section,
16695 (valueT) 0, & zero_address_frag);
16696 }
16697
16698 return GOT_symbol;
16699 }
16700 #endif
16701
16702 return 0;
16703 }
16704
16705 /* Subroutine of md_apply_fix. Check to see if an immediate can be
16706 computed as two separate immediate values, added together. We
16707 already know that this value cannot be computed by just one ARM
16708 instruction. */
16709
16710 static unsigned int
16711 validate_immediate_twopart (unsigned int val,
16712 unsigned int * highpart)
16713 {
16714 unsigned int a;
16715 unsigned int i;
16716
16717 for (i = 0; i < 32; i += 2)
16718 if (((a = rotate_left (val, i)) & 0xff) != 0)
16719 {
16720 if (a & 0xff00)
16721 {
16722 if (a & ~ 0xffff)
16723 continue;
16724 * highpart = (a >> 8) | ((i + 24) << 7);
16725 }
16726 else if (a & 0xff0000)
16727 {
16728 if (a & 0xff000000)
16729 continue;
16730 * highpart = (a >> 16) | ((i + 16) << 7);
16731 }
16732 else
16733 {
16734 assert (a & 0xff000000);
16735 * highpart = (a >> 24) | ((i + 8) << 7);
16736 }
16737
16738 return (a & 0xff) | (i << 7);
16739 }
16740
16741 return FAIL;
16742 }
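
/* For example, 0xfff is not a valid single rotated 8-bit immediate, but
   this function splits it into a low part of 0xff and a high part encoding
   0xf00 (0xf rotated), which the ADRL handling below turns into two ADD
   (or SUB) instructions. */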
16743
16744 static int
16745 validate_offset_imm (unsigned int val, int hwse)
16746 {
16747 if ((hwse && val > 255) || val > 4095)
16748 return FAIL;
16749 return val;
16750 }
16751
16752 /* Subroutine of md_apply_fix. Do those data_ops which can take a
16753 negative immediate constant by altering the instruction. A bit of
16754 a hack really.
16755 MOV <-> MVN
16756 AND <-> BIC
16757 ADC <-> SBC
16758 by inverting the second operand, and
16759 ADD <-> SUB
16760 CMP <-> CMN
16761 by negating the second operand. */
16762
16763 static int
16764 negate_data_op (unsigned long * instruction,
16765 unsigned long value)
16766 {
16767 int op, new_inst;
16768 unsigned long negated, inverted;
16769
16770 negated = encode_arm_immediate (-value);
16771 inverted = encode_arm_immediate (~value);
16772
16773 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
16774 switch (op)
16775 {
16776 /* First negates. */
16777 case OPCODE_SUB: /* ADD <-> SUB */
16778 new_inst = OPCODE_ADD;
16779 value = negated;
16780 break;
16781
16782 case OPCODE_ADD:
16783 new_inst = OPCODE_SUB;
16784 value = negated;
16785 break;
16786
16787 case OPCODE_CMP: /* CMP <-> CMN */
16788 new_inst = OPCODE_CMN;
16789 value = negated;
16790 break;
16791
16792 case OPCODE_CMN:
16793 new_inst = OPCODE_CMP;
16794 value = negated;
16795 break;
16796
16797 /* Now Inverted ops. */
16798 case OPCODE_MOV: /* MOV <-> MVN */
16799 new_inst = OPCODE_MVN;
16800 value = inverted;
16801 break;
16802
16803 case OPCODE_MVN:
16804 new_inst = OPCODE_MOV;
16805 value = inverted;
16806 break;
16807
16808 case OPCODE_AND: /* AND <-> BIC */
16809 new_inst = OPCODE_BIC;
16810 value = inverted;
16811 break;
16812
16813 case OPCODE_BIC:
16814 new_inst = OPCODE_AND;
16815 value = inverted;
16816 break;
16817
16818 case OPCODE_ADC: /* ADC <-> SBC */
16819 new_inst = OPCODE_SBC;
16820 value = inverted;
16821 break;
16822
16823 case OPCODE_SBC:
16824 new_inst = OPCODE_ADC;
16825 value = inverted;
16826 break;
16827
16828 /* We cannot do anything. */
16829 default:
16830 return FAIL;
16831 }
16832
16833 if (value == (unsigned) FAIL)
16834 return FAIL;
16835
16836 *instruction &= OPCODE_MASK;
16837 *instruction |= new_inst << DATA_OP_SHIFT;
16838 return value;
16839 }
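
/* For example, a MOV whose fixed-up immediate is 0xffffffff cannot be
   encoded directly, but ~0xffffffff == 0 can, so the instruction is
   rewritten as MVN with immediate 0; similarly the ADD/SUB and CMP/CMN
   pairs absorb negated values. */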
16840
16841 /* Like negate_data_op, but for Thumb-2. */
16842
16843 static unsigned int
16844 thumb32_negate_data_op (offsetT *instruction, offsetT value)
16845 {
16846 int op, new_inst;
16847 int rd;
16848 offsetT negated, inverted;
16849
16850 negated = encode_thumb32_immediate (-value);
16851 inverted = encode_thumb32_immediate (~value);
16852
16853 rd = (*instruction >> 8) & 0xf;
16854 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
16855 switch (op)
16856 {
16857 /* ADD <-> SUB. Includes CMP <-> CMN. */
16858 case T2_OPCODE_SUB:
16859 new_inst = T2_OPCODE_ADD;
16860 value = negated;
16861 break;
16862
16863 case T2_OPCODE_ADD:
16864 new_inst = T2_OPCODE_SUB;
16865 value = negated;
16866 break;
16867
16868 /* ORR <-> ORN. Includes MOV <-> MVN. */
16869 case T2_OPCODE_ORR:
16870 new_inst = T2_OPCODE_ORN;
16871 value = inverted;
16872 break;
16873
16874 case T2_OPCODE_ORN:
16875 new_inst = T2_OPCODE_ORR;
16876 value = inverted;
16877 break;
16878
16879 /* AND <-> BIC. TST has no inverted equivalent. */
16880 case T2_OPCODE_AND:
16881 new_inst = T2_OPCODE_BIC;
16882 if (rd == 15)
16883 value = FAIL;
16884 else
16885 value = inverted;
16886 break;
16887
16888 case T2_OPCODE_BIC:
16889 new_inst = T2_OPCODE_AND;
16890 value = inverted;
16891 break;
16892
16893 /* ADC <-> SBC */
16894 case T2_OPCODE_ADC:
16895 new_inst = T2_OPCODE_SBC;
16896 value = inverted;
16897 break;
16898
16899 case T2_OPCODE_SBC:
16900 new_inst = T2_OPCODE_ADC;
16901 value = inverted;
16902 break;
16903
16904 /* We cannot do anything. */
16905 default:
16906 return FAIL;
16907 }
16908
16909 if (value == FAIL)
16910 return FAIL;
16911
16912 *instruction &= T2_OPCODE_MASK;
16913 *instruction |= new_inst << T2_DATA_OP_SHIFT;
16914 return value;
16915 }
16916
16917 /* Read a 32-bit thumb instruction from buf. */
16918 static unsigned long
16919 get_thumb32_insn (char * buf)
16920 {
16921 unsigned long insn;
16922 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
16923 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16924
16925 return insn;
16926 }
16927
16928
16929 /* We usually want to set the low bit on the address of thumb function
16930 symbols. In particular .word foo - . should have the low bit set.
16931 Generic code tries to fold the difference of two symbols to
16932 a constant. Prevent this and force a relocation when the first symbol
16933 is a thumb function. */
16934 int
16935 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
16936 {
16937 if (op == O_subtract
16938 && l->X_op == O_symbol
16939 && r->X_op == O_symbol
16940 && THUMB_IS_FUNC (l->X_add_symbol))
16941 {
16942 l->X_op = O_subtract;
16943 l->X_op_symbol = r->X_add_symbol;
16944 l->X_add_number -= r->X_add_number;
16945 return 1;
16946 }
16947 /* Process as normal. */
16948 return 0;
16949 }
16950
16951 void
16952 md_apply_fix (fixS * fixP,
16953 valueT * valP,
16954 segT seg)
16955 {
16956 offsetT value = * valP;
16957 offsetT newval;
16958 unsigned int newimm;
16959 unsigned long temp;
16960 int sign;
16961 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
16962
16963 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
16964
16965 /* Note whether this will delete the relocation. */
16966 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
16967 fixP->fx_done = 1;
16968
16969 /* On a 64-bit host, silently truncate 'value' to 32 bits for
16970 consistency with the behavior on 32-bit hosts. Remember value
16971 for emit_reloc. */
16972 value &= 0xffffffff;
16973 value ^= 0x80000000;
16974 value -= 0x80000000;
16975
16976 *valP = value;
16977 fixP->fx_addnumber = value;
16978
16979 /* Same treatment for fixP->fx_offset. */
16980 fixP->fx_offset &= 0xffffffff;
16981 fixP->fx_offset ^= 0x80000000;
16982 fixP->fx_offset -= 0x80000000;
16983
16984 switch (fixP->fx_r_type)
16985 {
16986 case BFD_RELOC_NONE:
16987 /* This will need to go in the object file. */
16988 fixP->fx_done = 0;
16989 break;
16990
16991 case BFD_RELOC_ARM_IMMEDIATE:
16992 /* We claim that this fixup has been processed here,
16993 even if in fact we generate an error because we do
16994 not have a reloc for it, so tc_gen_reloc will reject it. */
16995 fixP->fx_done = 1;
16996
16997 if (fixP->fx_addsy
16998 && ! S_IS_DEFINED (fixP->fx_addsy))
16999 {
17000 as_bad_where (fixP->fx_file, fixP->fx_line,
17001 _("undefined symbol %s used as an immediate value"),
17002 S_GET_NAME (fixP->fx_addsy));
17003 break;
17004 }
17005
17006 newimm = encode_arm_immediate (value);
17007 temp = md_chars_to_number (buf, INSN_SIZE);
17008
17009 /* If the instruction will fail, see if we can fix things up by
17010 changing the opcode. */
17011 if (newimm == (unsigned int) FAIL
17012 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
17013 {
17014 as_bad_where (fixP->fx_file, fixP->fx_line,
17015 _("invalid constant (%lx) after fixup"),
17016 (unsigned long) value);
17017 break;
17018 }
17019
17020 newimm |= (temp & 0xfffff000);
17021 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17022 break;
17023
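/* As a concrete case of the above: a fixup value of 0x104 encodes as
   imm8 = 0x41 with rotation field 0xf (rotate right by 30), so the low
   twelve bits of the instruction become 0xf41; a value such as 0x101,
   which no rotation or opcode change can represent, produces the
   "invalid constant" error instead. */
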
17024 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17025 {
17026 unsigned int highpart = 0;
17027 unsigned int newinsn = 0xe1a00000; /* nop. */
17028
17029 newimm = encode_arm_immediate (value);
17030 temp = md_chars_to_number (buf, INSN_SIZE);
17031
17032 /* If the instruction will fail, see if we can fix things up by
17033 changing the opcode. */
17034 if (newimm == (unsigned int) FAIL
17035 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
17036 {
17037 /* No ? OK - try using two ADD instructions to generate
17038 the value. */
17039 newimm = validate_immediate_twopart (value, & highpart);
17040
17041 /* Yes - then make sure that the second instruction is
17042 also an add. */
17043 if (newimm != (unsigned int) FAIL)
17044 newinsn = temp;
17045 /* Still No ? Try using a negated value. */
17046 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
17047 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
17048 /* Otherwise - give up. */
17049 else
17050 {
17051 as_bad_where (fixP->fx_file, fixP->fx_line,
17052 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17053 (long) value);
17054 break;
17055 }
17056
17057 /* Replace the first operand in the 2nd instruction (which
17058 is the PC) with the destination register. We have
17059 already added in the PC in the first instruction and we
17060 do not want to do it again. */
17061 newinsn &= ~ 0xf0000;
17062 newinsn |= ((newinsn & 0x0f000) << 4);
17063 }
17064
17065 newimm |= (temp & 0xfffff000);
17066 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17067
17068 highpart |= (newinsn & 0xfffff000);
17069 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
17070 }
17071 break;
17072
17073 case BFD_RELOC_ARM_OFFSET_IMM:
17074 if (!fixP->fx_done && seg->use_rela_p)
17075 value = 0;
17076
17077 case BFD_RELOC_ARM_LITERAL:
17078 sign = value >= 0;
17079
17080 if (value < 0)
17081 value = - value;
17082
17083 if (validate_offset_imm (value, 0) == FAIL)
17084 {
17085 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
17086 as_bad_where (fixP->fx_file, fixP->fx_line,
17087 _("invalid literal constant: pool needs to be closer"));
17088 else
17089 as_bad_where (fixP->fx_file, fixP->fx_line,
17090 _("bad immediate value for offset (%ld)"),
17091 (long) value);
17092 break;
17093 }
17094
17095 newval = md_chars_to_number (buf, INSN_SIZE);
17096 newval &= 0xff7ff000;
17097 newval |= value | (sign ? INDEX_UP : 0);
17098 md_number_to_chars (buf, newval, INSN_SIZE);
17099 break;
17100
17101 case BFD_RELOC_ARM_OFFSET_IMM8:
17102 case BFD_RELOC_ARM_HWLITERAL:
17103 sign = value >= 0;
17104
17105 if (value < 0)
17106 value = - value;
17107
17108 if (validate_offset_imm (value, 1) == FAIL)
17109 {
17110 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
17111 as_bad_where (fixP->fx_file, fixP->fx_line,
17112 _("invalid literal constant: pool needs to be closer"));
17113 else
17114 as_bad (_("bad immediate value for half-word offset (%ld)"),
17115 (long) value);
17116 break;
17117 }
17118
17119 newval = md_chars_to_number (buf, INSN_SIZE);
17120 newval &= 0xff7ff0f0;
17121 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
17122 md_number_to_chars (buf, newval, INSN_SIZE);
17123 break;
17124
17125 case BFD_RELOC_ARM_T32_OFFSET_U8:
17126 if (value < 0 || value > 1020 || value % 4 != 0)
17127 as_bad_where (fixP->fx_file, fixP->fx_line,
17128 _("bad immediate value for offset (%ld)"), (long) value);
17129 value /= 4;
17130
17131 newval = md_chars_to_number (buf+2, THUMB_SIZE);
17132 newval |= value;
17133 md_number_to_chars (buf+2, newval, THUMB_SIZE);
17134 break;
17135
17136 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17137 /* This is a complicated relocation used for all varieties of Thumb32
17138 load/store instruction with immediate offset:
17139
17140 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
17141 *4, optional writeback(W)
17142 (doubleword load/store)
17143
17144 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
17145 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
17146 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
17147 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
17148 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
17149
17150 Uppercase letters indicate bits that are already encoded at
17151 this point. Lowercase letters are our problem. For the
17152 second block of instructions, the secondary opcode nybble
17153 (bits 8..11) is present, and bit 23 is zero, even if this is
17154 a PC-relative operation. */
17155 newval = md_chars_to_number (buf, THUMB_SIZE);
17156 newval <<= 16;
17157 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
17158
17159 if ((newval & 0xf0000000) == 0xe0000000)
17160 {
17161 /* Doubleword load/store: 8-bit offset, scaled by 4. */
17162 if (value >= 0)
17163 newval |= (1 << 23);
17164 else
17165 value = -value;
17166 if (value % 4 != 0)
17167 {
17168 as_bad_where (fixP->fx_file, fixP->fx_line,
17169 _("offset not a multiple of 4"));
17170 break;
17171 }
17172 value /= 4;
17173 if (value > 0xff)
17174 {
17175 as_bad_where (fixP->fx_file, fixP->fx_line,
17176 _("offset out of range"));
17177 break;
17178 }
17179 newval &= ~0xff;
17180 }
17181 else if ((newval & 0x000f0000) == 0x000f0000)
17182 {
17183 /* PC-relative, 12-bit offset. */
17184 if (value >= 0)
17185 newval |= (1 << 23);
17186 else
17187 value = -value;
17188 if (value > 0xfff)
17189 {
17190 as_bad_where (fixP->fx_file, fixP->fx_line,
17191 _("offset out of range"));
17192 break;
17193 }
17194 newval &= ~0xfff;
17195 }
17196 else if ((newval & 0x00000100) == 0x00000100)
17197 {
17198 /* Writeback: 8-bit, +/- offset. */
17199 if (value >= 0)
17200 newval |= (1 << 9);
17201 else
17202 value = -value;
17203 if (value > 0xff)
17204 {
17205 as_bad_where (fixP->fx_file, fixP->fx_line,
17206 _("offset out of range"));
17207 break;
17208 }
17209 newval &= ~0xff;
17210 }
17211 else if ((newval & 0x00000f00) == 0x00000e00)
17212 {
17213 /* T-instruction: positive 8-bit offset. */
17214 if (value < 0 || value > 0xff)
17215 {
17216 as_bad_where (fixP->fx_file, fixP->fx_line,
17217 _("offset out of range"));
17218 break;
17219 }
17220 newval &= ~0xff;
17221 newval |= value;
17222 }
17223 else
17224 {
17225 /* Positive 12-bit or negative 8-bit offset. */
17226 int limit;
17227 if (value >= 0)
17228 {
17229 newval |= (1 << 23);
17230 limit = 0xfff;
17231 }
17232 else
17233 {
17234 value = -value;
17235 limit = 0xff;
17236 }
17237 if (value > limit)
17238 {
17239 as_bad_where (fixP->fx_file, fixP->fx_line,
17240 _("offset out of range"));
17241 break;
17242 }
17243 newval &= ~limit;
17244 }
17245
17246 newval |= value;
17247 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
17248 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
17249 break;
17250
17251 case BFD_RELOC_ARM_SHIFT_IMM:
17252 newval = md_chars_to_number (buf, INSN_SIZE);
17253 if (((unsigned long) value) > 32
17254 || (value == 32
17255 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
17256 {
17257 as_bad_where (fixP->fx_file, fixP->fx_line,
17258 _("shift expression is too large"));
17259 break;
17260 }
17261
17262 if (value == 0)
17263 /* Shifts of zero must be done as lsl. */
17264 newval &= ~0x60;
17265 else if (value == 32)
17266 value = 0;
17267 newval &= 0xfffff07f;
17268 newval |= (value & 0x1f) << 7;
17269 md_number_to_chars (buf, newval, INSN_SIZE);
17270 break;
17271
17272 case BFD_RELOC_ARM_T32_IMMEDIATE:
17273 case BFD_RELOC_ARM_T32_IMM12:
17274 case BFD_RELOC_ARM_T32_ADD_PC12:
17275 /* We claim that this fixup has been processed here,
17276 even if in fact we generate an error because we do
17277 not have a reloc for it, so tc_gen_reloc will reject it. */
17278 fixP->fx_done = 1;
17279
17280 if (fixP->fx_addsy
17281 && ! S_IS_DEFINED (fixP->fx_addsy))
17282 {
17283 as_bad_where (fixP->fx_file, fixP->fx_line,
17284 _("undefined symbol %s used as an immediate value"),
17285 S_GET_NAME (fixP->fx_addsy));
17286 break;
17287 }
17288
17289 newval = md_chars_to_number (buf, THUMB_SIZE);
17290 newval <<= 16;
17291 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
17292
17293 /* FUTURE: Implement analogue of negate_data_op for T32. */
17294 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE)
17295 {
17296 newimm = encode_thumb32_immediate (value);
17297 if (newimm == (unsigned int) FAIL)
17298 newimm = thumb32_negate_data_op (&newval, value);
17299 }
17300 else
17301 {
17302 /* 12 bit immediate for addw/subw. */
17303 if (value < 0)
17304 {
17305 value = -value;
17306 newval ^= 0x00a00000;
17307 }
17308 if (value > 0xfff)
17309 newimm = (unsigned int) FAIL;
17310 else
17311 newimm = value;
17312 }
17313
17314 if (newimm == (unsigned int)FAIL)
17315 {
17316 as_bad_where (fixP->fx_file, fixP->fx_line,
17317 _("invalid constant (%lx) after fixup"),
17318 (unsigned long) value);
17319 break;
17320 }
17321
17322 newval |= (newimm & 0x800) << 15;
17323 newval |= (newimm & 0x700) << 4;
17324 newval |= (newimm & 0x0ff);
17325
17326 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
17327 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
17328 break;
17329
17330 case BFD_RELOC_ARM_SMC:
17331 if (((unsigned long) value) > 0xffff)
17332 as_bad_where (fixP->fx_file, fixP->fx_line,
17333 _("invalid smc expression"));
17334 newval = md_chars_to_number (buf, INSN_SIZE);
17335 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
17336 md_number_to_chars (buf, newval, INSN_SIZE);
17337 break;
17338
17339 case BFD_RELOC_ARM_SWI:
17340 if (fixP->tc_fix_data != 0)
17341 {
17342 if (((unsigned long) value) > 0xff)
17343 as_bad_where (fixP->fx_file, fixP->fx_line,
17344 _("invalid swi expression"));
17345 newval = md_chars_to_number (buf, THUMB_SIZE);
17346 newval |= value;
17347 md_number_to_chars (buf, newval, THUMB_SIZE);
17348 }
17349 else
17350 {
17351 if (((unsigned long) value) > 0x00ffffff)
17352 as_bad_where (fixP->fx_file, fixP->fx_line,
17353 _("invalid swi expression"));
17354 newval = md_chars_to_number (buf, INSN_SIZE);
17355 newval |= value;
17356 md_number_to_chars (buf, newval, INSN_SIZE);
17357 }
17358 break;
17359
17360 case BFD_RELOC_ARM_MULTI:
17361 if (((unsigned long) value) > 0xffff)
17362 as_bad_where (fixP->fx_file, fixP->fx_line,
17363 _("invalid expression in load/store multiple"));
17364 newval = value | md_chars_to_number (buf, INSN_SIZE);
17365 md_number_to_chars (buf, newval, INSN_SIZE);
17366 break;
17367
17368 #ifdef OBJ_ELF
17369 case BFD_RELOC_ARM_PCREL_CALL:
17370 newval = md_chars_to_number (buf, INSN_SIZE);
17371 if ((newval & 0xf0000000) == 0xf0000000)
17372 temp = 1;
17373 else
17374 temp = 3;
17375 goto arm_branch_common;
17376
17377 case BFD_RELOC_ARM_PCREL_JUMP:
17378 case BFD_RELOC_ARM_PLT32:
17379 #endif
17380 case BFD_RELOC_ARM_PCREL_BRANCH:
17381 temp = 3;
17382 goto arm_branch_common;
17383
17384 case BFD_RELOC_ARM_PCREL_BLX:
17385 temp = 1;
17386 arm_branch_common:
17387 /* We are going to store value (shifted right by two) in the
17388 instruction, in a 24 bit, signed field. Bits 25 through 31 must be either
17389 all clear or all set, and bit 0 must be clear. For B/BL bit 1 must
17390 also be clear. */
17391 if (value & temp)
17392 as_bad_where (fixP->fx_file, fixP->fx_line,
17393 _("misaligned branch destination"));
17394 if ((value & (offsetT)0xfe000000) != (offsetT)0
17395 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
17396 as_bad_where (fixP->fx_file, fixP->fx_line,
17397 _("branch out of range"));
17398
17399 if (fixP->fx_done || !seg->use_rela_p)
17400 {
17401 newval = md_chars_to_number (buf, INSN_SIZE);
17402 newval |= (value >> 2) & 0x00ffffff;
17403 /* Set the H bit on BLX instructions. */
17404 if (temp == 1)
17405 {
17406 if (value & 2)
17407 newval |= 0x01000000;
17408 else
17409 newval &= ~0x01000000;
17410 }
17411 md_number_to_chars (buf, newval, INSN_SIZE);
17412 }
17413 break;
17414
17415 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CZB */
17416 /* CZB can only branch forward. */
17417 if (value & ~0x7e)
17418 as_bad_where (fixP->fx_file, fixP->fx_line,
17419 _("branch out of range"));
17420
17421 if (fixP->fx_done || !seg->use_rela_p)
17422 {
17423 newval = md_chars_to_number (buf, THUMB_SIZE);
17424 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
17425 md_number_to_chars (buf, newval, THUMB_SIZE);
17426 }
17427 break;
17428
17429 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
17430 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
17431 as_bad_where (fixP->fx_file, fixP->fx_line,
17432 _("branch out of range"));
17433
17434 if (fixP->fx_done || !seg->use_rela_p)
17435 {
17436 newval = md_chars_to_number (buf, THUMB_SIZE);
17437 newval |= (value & 0x1ff) >> 1;
17438 md_number_to_chars (buf, newval, THUMB_SIZE);
17439 }
17440 break;
17441
17442 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
17443 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
17444 as_bad_where (fixP->fx_file, fixP->fx_line,
17445 _("branch out of range"));
17446
17447 if (fixP->fx_done || !seg->use_rela_p)
17448 {
17449 newval = md_chars_to_number (buf, THUMB_SIZE);
17450 newval |= (value & 0xfff) >> 1;
17451 md_number_to_chars (buf, newval, THUMB_SIZE);
17452 }
17453 break;
17454
17455 case BFD_RELOC_THUMB_PCREL_BRANCH20:
17456 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
17457 as_bad_where (fixP->fx_file, fixP->fx_line,
17458 _("conditional branch out of range"));
17459
17460 if (fixP->fx_done || !seg->use_rela_p)
17461 {
17462 offsetT newval2;
17463 addressT S, J1, J2, lo, hi;
17464
17465 S = (value & 0x00100000) >> 20;
17466 J2 = (value & 0x00080000) >> 19;
17467 J1 = (value & 0x00040000) >> 18;
17468 hi = (value & 0x0003f000) >> 12;
17469 lo = (value & 0x00000ffe) >> 1;
17470
17471 newval = md_chars_to_number (buf, THUMB_SIZE);
17472 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17473 newval |= (S << 10) | hi;
17474 newval2 |= (J1 << 13) | (J2 << 11) | lo;
17475 md_number_to_chars (buf, newval, THUMB_SIZE);
17476 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
17477 }
17478 break;
17479
17480 case BFD_RELOC_THUMB_PCREL_BLX:
17481 case BFD_RELOC_THUMB_PCREL_BRANCH23:
17482 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
17483 as_bad_where (fixP->fx_file, fixP->fx_line,
17484 _("branch out of range"));
17485
17486 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
17487 /* For a BLX instruction, make sure that the relocation is rounded up
17488 to a word boundary. This follows the semantics of the instruction
17489 which specifies that bit 1 of the target address will come from bit
17490 1 of the base address. */
17491 value = (value + 1) & ~ 1;
17492
17493 if (fixP->fx_done || !seg->use_rela_p)
17494 {
17495 offsetT newval2;
17496
17497 newval = md_chars_to_number (buf, THUMB_SIZE);
17498 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17499 newval |= (value & 0x7fffff) >> 12;
17500 newval2 |= (value & 0xfff) >> 1;
17501 md_number_to_chars (buf, newval, THUMB_SIZE);
17502 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
17503 }
17504 break;
17505
17506 case BFD_RELOC_THUMB_PCREL_BRANCH25:
17507 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
17508 as_bad_where (fixP->fx_file, fixP->fx_line,
17509 _("branch out of range"));
17510
17511 if (fixP->fx_done || !seg->use_rela_p)
17512 {
17513 offsetT newval2;
17514 addressT S, I1, I2, lo, hi;
17515
17516 S = (value & 0x01000000) >> 24;
17517 I1 = (value & 0x00800000) >> 23;
17518 I2 = (value & 0x00400000) >> 22;
17519 hi = (value & 0x003ff000) >> 12;
17520 lo = (value & 0x00000ffe) >> 1;
17521
17522 I1 = !(I1 ^ S);
17523 I2 = !(I2 ^ S);
17524
17525 newval = md_chars_to_number (buf, THUMB_SIZE);
17526 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17527 newval |= (S << 10) | hi;
17528 newval2 |= (I1 << 13) | (I2 << 11) | lo;
17529 md_number_to_chars (buf, newval, THUMB_SIZE);
17530 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
17531 }
17532 break;
17533
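/* Sanity check for the packing above: "bl ." yields value = -4, so
   S = I1 = I2 = 1, hi = 0x3ff and lo = 0x7fe, which ORed into the BL
   opcode halfwords gives the well-known 0xf7ff, 0xfffe encoding of a
   Thumb branch-and-link to itself. */
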
17534 case BFD_RELOC_8:
17535 if (fixP->fx_done || !seg->use_rela_p)
17536 md_number_to_chars (buf, value, 1);
17537 break;
17538
17539 case BFD_RELOC_16:
17540 if (fixP->fx_done || !seg->use_rela_p)
17541 md_number_to_chars (buf, value, 2);
17542 break;
17543
17544 #ifdef OBJ_ELF
17545 case BFD_RELOC_ARM_TLS_GD32:
17546 case BFD_RELOC_ARM_TLS_LE32:
17547 case BFD_RELOC_ARM_TLS_IE32:
17548 case BFD_RELOC_ARM_TLS_LDM32:
17549 case BFD_RELOC_ARM_TLS_LDO32:
17550 S_SET_THREAD_LOCAL (fixP->fx_addsy);
17551 /* fall through */
17552
17553 case BFD_RELOC_ARM_GOT32:
17554 case BFD_RELOC_ARM_GOTOFF:
17555 case BFD_RELOC_ARM_TARGET2:
17556 if (fixP->fx_done || !seg->use_rela_p)
17557 md_number_to_chars (buf, 0, 4);
17558 break;
17559 #endif
17560
17561 case BFD_RELOC_RVA:
17562 case BFD_RELOC_32:
17563 case BFD_RELOC_ARM_TARGET1:
17564 case BFD_RELOC_ARM_ROSEGREL32:
17565 case BFD_RELOC_ARM_SBREL32:
17566 case BFD_RELOC_32_PCREL:
17567 if (fixP->fx_done || !seg->use_rela_p)
17568 #ifdef TE_WINCE
17569 /* For WinCE we only do this for pcrel fixups. */
17570 if (fixP->fx_done || fixP->fx_pcrel)
17571 #endif
17572 md_number_to_chars (buf, value, 4);
17573 break;
17574
17575 #ifdef OBJ_ELF
17576 case BFD_RELOC_ARM_PREL31:
17577 if (fixP->fx_done || !seg->use_rela_p)
17578 {
17579 newval = md_chars_to_number (buf, 4) & 0x80000000;
17580 if ((value ^ (value >> 1)) & 0x40000000)
17581 {
17582 as_bad_where (fixP->fx_file, fixP->fx_line,
17583 _("rel31 relocation overflow"));
17584 }
17585 newval |= value & 0x7fffffff;
17586 md_number_to_chars (buf, newval, 4);
17587 }
17588 break;
17589 #endif
17590
17591 case BFD_RELOC_ARM_CP_OFF_IMM:
17592 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
17593 if (value < -1023 || value > 1023 || (value & 3))
17594 as_bad_where (fixP->fx_file, fixP->fx_line,
17595 _("co-processor offset out of range"));
17596 cp_off_common:
17597 sign = value >= 0;
17598 if (value < 0)
17599 value = -value;
17600 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
17601 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
17602 newval = md_chars_to_number (buf, INSN_SIZE);
17603 else
17604 newval = get_thumb32_insn (buf);
17605 newval &= 0xff7fff00;
17606 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
17607 if (value == 0)
17608 newval &= ~WRITE_BACK;
17609 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
17610 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
17611 md_number_to_chars (buf, newval, INSN_SIZE);
17612 else
17613 put_thumb32_insn (buf, newval);
17614 break;
17615
17616 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
17617 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
17618 if (value < -255 || value > 255)
17619 as_bad_where (fixP->fx_file, fixP->fx_line,
17620 _("co-processor offset out of range"));
17621 value *= 4;
17622 goto cp_off_common;
17623
17624 case BFD_RELOC_ARM_THUMB_OFFSET:
17625 newval = md_chars_to_number (buf, THUMB_SIZE);
17626 /* Exactly what range applies, and where the offset is inserted,
17627 depends on the type of instruction; we can establish this from the
17628 top 4 bits. */
17629 switch (newval >> 12)
17630 {
17631 case 4: /* PC load. */
17632 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
17633 forced to zero for these loads; md_pcrel_from has already
17634 compensated for this. */
17635 if (value & 3)
17636 as_bad_where (fixP->fx_file, fixP->fx_line,
17637 _("invalid offset, target not word aligned (0x%08lX)"),
17638 (((unsigned long) fixP->fx_frag->fr_address
17639 + (unsigned long) fixP->fx_where) & ~3)
17640 + (unsigned long) value);
17641
17642 if (value & ~0x3fc)
17643 as_bad_where (fixP->fx_file, fixP->fx_line,
17644 _("invalid offset, value too big (0x%08lX)"),
17645 (long) value);
17646
17647 newval |= value >> 2;
17648 break;
17649
17650 case 9: /* SP load/store. */
17651 if (value & ~0x3fc)
17652 as_bad_where (fixP->fx_file, fixP->fx_line,
17653 _("invalid offset, value too big (0x%08lX)"),
17654 (long) value);
17655 newval |= value >> 2;
17656 break;
17657
17658 case 6: /* Word load/store. */
17659 if (value & ~0x7c)
17660 as_bad_where (fixP->fx_file, fixP->fx_line,
17661 _("invalid offset, value too big (0x%08lX)"),
17662 (long) value);
17663 newval |= value << 4; /* 6 - 2. */
17664 break;
17665
17666 case 7: /* Byte load/store. */
17667 if (value & ~0x1f)
17668 as_bad_where (fixP->fx_file, fixP->fx_line,
17669 _("invalid offset, value too big (0x%08lX)"),
17670 (long) value);
17671 newval |= value << 6;
17672 break;
17673
17674 case 8: /* Halfword load/store. */
17675 if (value & ~0x3e)
17676 as_bad_where (fixP->fx_file, fixP->fx_line,
17677 _("invalid offset, value too big (0x%08lX)"),
17678 (long) value);
17679 newval |= value << 5; /* 6 - 1. */
17680 break;
17681
17682 default:
17683 as_bad_where (fixP->fx_file, fixP->fx_line,
17684 _("Unable to process relocation for thumb opcode: %lx"),
17685 (unsigned long) newval);
17686 break;
17687 }
17688 md_number_to_chars (buf, newval, THUMB_SIZE);
17689 break;
17690
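/* For instance, an SP-relative load (top four bits 9) whose resolved
   offset is 0x24 passes the word-alignment and 0x3fc range checks, and
   0x24 >> 2 is ORed into the low eight bits of the instruction. */
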
17691 case BFD_RELOC_ARM_THUMB_ADD:
17692 /* This is a complicated relocation, since we use it for all of
17693 the following immediate relocations:
17694
17695 3bit ADD/SUB
17696 8bit ADD/SUB
17697 9bit ADD/SUB SP word-aligned
17698 10bit ADD PC/SP word-aligned
17699
17700 The type of instruction being processed is encoded in the
17701 instruction field:
17702
17703 0x8000 SUB
17704 0x00F0 Rd
17705 0x000F Rs
17706 */
17707 newval = md_chars_to_number (buf, THUMB_SIZE);
17708 {
17709 int rd = (newval >> 4) & 0xf;
17710 int rs = newval & 0xf;
17711 int subtract = !!(newval & 0x8000);
17712
17713 /* Check for HI regs, only very restricted cases allowed:
17714 Adjusting SP, and using PC or SP to get an address. */
17715 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
17716 || (rs > 7 && rs != REG_SP && rs != REG_PC))
17717 as_bad_where (fixP->fx_file, fixP->fx_line,
17718 _("invalid Hi register with immediate"));
17719
17720 /* If value is negative, choose the opposite instruction. */
17721 if (value < 0)
17722 {
17723 value = -value;
17724 subtract = !subtract;
17725 if (value < 0)
17726 as_bad_where (fixP->fx_file, fixP->fx_line,
17727 _("immediate value out of range"));
17728 }
17729
17730 if (rd == REG_SP)
17731 {
17732 if (value & ~0x1fc)
17733 as_bad_where (fixP->fx_file, fixP->fx_line,
17734 _("invalid immediate for stack address calculation"));
17735 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
17736 newval |= value >> 2;
17737 }
17738 else if (rs == REG_PC || rs == REG_SP)
17739 {
17740 if (subtract || value & ~0x3fc)
17741 as_bad_where (fixP->fx_file, fixP->fx_line,
17742 _("invalid immediate for address calculation (value = 0x%08lX)"),
17743 (unsigned long) value);
17744 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
17745 newval |= rd << 8;
17746 newval |= value >> 2;
17747 }
17748 else if (rs == rd)
17749 {
17750 if (value & ~0xff)
17751 as_bad_where (fixP->fx_file, fixP->fx_line,
17752 _("immediate value out of range"));
17753 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
17754 newval |= (rd << 8) | value;
17755 }
17756 else
17757 {
17758 if (value & ~0x7)
17759 as_bad_where (fixP->fx_file, fixP->fx_line,
17760 _("immediate value out of range"));
17761 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
17762 newval |= rd | (rs << 3) | (value << 6);
17763 }
17764 }
17765 md_number_to_chars (buf, newval, THUMB_SIZE);
17766 break;
17767
17768 case BFD_RELOC_ARM_THUMB_IMM:
17769 newval = md_chars_to_number (buf, THUMB_SIZE);
17770 if (value < 0 || value > 255)
17771 as_bad_where (fixP->fx_file, fixP->fx_line,
17772 _("invalid immediate: %ld is too large"),
17773 (long) value);
17774 newval |= value;
17775 md_number_to_chars (buf, newval, THUMB_SIZE);
17776 break;
17777
17778 case BFD_RELOC_ARM_THUMB_SHIFT:
17779 /* 5bit shift value (0..32). LSL cannot take 32. */
17780 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
17781 temp = newval & 0xf800;
17782 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
17783 as_bad_where (fixP->fx_file, fixP->fx_line,
17784 _("invalid shift value: %ld"), (long) value);
17785 /* Shifts of zero must be encoded as LSL. */
17786 if (value == 0)
17787 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
17788 /* Shifts of 32 are encoded as zero. */
17789 else if (value == 32)
17790 value = 0;
17791 newval |= value << 6;
17792 md_number_to_chars (buf, newval, THUMB_SIZE);
17793 break;
17794
17795 case BFD_RELOC_VTABLE_INHERIT:
17796 case BFD_RELOC_VTABLE_ENTRY:
17797 fixP->fx_done = 0;
17798 return;
17799
17800 case BFD_RELOC_ARM_MOVW:
17801 case BFD_RELOC_ARM_MOVT:
17802 case BFD_RELOC_ARM_THUMB_MOVW:
17803 case BFD_RELOC_ARM_THUMB_MOVT:
17804 if (fixP->fx_done || !seg->use_rela_p)
17805 {
17806 /* REL format relocations are limited to a 16-bit addend. */
17807 if (!fixP->fx_done)
17808 {
17809 if (value < -0x1000 || value > 0xffff)
17810 as_bad_where (fixP->fx_file, fixP->fx_line,
17811 _("offset too big"));
17812 }
17813 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
17814 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
17815 {
17816 value >>= 16;
17817 }
17818
17819 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
17820 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
17821 {
17822 newval = get_thumb32_insn (buf);
17823 newval &= 0xfbf08f00;
17824 newval |= (value & 0xf000) << 4;
17825 newval |= (value & 0x0800) << 15;
17826 newval |= (value & 0x0700) << 4;
17827 newval |= (value & 0x00ff);
17828 put_thumb32_insn (buf, newval);
17829 }
17830 else
17831 {
17832 newval = md_chars_to_number (buf, 4);
17833 newval &= 0xfff0f000;
17834 newval |= value & 0x0fff;
17835 newval |= (value & 0xf000) << 4;
17836 md_number_to_chars (buf, newval, 4);
17837 }
17838 }
17839 return;
17840
17841 case BFD_RELOC_UNUSED:
17842 default:
17843 as_bad_where (fixP->fx_file, fixP->fx_line,
17844 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
17845 }
17846 }
17847
17848 /* Translate internal representation of relocation info to BFD target
17849 format. */
17850
17851 arelent *
17852 tc_gen_reloc (asection *section, fixS *fixp)
17853 {
17854 arelent * reloc;
17855 bfd_reloc_code_real_type code;
17856
17857 reloc = xmalloc (sizeof (arelent));
17858
17859 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
17860 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
17861 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
17862
17863 if (fixp->fx_pcrel)
17864 {
17865 if (section->use_rela_p)
17866 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
17867 else
17868 fixp->fx_offset = reloc->address;
17869 }
17870 reloc->addend = fixp->fx_offset;
17871
17872 switch (fixp->fx_r_type)
17873 {
17874 case BFD_RELOC_8:
17875 if (fixp->fx_pcrel)
17876 {
17877 code = BFD_RELOC_8_PCREL;
17878 break;
17879 }
17880
17881 case BFD_RELOC_16:
17882 if (fixp->fx_pcrel)
17883 {
17884 code = BFD_RELOC_16_PCREL;
17885 break;
17886 }
17887
17888 case BFD_RELOC_32:
17889 if (fixp->fx_pcrel)
17890 {
17891 code = BFD_RELOC_32_PCREL;
17892 break;
17893 }
17894
17895 case BFD_RELOC_ARM_MOVW:
17896 if (fixp->fx_pcrel)
17897 {
17898 code = BFD_RELOC_ARM_MOVW_PCREL;
17899 break;
17900 }
17901
17902 case BFD_RELOC_ARM_MOVT:
17903 if (fixp->fx_pcrel)
17904 {
17905 code = BFD_RELOC_ARM_MOVT_PCREL;
17906 break;
17907 }
17908
17909 case BFD_RELOC_ARM_THUMB_MOVW:
17910 if (fixp->fx_pcrel)
17911 {
17912 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
17913 break;
17914 }
17915
17916 case BFD_RELOC_ARM_THUMB_MOVT:
17917 if (fixp->fx_pcrel)
17918 {
17919 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
17920 break;
17921 }
17922
17923 case BFD_RELOC_NONE:
17924 case BFD_RELOC_ARM_PCREL_BRANCH:
17925 case BFD_RELOC_ARM_PCREL_BLX:
17926 case BFD_RELOC_RVA:
17927 case BFD_RELOC_THUMB_PCREL_BRANCH7:
17928 case BFD_RELOC_THUMB_PCREL_BRANCH9:
17929 case BFD_RELOC_THUMB_PCREL_BRANCH12:
17930 case BFD_RELOC_THUMB_PCREL_BRANCH20:
17931 case BFD_RELOC_THUMB_PCREL_BRANCH23:
17932 case BFD_RELOC_THUMB_PCREL_BRANCH25:
17933 case BFD_RELOC_THUMB_PCREL_BLX:
17934 case BFD_RELOC_VTABLE_ENTRY:
17935 case BFD_RELOC_VTABLE_INHERIT:
17936 code = fixp->fx_r_type;
17937 break;
17938
17939 case BFD_RELOC_ARM_LITERAL:
17940 case BFD_RELOC_ARM_HWLITERAL:
17941 /* If this is called then a literal has
17942 been referenced across a section boundary. */
17943 as_bad_where (fixp->fx_file, fixp->fx_line,
17944 _("literal referenced across section boundary"));
17945 return NULL;
17946
17947 #ifdef OBJ_ELF
17948 case BFD_RELOC_ARM_GOT32:
17949 case BFD_RELOC_ARM_GOTOFF:
17950 case BFD_RELOC_ARM_PLT32:
17951 case BFD_RELOC_ARM_TARGET1:
17952 case BFD_RELOC_ARM_ROSEGREL32:
17953 case BFD_RELOC_ARM_SBREL32:
17954 case BFD_RELOC_ARM_PREL31:
17955 case BFD_RELOC_ARM_TARGET2:
17956 case BFD_RELOC_ARM_TLS_LE32:
17957 case BFD_RELOC_ARM_TLS_LDO32:
17958 case BFD_RELOC_ARM_PCREL_CALL:
17959 case BFD_RELOC_ARM_PCREL_JUMP:
17960 code = fixp->fx_r_type;
17961 break;
17962
17963 case BFD_RELOC_ARM_TLS_GD32:
17964 case BFD_RELOC_ARM_TLS_IE32:
17965 case BFD_RELOC_ARM_TLS_LDM32:
17966 /* BFD will include the symbol's address in the addend.
17967 But we don't want that, so subtract it out again here. */
17968 if (!S_IS_COMMON (fixp->fx_addsy))
17969 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
17970 code = fixp->fx_r_type;
17971 break;
17972 #endif
17973
17974 case BFD_RELOC_ARM_IMMEDIATE:
17975 as_bad_where (fixp->fx_file, fixp->fx_line,
17976 _("internal relocation (type: IMMEDIATE) not fixed up"));
17977 return NULL;
17978
17979 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17980 as_bad_where (fixp->fx_file, fixp->fx_line,
17981 _("ADRL used for a symbol not defined in the same file"));
17982 return NULL;
17983
17984 case BFD_RELOC_ARM_OFFSET_IMM:
17985 if (section->use_rela_p)
17986 {
17987 code = fixp->fx_r_type;
17988 break;
17989 }
17990
17991 if (fixp->fx_addsy != NULL
17992 && !S_IS_DEFINED (fixp->fx_addsy)
17993 && S_IS_LOCAL (fixp->fx_addsy))
17994 {
17995 as_bad_where (fixp->fx_file, fixp->fx_line,
17996 _("undefined local label `%s'"),
17997 S_GET_NAME (fixp->fx_addsy));
17998 return NULL;
17999 }
18000
18001 as_bad_where (fixp->fx_file, fixp->fx_line,
18002 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
18003 return NULL;
18004
18005 default:
18006 {
18007 char * type;
18008
18009 switch (fixp->fx_r_type)
18010 {
18011 case BFD_RELOC_NONE: type = "NONE"; break;
18012 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
18013 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
18014 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
18015 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
18016 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
18017 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
18018 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
18019 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
18020 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
18021 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
18022 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
18023 default: type = _("<unknown>"); break;
18024 }
18025 as_bad_where (fixp->fx_file, fixp->fx_line,
18026 _("cannot represent %s relocation in this object file format"),
18027 type);
18028 return NULL;
18029 }
18030 }
18031
18032 #ifdef OBJ_ELF
18033 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
18034 && GOT_symbol
18035 && fixp->fx_addsy == GOT_symbol)
18036 {
18037 code = BFD_RELOC_ARM_GOTPC;
18038 reloc->addend = fixp->fx_offset = reloc->address;
18039 }
18040 #endif
18041
18042 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
18043
18044 if (reloc->howto == NULL)
18045 {
18046 as_bad_where (fixp->fx_file, fixp->fx_line,
18047 _("cannot represent %s relocation in this object file format"),
18048 bfd_get_reloc_code_name (code));
18049 return NULL;
18050 }
18051
18052 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
18053 vtable entry to be used in the relocation's section offset. */
18054 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
18055 reloc->address = fixp->fx_offset;
18056
18057 return reloc;
18058 }
18059
18060 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
18061
18062 void
18063 cons_fix_new_arm (fragS * frag,
18064 int where,
18065 int size,
18066 expressionS * exp)
18067 {
18068 bfd_reloc_code_real_type type;
18069 int pcrel = 0;
18070
18071 /* Pick a reloc.
18072 FIXME: @@ Should look at CPU word size. */
18073 switch (size)
18074 {
18075 case 1:
18076 type = BFD_RELOC_8;
18077 break;
18078 case 2:
18079 type = BFD_RELOC_16;
18080 break;
18081 case 4:
18082 default:
18083 type = BFD_RELOC_32;
18084 break;
18085 case 8:
18086 type = BFD_RELOC_64;
18087 break;
18088 }
18089
18090 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
18091 }
18092
18093 #if defined OBJ_COFF || defined OBJ_ELF
18094 void
18095 arm_validate_fix (fixS * fixP)
18096 {
18097 /* If the destination of the branch is a defined symbol which does not have
18098 the THUMB_FUNC attribute, then we must be calling a function which has
18099 the (interfacearm) attribute. We look for the Thumb entry point to that
18100 function and change the branch to refer to that function instead. */
18101 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
18102 && fixP->fx_addsy != NULL
18103 && S_IS_DEFINED (fixP->fx_addsy)
18104 && ! THUMB_IS_FUNC (fixP->fx_addsy))
18105 {
18106 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
18107 }
18108 }
18109 #endif
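/* Illustrative sketch (not part of the original sources): given a Thumb
   caller such as

	.thumb
	bl	arm_func	@ arm_func is ARM code built for interworking

   the BRANCH23 fixup above is redirected so that it refers to the
   Thumb entry point that find_real_start locates for arm_func, rather
   than to arm_func itself.  */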
18110
18111 int
18112 arm_force_relocation (struct fix * fixp)
18113 {
18114 #if defined (OBJ_COFF) && defined (TE_PE)
18115 if (fixp->fx_r_type == BFD_RELOC_RVA)
18116 return 1;
18117 #endif
18118
18119 /* Resolve these relocations even if the symbol is extern or weak. */
18120 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
18121 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
18122 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
18123 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
18124 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
18125 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
18126 return 0;
18127
18128 return generic_force_reloc (fixp);
18129 }
18130
18131 #ifdef OBJ_COFF
18132 bfd_boolean
18133 arm_fix_adjustable (fixS * fixP)
18134 {
18135 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
18136 local labels from being added to the output symbol table when they
18137 are used with the ADRL pseudo op. The ADRL relocation should always
18138 be resolved before the binary is emitted, so it is safe to say that
18139 it is adjustable. */
18140 if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
18141 return 1;
18142
18143 /* This is a hack for the gas/all/redef2.s test. This test causes symbols
18144 to be cloned, and without this test relocs would still be generated
18145 against the original, pre-cloned symbol. Such symbols would not appear
18146 in the symbol table however, and so a valid reloc could not be
18147 generated. So check to see if the fixup is against a symbol which has
18148 been removed from the symbol chain, and if it is, then allow it to be
18149 adjusted into a reloc against a section symbol. */
18150 if (fixP->fx_addsy != NULL
18151 && ! S_IS_LOCAL (fixP->fx_addsy)
18152 && symbol_next (fixP->fx_addsy) == NULL
18153 && symbol_next (fixP->fx_addsy) == symbol_previous (fixP->fx_addsy))
18154 return 1;
18155
18156 return 0;
18157 }
18158 #endif
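/* Example (illustrative only): the ADRL case above covers code like

	adrl	r0, local_label

   where the pseudo-op is expanded into an ADD/SUB pair and the
   BFD_RELOC_ARM_ADRL_IMMEDIATE fixup is resolved entirely within the
   assembler, so the local label never needs to reach the symbol
   table.  */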
18159
18160 #ifdef OBJ_ELF
18161 /* Relocations against function names must be left unadjusted,
18162 so that the linker can use this information to generate interworking
18163 stubs. The MIPS version of this function
18164 also prevents relocations that are mips-16 specific, but I do not
18165 know why it does this.
18166
18167 FIXME:
18168 There is one other problem that ought to be addressed here, but
18169 which currently is not: Taking the address of a label (rather
18170 than a function) and then later jumping to that address. Such
18171 addresses also ought to have their bottom bit set (assuming that
18172 they reside in Thumb code), but at the moment they will not. */
18173
18174 bfd_boolean
18175 arm_fix_adjustable (fixS * fixP)
18176 {
18177 if (fixP->fx_addsy == NULL)
18178 return 1;
18179
18180 /* Preserve relocations against symbols with function type. */
18181 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
18182 return 0;
18183
18184 if (THUMB_IS_FUNC (fixP->fx_addsy)
18185 && fixP->fx_subsy == NULL)
18186 return 0;
18187
18188 /* We need the symbol name for the VTABLE entries. */
18189 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
18190 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
18191 return 0;
18192
18193 /* Don't allow symbols to be discarded on GOT related relocs. */
18194 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
18195 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
18196 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
18197 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
18198 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
18199 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
18200 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
18201 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
18202 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
18203 return 0;
18204
18205 return 1;
18206 }
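/* Illustrative example (an assumption, not from the original source): with

	.type	func, %function
	bl	func

   the fixup against `func' is kept, because BSF_FUNCTION is set and the
   test above returns 0; the linker therefore still sees the function
   symbol and can insert an interworking stub.  A reference to a plain
   data label would instead be adjusted to a section-relative reloc.  */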
18207
18208 const char *
18209 elf32_arm_target_format (void)
18210 {
18211 #ifdef TE_SYMBIAN
18212 return (target_big_endian
18213 ? "elf32-bigarm-symbian"
18214 : "elf32-littlearm-symbian");
18215 #elif defined (TE_VXWORKS)
18216 return (target_big_endian
18217 ? "elf32-bigarm-vxworks"
18218 : "elf32-littlearm-vxworks");
18219 #else
18220 if (target_big_endian)
18221 return "elf32-bigarm";
18222 else
18223 return "elf32-littlearm";
18224 #endif
18225 }
18226
18227 void
18228 armelf_frob_symbol (symbolS * symp,
18229 int * puntp)
18230 {
18231 elf_frob_symbol (symp, puntp);
18232 }
18233 #endif
18234
18235 /* MD interface: Finalization. */
18236
18237 /* A good place to do this, although this was probably not intended
18238 for this kind of use. We need to dump the literal pool before
18239 references are made to a null symbol pointer. */
18240
18241 void
18242 arm_cleanup (void)
18243 {
18244 literal_pool * pool;
18245
18246 for (pool = list_of_pools; pool; pool = pool->next)
18247 {
18248 /* Put it at the end of the relevant section. */
18249 subseg_set (pool->section, pool->sub_section);
18250 #ifdef OBJ_ELF
18251 arm_elf_change_section ();
18252 #endif
18253 s_ltorg (0);
18254 }
18255 }
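/* Worked example (illustrative): a pending literal created by

	ldr	r0, =0x12345678

   with no explicit .ltorg before the end of the input is emitted here,
   since the loop above switches back to each pool's section and calls
   s_ltorg for it.  */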
18256
18257 /* Adjust the symbol table. This marks Thumb symbols as distinct from
18258 ARM ones. */
18259
18260 void
18261 arm_adjust_symtab (void)
18262 {
18263 #ifdef OBJ_COFF
18264 symbolS * sym;
18265
18266 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
18267 {
18268 if (ARM_IS_THUMB (sym))
18269 {
18270 if (THUMB_IS_FUNC (sym))
18271 {
18272 /* Mark the symbol as a Thumb function. */
18273 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
18274 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
18275 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
18276
18277 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
18278 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
18279 else
18280 as_bad (_("%s: unexpected function type: %d"),
18281 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
18282 }
18283 else switch (S_GET_STORAGE_CLASS (sym))
18284 {
18285 case C_EXT:
18286 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
18287 break;
18288 case C_STAT:
18289 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
18290 break;
18291 case C_LABEL:
18292 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
18293 break;
18294 default:
18295 /* Do nothing. */
18296 break;
18297 }
18298 }
18299
18300 if (ARM_IS_INTERWORK (sym))
18301 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
18302 }
18303 #endif
18304 #ifdef OBJ_ELF
18305 symbolS * sym;
18306 char bind;
18307
18308 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
18309 {
18310 if (ARM_IS_THUMB (sym))
18311 {
18312 elf_symbol_type * elf_sym;
18313
18314 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
18315 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
18316
18317 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
18318 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
18319 {
18320 /* If it's a .thumb_func, declare it as such;
18321 otherwise tag the label as .code 16. */
18322 if (THUMB_IS_FUNC (sym))
18323 elf_sym->internal_elf_sym.st_info =
18324 ELF_ST_INFO (bind, STT_ARM_TFUNC);
18325 else
18326 elf_sym->internal_elf_sym.st_info =
18327 ELF_ST_INFO (bind, STT_ARM_16BIT);
18328 }
18329 }
18330 }
18331 #endif
18332 }
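/* Sketch of the effect (not from the original source): after

	.thumb_func
	thumb_entry:
	bx	lr

   the COFF path above gives thumb_entry a C_THUMB* storage class, and
   the ELF path retags its st_info as STT_ARM_TFUNC; a plain label in
   .code 16 would instead be marked STT_ARM_16BIT.  */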
18333
18334 /* MD interface: Initialization. */
18335
18336 static void
18337 set_constant_flonums (void)
18338 {
18339 int i;
18340
18341 for (i = 0; i < NUM_FLOAT_VALS; i++)
18342 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
18343 abort ();
18344 }
18345
18346 void
18347 md_begin (void)
18348 {
18349 unsigned mach;
18350 unsigned int i;
18351
18352 if ( (arm_ops_hsh = hash_new ()) == NULL
18353 || (arm_cond_hsh = hash_new ()) == NULL
18354 || (arm_shift_hsh = hash_new ()) == NULL
18355 || (arm_psr_hsh = hash_new ()) == NULL
18356 || (arm_v7m_psr_hsh = hash_new ()) == NULL
18357 || (arm_reg_hsh = hash_new ()) == NULL
18358 || (arm_reloc_hsh = hash_new ()) == NULL
18359 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
18360 as_fatal (_("virtual memory exhausted"));
18361
18362 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
18363 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
18364 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
18365 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
18366 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
18367 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
18368 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
18369 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
18370 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
18371 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
18372 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
18373 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
18374 for (i = 0;
18375 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
18376 i++)
18377 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
18378 (PTR) (barrier_opt_names + i));
18379 #ifdef OBJ_ELF
18380 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
18381 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
18382 #endif
18383
18384 set_constant_flonums ();
18385
18386 /* Set the cpu variant based on the command-line options. We prefer
18387 -mcpu= over -march= if both are set (as for GCC); and we prefer
18388 -mfpu= over any other way of setting the floating point unit.
18389 Use of legacy options with new options is faulted. */
18390 if (legacy_cpu)
18391 {
18392 if (mcpu_cpu_opt || march_cpu_opt)
18393 as_bad (_("use of old and new-style options to set CPU type"));
18394
18395 mcpu_cpu_opt = legacy_cpu;
18396 }
18397 else if (!mcpu_cpu_opt)
18398 mcpu_cpu_opt = march_cpu_opt;
18399
18400 if (legacy_fpu)
18401 {
18402 if (mfpu_opt)
18403 as_bad (_("use of old and new-style options to set FPU type"));
18404
18405 mfpu_opt = legacy_fpu;
18406 }
18407 else if (!mfpu_opt)
18408 {
18409 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
18410 /* Some environments specify a default FPU. If they don't, infer it
18411 from the processor. */
18412 if (mcpu_fpu_opt)
18413 mfpu_opt = mcpu_fpu_opt;
18414 else
18415 mfpu_opt = march_fpu_opt;
18416 #else
18417 mfpu_opt = &fpu_default;
18418 #endif
18419 }
18420
18421 if (!mfpu_opt)
18422 {
18423 if (!mcpu_cpu_opt)
18424 mfpu_opt = &fpu_default;
18425 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
18426 mfpu_opt = &fpu_arch_vfp_v2;
18427 else
18428 mfpu_opt = &fpu_arch_fpa;
18429 }
18430
18431 #ifdef CPU_DEFAULT
18432 if (!mcpu_cpu_opt)
18433 {
18434 mcpu_cpu_opt = &cpu_default;
18435 selected_cpu = cpu_default;
18436 }
18437 #else
18438 if (mcpu_cpu_opt)
18439 selected_cpu = *mcpu_cpu_opt;
18440 else
18441 mcpu_cpu_opt = &arm_arch_any;
18442 #endif
18443
18444 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18445
18446 arm_arch_used = thumb_arch_used = arm_arch_none;
18447
18448 #if defined OBJ_COFF || defined OBJ_ELF
18449 {
18450 unsigned int flags = 0;
18451
18452 #if defined OBJ_ELF
18453 flags = meabi_flags;
18454
18455 switch (meabi_flags)
18456 {
18457 case EF_ARM_EABI_UNKNOWN:
18458 #endif
18459 /* Set the flags in the private structure. */
18460 if (uses_apcs_26) flags |= F_APCS26;
18461 if (support_interwork) flags |= F_INTERWORK;
18462 if (uses_apcs_float) flags |= F_APCS_FLOAT;
18463 if (pic_code) flags |= F_PIC;
18464 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
18465 flags |= F_SOFT_FLOAT;
18466
18467 switch (mfloat_abi_opt)
18468 {
18469 case ARM_FLOAT_ABI_SOFT:
18470 case ARM_FLOAT_ABI_SOFTFP:
18471 flags |= F_SOFT_FLOAT;
18472 break;
18473
18474 case ARM_FLOAT_ABI_HARD:
18475 if (flags & F_SOFT_FLOAT)
18476 as_bad (_("hard-float conflicts with specified fpu"));
18477 break;
18478 }
18479
18480 /* Using pure-endian doubles (even if soft-float). */
18481 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
18482 flags |= F_VFP_FLOAT;
18483
18484 #if defined OBJ_ELF
18485 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
18486 flags |= EF_ARM_MAVERICK_FLOAT;
18487 break;
18488
18489 case EF_ARM_EABI_VER4:
18490 case EF_ARM_EABI_VER5:
18491 /* No additional flags to set. */
18492 break;
18493
18494 default:
18495 abort ();
18496 }
18497 #endif
18498 bfd_set_private_flags (stdoutput, flags);
18499
18500 /* We have run out of flags in the COFF header to encode the
18501 status of ATPCS support, so instead we create a dummy,
18502 empty, debug section called .arm.atpcs. */
18503 if (atpcs)
18504 {
18505 asection * sec;
18506
18507 sec = bfd_make_section (stdoutput, ".arm.atpcs");
18508
18509 if (sec != NULL)
18510 {
18511 bfd_set_section_flags
18512 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
18513 bfd_set_section_size (stdoutput, sec, 0);
18514 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
18515 }
18516 }
18517 }
18518 #endif
18519
18520 /* Record the CPU type as well. */
18521 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
18522 mach = bfd_mach_arm_iWMMXt;
18523 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
18524 mach = bfd_mach_arm_XScale;
18525 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
18526 mach = bfd_mach_arm_ep9312;
18527 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
18528 mach = bfd_mach_arm_5TE;
18529 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
18530 {
18531 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
18532 mach = bfd_mach_arm_5T;
18533 else
18534 mach = bfd_mach_arm_5;
18535 }
18536 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
18537 {
18538 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
18539 mach = bfd_mach_arm_4T;
18540 else
18541 mach = bfd_mach_arm_4;
18542 }
18543 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
18544 mach = bfd_mach_arm_3M;
18545 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
18546 mach = bfd_mach_arm_3;
18547 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
18548 mach = bfd_mach_arm_2a;
18549 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
18550 mach = bfd_mach_arm_2;
18551 else
18552 mach = bfd_mach_arm_unknown;
18553
18554 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
18555 }
18556
18557 /* Command line processing. */
18558
18559 /* md_parse_option
18560 Invocation line includes a switch not recognized by the base assembler.
18561 See if it's a processor-specific option.
18562
18563 This routine is somewhat complicated by the need for backwards
18564 compatibility (since older releases of gcc can't be changed).
18565 The new options try to make the interface as compatible as
18566 possible with GCC.
18567
18568 New options (supported) are:
18569
18570 -mcpu=<cpu name> Assemble for selected processor
18571 -march=<architecture name> Assemble for selected architecture
18572 -mfpu=<fpu architecture> Assemble for selected FPU.
18573 -EB/-mbig-endian Big-endian
18574 -EL/-mlittle-endian Little-endian
18575 -k Generate PIC code
18576 -mthumb Start in Thumb mode
18577 -mthumb-interwork Code supports ARM/Thumb interworking
18578
18579 For now we will also provide support for:
18580
18581 -mapcs-32 32-bit Program counter
18582 -mapcs-26 26-bit Program counter
18583 -mapcs-float Floats passed in FP registers
18584 -mapcs-reentrant Reentrant code
18585 -matpcs
18586 (sometime these will probably be replaced with -mapcs=<list of options>
18587 and -matpcs=<list of options>)
18588
18589 The remaining options are only supported for backwards compatibility.
18590 Cpu variants, the arm part is optional:
18591 -m[arm]1 Currently not supported.
18592 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
18593 -m[arm]3 Arm 3 processor
18594 -m[arm]6[xx], Arm 6 processors
18595 -m[arm]7[xx][t][[d]m] Arm 7 processors
18596 -m[arm]8[10] Arm 8 processors
18597 -m[arm]9[20][tdmi] Arm 9 processors
18598 -mstrongarm[110[0]] StrongARM processors
18599 -mxscale XScale processors
18600 -m[arm]v[2345[t[e]]] Arm architectures
18601 -mall All (except the ARM1)
18602 FP variants:
18603 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
18604 -mfpe-old (No float load/store multiples)
18605 -mvfpxd VFP Single precision
18606 -mvfp All VFP
18607 -mno-fpu Disable all floating point instructions
18608
18609 The following CPU names are recognized:
18610 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
18611 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
18612 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
18613 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
18614 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
18615 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
18616 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
18617
18618 */
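/* A few illustrative invocations (assumed command names, shown only as
   examples of the options documented above):

	as -mcpu=arm7tdmi -mthumb-interwork file.s
	as -march=armv5te -mfpu=vfp -mfloat-abi=softfp file.s
	as -mbig-endian -k file.s

   Legacy spellings such as -marm7tdmi are still accepted, but map onto
   the new-style options via arm_legacy_opts below.  */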
18619
18620 const char * md_shortopts = "m:k";
18621
18622 #ifdef ARM_BI_ENDIAN
18623 #define OPTION_EB (OPTION_MD_BASE + 0)
18624 #define OPTION_EL (OPTION_MD_BASE + 1)
18625 #else
18626 #if TARGET_BYTES_BIG_ENDIAN
18627 #define OPTION_EB (OPTION_MD_BASE + 0)
18628 #else
18629 #define OPTION_EL (OPTION_MD_BASE + 1)
18630 #endif
18631 #endif
18632
18633 struct option md_longopts[] =
18634 {
18635 #ifdef OPTION_EB
18636 {"EB", no_argument, NULL, OPTION_EB},
18637 #endif
18638 #ifdef OPTION_EL
18639 {"EL", no_argument, NULL, OPTION_EL},
18640 #endif
18641 {NULL, no_argument, NULL, 0}
18642 };
18643
18644 size_t md_longopts_size = sizeof (md_longopts);
18645
18646 struct arm_option_table
18647 {
18648 char *option; /* Option name to match. */
18649 char *help; /* Help information. */
18650 int *var; /* Variable to change. */
18651 int value; /* What to change it to. */
18652 char *deprecated; /* If non-null, print this message. */
18653 };
18654
18655 struct arm_option_table arm_opts[] =
18656 {
18657 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
18658 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
18659 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
18660 &support_interwork, 1, NULL},
18661 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
18662 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
18663 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
18664 1, NULL},
18665 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
18666 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
18667 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
18668 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
18669 NULL},
18670
18671 /* These are recognized by the assembler, but have no effect on code. */
18672 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
18673 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
18674 {NULL, NULL, NULL, 0, NULL}
18675 };
18676
18677 struct arm_legacy_option_table
18678 {
18679 char *option; /* Option name to match. */
18680 const arm_feature_set **var; /* Variable to change. */
18681 const arm_feature_set value; /* What to change it to. */
18682 char *deprecated; /* If non-null, print this message. */
18683 };
18684
18685 const struct arm_legacy_option_table arm_legacy_opts[] =
18686 {
18687 /* DON'T add any new processors to this list -- we want the whole list
18688 to go away... Add them to the processors table instead. */
18689 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
18690 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
18691 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
18692 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
18693 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
18694 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
18695 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
18696 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
18697 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
18698 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
18699 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
18700 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
18701 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
18702 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
18703 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
18704 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
18705 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
18706 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
18707 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
18708 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
18709 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
18710 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
18711 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
18712 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
18713 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
18714 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
18715 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
18716 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
18717 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
18718 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
18719 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
18720 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
18721 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
18722 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
18723 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
18724 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
18725 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
18726 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
18727 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
18728 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
18729 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
18730 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
18731 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
18732 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
18733 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
18734 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
18735 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
18736 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
18737 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
18738 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
18739 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
18740 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
18741 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
18742 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
18743 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
18744 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
18745 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
18746 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
18747 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
18748 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
18749 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
18750 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
18751 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
18752 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
18753 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
18754 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
18755 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
18756 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
18757 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
18758 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
18759 N_("use -mcpu=strongarm110")},
18760 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
18761 N_("use -mcpu=strongarm1100")},
18762 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
18763 N_("use -mcpu=strongarm1110")},
18764 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
18765 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
18766 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
18767
18768 /* Architecture variants -- don't add any more to this list either. */
18769 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
18770 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
18771 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
18772 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
18773 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
18774 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
18775 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
18776 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
18777 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
18778 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
18779 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
18780 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
18781 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
18782 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
18783 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
18784 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
18785 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
18786 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
18787
18788 /* Floating point variants -- don't add any more to this list either. */
18789 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
18790 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
18791 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
18792 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
18793 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
18794
18795 {NULL, NULL, ARM_ARCH_NONE, NULL}
18796 };
18797
18798 struct arm_cpu_option_table
18799 {
18800 char *name;
18801 const arm_feature_set value;
18802 /* For some CPUs we assume an FPU unless the user explicitly sets
18803 -mfpu=... */
18804 const arm_feature_set default_fpu;
18805 /* The canonical name of the CPU, or NULL to use NAME converted to upper
18806 case. */
18807 const char *canonical_name;
18808 };
18809
18810 /* This list should, at a minimum, contain all the cpu names
18811 recognized by GCC. */
18812 static const struct arm_cpu_option_table arm_cpus[] =
18813 {
18814 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
18815 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
18816 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
18817 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
18818 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
18819 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18820 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18821 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18822 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18823 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18824 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18825 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
18826 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18827 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
18828 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18829 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
18830 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18831 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18832 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18833 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18834 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
18835 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18836 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
18837 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
18838 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18839 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18840 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18841 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
18842 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
18843 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
18844 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
18845 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
18846 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
18847 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
18848 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
18849 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
18850 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
18851 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
18852 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
18853 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
18854 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
18855 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
18856 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
18857 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
18858 /* For V5 or later processors we default to using VFP; but the user
18859 should really set the FPU type explicitly. */
18860 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
18861 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
18862 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
18863 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
18864 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
18865 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
18866 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
18867 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
18868 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
18869 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
18870 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
18871 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
18872 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
18873 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
18874 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
18875 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
18876 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
18877 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
18878 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
18879 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
18880 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
18881 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
18882 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
18883 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
18884 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
18885 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
18886 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
18887 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
18888 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
18889 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
18890 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
18891 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
18892 | FPU_NEON_EXT_V1),
18893 NULL},
18894 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
18895 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
18896 /* ??? XSCALE is really an architecture. */
18897 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
18898 /* ??? iwmmxt is not a processor. */
18899 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
18900 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
18901 /* Maverick */
18902 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
18903 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
18904 };
18905
18906 struct arm_arch_option_table
18907 {
18908 char *name;
18909 const arm_feature_set value;
18910 const arm_feature_set default_fpu;
18911 };
18912
18913 /* This list should, at a minimum, contain all the architecture names
18914 recognized by GCC. */
18915 static const struct arm_arch_option_table arm_archs[] =
18916 {
18917 {"all", ARM_ANY, FPU_ARCH_FPA},
18918 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
18919 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
18920 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
18921 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
18922 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
18923 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
18924 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
18925 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
18926 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
18927 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
18928 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
18929 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
18930 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
18931 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
18932 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
18933 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
18934 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
18935 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
18936 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
18937 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
18938 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
18939 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
18940 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
18941 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
18942 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
18943 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
18944 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
18945 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
18946 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
18947 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
18948 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
18949 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
18950 };
18951
18952 /* ISA extensions in the co-processor space. */
18953 struct arm_option_cpu_value_table
18954 {
18955 char *name;
18956 const arm_feature_set value;
18957 };
18958
18959 static const struct arm_option_cpu_value_table arm_extensions[] =
18960 {
18961 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
18962 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
18963 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
18964 {NULL, ARM_ARCH_NONE}
18965 };
18966
18967 /* This list should, at a minimum, contain all the fpu names
18968 recognized by GCC. */
18969 static const struct arm_option_cpu_value_table arm_fpus[] =
18970 {
18971 {"softfpa", FPU_NONE},
18972 {"fpe", FPU_ARCH_FPE},
18973 {"fpe2", FPU_ARCH_FPE},
18974 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
18975 {"fpa", FPU_ARCH_FPA},
18976 {"fpa10", FPU_ARCH_FPA},
18977 {"fpa11", FPU_ARCH_FPA},
18978 {"arm7500fe", FPU_ARCH_FPA},
18979 {"softvfp", FPU_ARCH_VFP},
18980 {"softvfp+vfp", FPU_ARCH_VFP_V2},
18981 {"vfp", FPU_ARCH_VFP_V2},
18982 {"vfp9", FPU_ARCH_VFP_V2},
18983 {"vfp3", FPU_ARCH_VFP_V3},
18984 {"vfp10", FPU_ARCH_VFP_V2},
18985 {"vfp10-r0", FPU_ARCH_VFP_V1},
18986 {"vfpxd", FPU_ARCH_VFP_V1xD},
18987 {"arm1020t", FPU_ARCH_VFP_V1},
18988 {"arm1020e", FPU_ARCH_VFP_V2},
18989 {"arm1136jfs", FPU_ARCH_VFP_V2},
18990 {"arm1136jf-s", FPU_ARCH_VFP_V2},
18991 {"maverick", FPU_ARCH_MAVERICK},
18992 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
18993 {NULL, ARM_ARCH_NONE}
18994 };
18995
18996 struct arm_option_value_table
18997 {
18998 char *name;
18999 long value;
19000 };
19001
19002 static const struct arm_option_value_table arm_float_abis[] =
19003 {
19004 {"hard", ARM_FLOAT_ABI_HARD},
19005 {"softfp", ARM_FLOAT_ABI_SOFTFP},
19006 {"soft", ARM_FLOAT_ABI_SOFT},
19007 {NULL, 0}
19008 };
19009
19010 #ifdef OBJ_ELF
19011 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
19012 static const struct arm_option_value_table arm_eabis[] =
19013 {
19014 {"gnu", EF_ARM_EABI_UNKNOWN},
19015 {"4", EF_ARM_EABI_VER4},
19016 {"5", EF_ARM_EABI_VER5},
19017 {NULL, 0}
19018 };
19019 #endif
19020
19021 struct arm_long_option_table
19022 {
19023 char * option; /* Substring to match. */
19024 char * help; /* Help information. */
19025 int (* func) (char * subopt); /* Function to decode sub-option. */
19026 char * deprecated; /* If non-null, print this message. */
19027 };
19028
19029 static int
19030 arm_parse_extension (char * str, const arm_feature_set **opt_p)
19031 {
19032 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
19033
19034 /* Copy the feature set, so that we can modify it. */
19035 *ext_set = **opt_p;
19036 *opt_p = ext_set;
19037
19038 while (str != NULL && *str != 0)
19039 {
19040 const struct arm_option_cpu_value_table * opt;
19041 char * ext;
19042 int optlen;
19043
19044 if (*str != '+')
19045 {
19046 as_bad (_("invalid architectural extension"));
19047 return 0;
19048 }
19049
19050 str++;
19051 ext = strchr (str, '+');
19052
19053 if (ext != NULL)
19054 optlen = ext - str;
19055 else
19056 optlen = strlen (str);
19057
19058 if (optlen == 0)
19059 {
19060 as_bad (_("missing architectural extension"));
19061 return 0;
19062 }
19063
19064 for (opt = arm_extensions; opt->name != NULL; opt++)
19065 if (strncmp (opt->name, str, optlen) == 0)
19066 {
19067 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
19068 break;
19069 }
19070
19071 if (opt->name == NULL)
19072 {
19073 as_bad (_("unknown architectural extension `%s'"), str);
19074 return 0;
19075 }
19076
19077 str = ext;
19078 }
19079
19080 return 1;
19081 }
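/* Example (illustrative): for "-mcpu=xscale+iwmmxt" this function is
   handed the "+iwmmxt" suffix; each "+<name>" is looked up in
   arm_extensions and merged into a copy of the base feature set, so an
   unknown name such as "+foo" is rejected with an error.  */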
19082
19083 static int
19084 arm_parse_cpu (char * str)
19085 {
19086 const struct arm_cpu_option_table * opt;
19087 char * ext = strchr (str, '+');
19088 int optlen;
19089
19090 if (ext != NULL)
19091 optlen = ext - str;
19092 else
19093 optlen = strlen (str);
19094
19095 if (optlen == 0)
19096 {
19097 as_bad (_("missing cpu name `%s'"), str);
19098 return 0;
19099 }
19100
19101 for (opt = arm_cpus; opt->name != NULL; opt++)
19102 if (strncmp (opt->name, str, optlen) == 0)
19103 {
19104 mcpu_cpu_opt = &opt->value;
19105 mcpu_fpu_opt = &opt->default_fpu;
19106 if (opt->canonical_name)
19107 strcpy(selected_cpu_name, opt->canonical_name);
19108 else
19109 {
19110 int i;
19111 for (i = 0; i < optlen; i++)
19112 selected_cpu_name[i] = TOUPPER (opt->name[i]);
19113 selected_cpu_name[i] = 0;
19114 }
19115
19116 if (ext != NULL)
19117 return arm_parse_extension (ext, &mcpu_cpu_opt);
19118
19119 return 1;
19120 }
19121
19122 as_bad (_("unknown cpu `%s'"), str);
19123 return 0;
19124 }
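/* Illustrative behaviour of the name handling above: "-mcpu=arm920"
   picks up the canonical name "ARM920T" from arm_cpus, whereas
   "-mcpu=arm9e" has no canonical name and is simply upper-cased to
   "ARM9E" for selected_cpu_name.  */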
19125
19126 static int
19127 arm_parse_arch (char * str)
19128 {
19129 const struct arm_arch_option_table *opt;
19130 char *ext = strchr (str, '+');
19131 int optlen;
19132
19133 if (ext != NULL)
19134 optlen = ext - str;
19135 else
19136 optlen = strlen (str);
19137
19138 if (optlen == 0)
19139 {
19140 as_bad (_("missing architecture name `%s'"), str);
19141 return 0;
19142 }
19143
19144 for (opt = arm_archs; opt->name != NULL; opt++)
19145 if (streq (opt->name, str))
19146 {
19147 march_cpu_opt = &opt->value;
19148 march_fpu_opt = &opt->default_fpu;
19149 strcpy(selected_cpu_name, opt->name);
19150
19151 if (ext != NULL)
19152 return arm_parse_extension (ext, &march_cpu_opt);
19153
19154 return 1;
19155 }
19156
19157 as_bad (_("unknown architecture `%s'\n"), str);
19158 return 0;
19159 }
19160
19161 static int
19162 arm_parse_fpu (char * str)
19163 {
19164 const struct arm_option_cpu_value_table * opt;
19165
19166 for (opt = arm_fpus; opt->name != NULL; opt++)
19167 if (streq (opt->name, str))
19168 {
19169 mfpu_opt = &opt->value;
19170 return 1;
19171 }
19172
19173 as_bad (_("unknown floating point format `%s'\n"), str);
19174 return 0;
19175 }
19176
19177 static int
19178 arm_parse_float_abi (char * str)
19179 {
19180 const struct arm_option_value_table * opt;
19181
19182 for (opt = arm_float_abis; opt->name != NULL; opt++)
19183 if (streq (opt->name, str))
19184 {
19185 mfloat_abi_opt = opt->value;
19186 return 1;
19187 }
19188
19189 as_bad (_("unknown floating point abi `%s'\n"), str);
19190 return 0;
19191 }
19192
19193 #ifdef OBJ_ELF
19194 static int
19195 arm_parse_eabi (char * str)
19196 {
19197 const struct arm_option_value_table *opt;
19198
19199 for (opt = arm_eabis; opt->name != NULL; opt++)
19200 if (streq (opt->name, str))
19201 {
19202 meabi_flags = opt->value;
19203 return 1;
19204 }
19205 as_bad (_("unknown EABI `%s'\n"), str);
19206 return 0;
19207 }
19208 #endif
19209
19210 struct arm_long_option_table arm_long_opts[] =
19211 {
19212 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
19213 arm_parse_cpu, NULL},
19214 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
19215 arm_parse_arch, NULL},
19216 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
19217 arm_parse_fpu, NULL},
19218 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
19219 arm_parse_float_abi, NULL},
19220 #ifdef OBJ_ELF
19221 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
19222 arm_parse_eabi, NULL},
19223 #endif
19224 {NULL, NULL, 0, NULL}
19225 };
19226
19227 int
19228 md_parse_option (int c, char * arg)
19229 {
19230 struct arm_option_table *opt;
19231 const struct arm_legacy_option_table *fopt;
19232 struct arm_long_option_table *lopt;
19233
19234 switch (c)
19235 {
19236 #ifdef OPTION_EB
19237 case OPTION_EB:
19238 target_big_endian = 1;
19239 break;
19240 #endif
19241
19242 #ifdef OPTION_EL
19243 case OPTION_EL:
19244 target_big_endian = 0;
19245 break;
19246 #endif
19247
19248 case 'a':
19249 /* Listing option. Just ignore these, we don't support additional
19250 ones. */
19251 return 0;
19252
19253 default:
19254 for (opt = arm_opts; opt->option != NULL; opt++)
19255 {
19256 if (c == opt->option[0]
19257 && ((arg == NULL && opt->option[1] == 0)
19258 || streq (arg, opt->option + 1)))
19259 {
19260 #if WARN_DEPRECATED
19261 /* If the option is deprecated, tell the user. */
19262 if (opt->deprecated != NULL)
19263 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
19264 arg ? arg : "", _(opt->deprecated));
19265 #endif
19266
19267 if (opt->var != NULL)
19268 *opt->var = opt->value;
19269
19270 return 1;
19271 }
19272 }
19273
19274 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
19275 {
19276 if (c == fopt->option[0]
19277 && ((arg == NULL && fopt->option[1] == 0)
19278 || streq (arg, fopt->option + 1)))
19279 {
19280 #if WARN_DEPRECATED
19281 /* If the option is deprecated, tell the user. */
19282 if (fopt->deprecated != NULL)
19283 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
19284 arg ? arg : "", _(fopt->deprecated));
19285 #endif
19286
19287 if (fopt->var != NULL)
19288 *fopt->var = &fopt->value;
19289
19290 return 1;
19291 }
19292 }
19293
19294 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
19295 {
19296 /* These options are expected to have an argument. */
19297 if (c == lopt->option[0]
19298 && arg != NULL
19299 && strncmp (arg, lopt->option + 1,
19300 strlen (lopt->option + 1)) == 0)
19301 {
19302 #if WARN_DEPRECATED
19303 /* If the option is deprecated, tell the user. */
19304 if (lopt->deprecated != NULL)
19305 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
19306 _(lopt->deprecated));
19307 #endif
19308
19309 /* Call the sub-option parser. */
19310 return lopt->func (arg + strlen (lopt->option) - 1);
19311 }
19312 }
19313
19314 return 0;
19315 }
19316
19317 return 1;
19318 }
19319
19320 void
19321 md_show_usage (FILE * fp)
19322 {
19323 struct arm_option_table *opt;
19324 struct arm_long_option_table *lopt;
19325
19326 fprintf (fp, _(" ARM-specific assembler options:\n"));
19327
19328 for (opt = arm_opts; opt->option != NULL; opt++)
19329 if (opt->help != NULL)
19330 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
19331
19332 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
19333 if (lopt->help != NULL)
19334 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
19335
19336 #ifdef OPTION_EB
19337 fprintf (fp, _("\
19338 -EB assemble code for a big-endian cpu\n"));
19339 #endif
19340
19341 #ifdef OPTION_EL
19342 fprintf (fp, _("\
19343 -EL assemble code for a little-endian cpu\n"));
19344 #endif
19345 }
19346
19347
19348 #ifdef OBJ_ELF
19349 typedef struct
19350 {
19351 int val;
19352 arm_feature_set flags;
19353 } cpu_arch_ver_table;
19354
19355 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
19356 with the fewest features first. */
19357 static const cpu_arch_ver_table cpu_arch_ver[] =
19358 {
19359 {1, ARM_ARCH_V4},
19360 {2, ARM_ARCH_V4T},
19361 {3, ARM_ARCH_V5},
19362 {4, ARM_ARCH_V5TE},
19363 {5, ARM_ARCH_V5TEJ},
19364 {6, ARM_ARCH_V6},
19365 {7, ARM_ARCH_V6Z},
19366 {8, ARM_ARCH_V6K},
19367 {9, ARM_ARCH_V6T2},
19368 {10, ARM_ARCH_V7A},
19369 {10, ARM_ARCH_V7R},
19370 {10, ARM_ARCH_V7M},
19371 {0, ARM_ARCH_NONE}
19372 };
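/* Example of how the table is consumed (illustrative): a feature set
   equivalent to ARM_ARCH_V5TEJ matches the entries for values 1..5 in
   turn, so the loop in aeabi_set_public_attributes below leaves arch
   set to 5, the EABI value for ARMv5TEJ.  */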
19373
19374 /* Set the public EABI object attributes. */
19375 static void
19376 aeabi_set_public_attributes (void)
19377 {
19378 int arch;
19379 arm_feature_set flags;
19380 arm_feature_set tmp;
19381 const cpu_arch_ver_table *p;
19382
19383 /* Choose the architecture based on the capabilities of the requested cpu
19384 (if any) and/or the instructions actually used. */
19385 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
19386 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
19387 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
19388
19389 tmp = flags;
19390 arch = 0;
19391 for (p = cpu_arch_ver; p->val; p++)
19392 {
19393 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
19394 {
19395 arch = p->val;
19396 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
19397 }
19398 }
19399
19400 /* Tag_CPU_name. */
19401 if (selected_cpu_name[0])
19402 {
19403 char *p;
19404
19405 p = selected_cpu_name;
19406 if (strncmp(p, "armv", 4) == 0)
19407 {
19408 int i;
19409
19410 p += 4;
19411 for (i = 0; p[i]; i++)
19412 p[i] = TOUPPER (p[i]);
19413 }
19414 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
19415 }
19416 /* Tag_CPU_arch. */
19417 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
19418 /* Tag_CPU_arch_profile. */
19419 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
19420 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
19421 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
19422 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
19423 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
19424 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
19425 /* Tag_ARM_ISA_use. */
19426 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
19427 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
19428 /* Tag_THUMB_ISA_use. */
19429 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
19430 elf32_arm_add_eabi_attr_int (stdoutput, 9,
19431 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
19432 /* Tag_VFP_arch. */
19433 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
19434 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
19435 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
19436 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
19437 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
19438 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
19439 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
19440 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
19441 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
19442 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
19443 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
19444 /* Tag_WMMX_arch. */
19445 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
19446 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
19447 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
19448 /* Tag_NEON_arch. */
19449 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
19450 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
19451 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
19452 }
19453
19454 /* Add the .ARM.attributes section. */
19455 void
19456 arm_md_end (void)
19457 {
19458 segT s;
19459 char *p;
19460 addressT addr;
19461 offsetT size;
19462
19463 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
19464 return;
19465
19466 aeabi_set_public_attributes ();
19467 size = elf32_arm_eabi_attr_size (stdoutput);
19468 s = subseg_new (".ARM.attributes", 0);
19469 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
19470 addr = frag_now_fix ();
19471 p = frag_more (size);
19472 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
19473 }
19474 #endif /* OBJ_ELF */
19475
19476
19477 /* Parse a .cpu directive. */
19478
19479 static void
19480 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
19481 {
19482 const struct arm_cpu_option_table *opt;
19483 char *name;
19484 char saved_char;
19485
19486 name = input_line_pointer;
19487 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
19488 input_line_pointer++;
19489 saved_char = *input_line_pointer;
19490 *input_line_pointer = 0;
19491
19492 /* Skip the first "all" entry. */
19493 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
19494 if (streq (opt->name, name))
19495 {
19496 mcpu_cpu_opt = &opt->value;
19497 selected_cpu = opt->value;
19498 if (opt->canonical_name)
19499 strcpy(selected_cpu_name, opt->canonical_name);
19500 else
19501 {
19502 int i;
19503 for (i = 0; opt->name[i]; i++)
19504 selected_cpu_name[i] = TOUPPER (opt->name[i]);
19505 selected_cpu_name[i] = 0;
19506 }
19507 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19508 *input_line_pointer = saved_char;
19509 demand_empty_rest_of_line ();
19510 return;
19511 }
19512 as_bad (_("unknown cpu `%s'"), name);
19513 *input_line_pointer = saved_char;
19514 ignore_rest_of_line ();
19515 }
19516
19517
19518 /* Parse a .arch directive. */
19519
19520 static void
19521 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
19522 {
19523 const struct arm_arch_option_table *opt;
19524 char saved_char;
19525 char *name;
19526
19527 name = input_line_pointer;
19528 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
19529 input_line_pointer++;
19530 saved_char = *input_line_pointer;
19531 *input_line_pointer = 0;
19532
19533 /* Skip the first "all" entry. */
19534 for (opt = arm_archs + 1; opt->name != NULL; opt++)
19535 if (streq (opt->name, name))
19536 {
19537 mcpu_cpu_opt = &opt->value;
19538 selected_cpu = opt->value;
19539 strcpy(selected_cpu_name, opt->name);
19540 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19541 *input_line_pointer = saved_char;
19542 demand_empty_rest_of_line ();
19543 return;
19544 }
19545
19546 as_bad (_("unknown architecture `%s'\n"), name);
19547 *input_line_pointer = saved_char;
19548 ignore_rest_of_line ();
19549 }
19550
19551
19552 /* Parse a .fpu directive. */
19553
19554 static void
19555 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
19556 {
19557 const struct arm_option_cpu_value_table *opt;
19558 char saved_char;
19559 char *name;
19560
19561 name = input_line_pointer;
19562 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
19563 input_line_pointer++;
19564 saved_char = *input_line_pointer;
19565 *input_line_pointer = 0;
19566
19567 for (opt = arm_fpus; opt->name != NULL; opt++)
19568 if (streq (opt->name, name))
19569 {
19570 mfpu_opt = &opt->value;
19571 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19572 *input_line_pointer = saved_char;
19573 demand_empty_rest_of_line ();
19574 return;
19575 }
19576
19577 as_bad (_("unknown floating point format `%s'\n"), name);
19578 *input_line_pointer = saved_char;
19579 ignore_rest_of_line ();
19580 }
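/* Illustrative use of the directives parsed above (assembly input, not
   part of this file):

	.cpu	arm926ej-s
	.arch	armv5te
	.fpu	vfp

   Each directive re-selects the named table entry and recomputes
   cpu_variant, much as the corresponding command-line options do.  */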
19581