1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include <limits.h>
29 #include <stdarg.h>
30 #define NO_RELOC 0
31 #include "as.h"
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #define WARN_DEPRECATED 1
46
47 #ifdef OBJ_ELF
48 /* Must be at least the size of the largest unwind opcode (currently two). */
49 #define ARM_OPCODE_CHUNK_SIZE 8
50
51 /* This structure holds the unwinding state. */
52
53 static struct
54 {
55 symbolS * proc_start;
56 symbolS * table_entry;
57 symbolS * personality_routine;
58 int personality_index;
59 /* The segment containing the function. */
60 segT saved_seg;
61 subsegT saved_subseg;
62 /* Opcodes generated from this function. */
63 unsigned char * opcodes;
64 int opcode_count;
65 int opcode_alloc;
66 /* The number of bytes pushed to the stack. */
67 offsetT frame_size;
68 /* We don't add stack adjustment opcodes immediately so that we can merge
69 multiple adjustments. We can also omit the final adjustment
70 when using a frame pointer. */
71 offsetT pending_offset;
72 /* These two fields are set by both unwind_movsp and unwind_setfp. They
73 hold the reg+offset to use when restoring sp from a frame pointer. */
74 offsetT fp_offset;
75 int fp_reg;
76 /* Nonzero if an unwind_setfp directive has been seen. */
77 unsigned fp_used:1;
78 /* Nonzero if the last opcode restores sp from fp_reg. */
79 unsigned sp_restored:1;
80 } unwind;
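/* For illustration, a typical unwind directive sequence whose effects this
   state accumulates (a sketch, assuming the usual EHABI directives):

       .fnstart                 @ proc_start set, frame_size reset
       .save   {r4, r5, lr}     @ frame_size grows by 12 bytes
       .setfp  fp, sp, #8       @ fp_reg/fp_offset recorded, fp_used set
       .fnend                   @ pending adjustment flushed, opcodes emitted  */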
81
 82 /* Bit N is set once an R_ARM_NONE relocation has been output for
 83 __aeabi_unwind_cpp_prN. This lets the dependency be emitted only once
 84 per section, to save unnecessary bloat. */
85 static unsigned int marked_pr_dependency = 0;
86
87 #endif /* OBJ_ELF */
88
89 /* Results from operand parsing worker functions. */
90
91 typedef enum
92 {
93 PARSE_OPERAND_SUCCESS,
94 PARSE_OPERAND_FAIL,
95 PARSE_OPERAND_FAIL_NO_BACKTRACK
96 } parse_operand_result;
97
98 enum arm_float_abi
99 {
100 ARM_FLOAT_ABI_HARD,
101 ARM_FLOAT_ABI_SOFTFP,
102 ARM_FLOAT_ABI_SOFT
103 };
104
105 /* Types of processor to assemble for. */
106 #ifndef CPU_DEFAULT
107 #if defined __XSCALE__
108 #define CPU_DEFAULT ARM_ARCH_XSCALE
109 #else
110 #if defined __thumb__
111 #define CPU_DEFAULT ARM_ARCH_V5T
112 #endif
113 #endif
114 #endif
115
116 #ifndef FPU_DEFAULT
117 # ifdef TE_LINUX
118 # define FPU_DEFAULT FPU_ARCH_FPA
119 # elif defined (TE_NetBSD)
120 # ifdef OBJ_ELF
121 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
122 # else
123 /* Legacy a.out format. */
124 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
125 # endif
126 # elif defined (TE_VXWORKS)
127 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
128 # else
129 /* For backwards compatibility, default to FPA. */
130 # define FPU_DEFAULT FPU_ARCH_FPA
131 # endif
132 #endif /* ifndef FPU_DEFAULT */
133
134 #define streq(a, b) (strcmp (a, b) == 0)
135
136 static arm_feature_set cpu_variant;
137 static arm_feature_set arm_arch_used;
138 static arm_feature_set thumb_arch_used;
139
140 /* Flags stored in private area of BFD structure. */
141 static int uses_apcs_26 = FALSE;
142 static int atpcs = FALSE;
143 static int support_interwork = FALSE;
144 static int uses_apcs_float = FALSE;
145 static int pic_code = FALSE;
146
147 /* Variables that we set while parsing command-line options. Once all
148 options have been read we re-process these values to set the real
149 assembly flags. */
150 static const arm_feature_set *legacy_cpu = NULL;
151 static const arm_feature_set *legacy_fpu = NULL;
152
153 static const arm_feature_set *mcpu_cpu_opt = NULL;
154 static const arm_feature_set *mcpu_fpu_opt = NULL;
155 static const arm_feature_set *march_cpu_opt = NULL;
156 static const arm_feature_set *march_fpu_opt = NULL;
157 static const arm_feature_set *mfpu_opt = NULL;
158 static const arm_feature_set *object_arch = NULL;
159
160 /* Constants for known architecture features. */
161 static const arm_feature_set fpu_default = FPU_DEFAULT;
162 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
163 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
164 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
165 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
166 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
167 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
168 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
169 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
170
171 #ifdef CPU_DEFAULT
172 static const arm_feature_set cpu_default = CPU_DEFAULT;
173 #endif
174
175 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
 176 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
177 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
178 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
179 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
180 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
181 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
182 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
183 static const arm_feature_set arm_ext_v4t_5 =
184 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
185 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
186 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
187 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
188 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
189 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
190 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
191 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
192 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
193 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
194 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
195 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
196 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
197 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
198 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
199
200 static const arm_feature_set arm_arch_any = ARM_ANY;
201 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
202 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
203 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
204
205 static const arm_feature_set arm_cext_iwmmxt2 =
206 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
207 static const arm_feature_set arm_cext_iwmmxt =
208 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
209 static const arm_feature_set arm_cext_xscale =
210 ARM_FEATURE (0, ARM_CEXT_XSCALE);
211 static const arm_feature_set arm_cext_maverick =
212 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
213 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
214 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
215 static const arm_feature_set fpu_vfp_ext_v1xd =
216 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
217 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
218 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
219 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
220 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
221 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
222 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
223
224 static int mfloat_abi_opt = -1;
225 /* Record user cpu selection for object attributes. */
226 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
227 /* Must be long enough to hold any of the names in arm_cpus. */
228 static char selected_cpu_name[16];
229 #ifdef OBJ_ELF
230 # ifdef EABI_DEFAULT
231 static int meabi_flags = EABI_DEFAULT;
232 # else
233 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
234 # endif
235
236 bfd_boolean
 237 arm_is_eabi (void)
238 {
239 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
240 }
241 #endif
242
243 #ifdef OBJ_ELF
244 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
245 symbolS * GOT_symbol;
246 #endif
247
248 /* 0: assemble for ARM,
249 1: assemble for Thumb,
250 2: assemble for Thumb even though target CPU does not support thumb
251 instructions. */
252 static int thumb_mode = 0;
253
254 /* If unified_syntax is true, we are processing the new unified
255 ARM/Thumb syntax. Important differences from the old ARM mode:
256
257 - Immediate operands do not require a # prefix.
258 - Conditional affixes always appear at the end of the
259 instruction. (For backward compatibility, those instructions
260 that formerly had them in the middle, continue to accept them
261 there.)
262 - The IT instruction may appear, and if it does is validated
263 against subsequent conditional affixes. It does not generate
264 machine code.
265
266 Important differences from the old Thumb mode:
267
268 - Immediate operands do not require a # prefix.
269 - Most of the V6T2 instructions are only available in unified mode.
270 - The .N and .W suffixes are recognized and honored (it is an error
271 if they cannot be honored).
272 - All instructions set the flags if and only if they have an 's' affix.
273 - Conditional affixes may be used. They are validated against
274 preceding IT instructions. Unlike ARM mode, you cannot use a
275 conditional affix except in the scope of an IT instruction. */
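/* An illustrative sketch of the difference described above:

       divided ARM syntax:   addeqs  r0, r1, #1    @ condition infix, '#' required
       unified syntax:       addseq  r0, r1, 1     @ condition last, '#' optional

   and in unified Thumb a conditional instruction must sit inside an IT block:

       it      eq
       addeq   r0, r0, r1  */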
276
277 static bfd_boolean unified_syntax = FALSE;
278
279 enum neon_el_type
280 {
281 NT_invtype,
282 NT_untyped,
283 NT_integer,
284 NT_float,
285 NT_poly,
286 NT_signed,
287 NT_unsigned
288 };
289
290 struct neon_type_el
291 {
292 enum neon_el_type type;
293 unsigned size;
294 };
295
296 #define NEON_MAX_TYPE_ELS 4
297
298 struct neon_type
299 {
300 struct neon_type_el el[NEON_MAX_TYPE_ELS];
301 unsigned elems;
302 };
303
304 struct arm_it
305 {
306 const char * error;
307 unsigned long instruction;
308 int size;
309 int size_req;
310 int cond;
311 /* "uncond_value" is set to the value in place of the conditional field in
312 unconditional versions of the instruction, or -1 if nothing is
313 appropriate. */
314 int uncond_value;
315 struct neon_type vectype;
316 /* Set to the opcode if the instruction needs relaxation.
317 Zero if the instruction is not relaxed. */
318 unsigned long relax;
319 struct
320 {
321 bfd_reloc_code_real_type type;
322 expressionS exp;
323 int pc_rel;
324 } reloc;
325
326 struct
327 {
328 unsigned reg;
329 signed int imm;
330 struct neon_type_el vectype;
331 unsigned present : 1; /* Operand present. */
332 unsigned isreg : 1; /* Operand was a register. */
333 unsigned immisreg : 1; /* .imm field is a second register. */
334 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
335 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
336 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
337 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
338 instructions. This allows us to disambiguate ARM <-> vector insns. */
339 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
340 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
341 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
342 unsigned issingle : 1; /* Operand is VFP single-precision register. */
343 unsigned hasreloc : 1; /* Operand has relocation suffix. */
344 unsigned writeback : 1; /* Operand has trailing ! */
345 unsigned preind : 1; /* Preindexed address. */
346 unsigned postind : 1; /* Postindexed address. */
347 unsigned negative : 1; /* Index register was negated. */
348 unsigned shifted : 1; /* Shift applied to operation. */
349 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
350 } operands[6];
351 };
352
353 static struct arm_it inst;
354
355 #define NUM_FLOAT_VALS 8
356
357 const char * fp_const[] =
358 {
359 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
360 };
361
362 /* Number of littlenums required to hold an extended precision number. */
363 #define MAX_LITTLENUMS 6
364
365 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
366
367 #define FAIL (-1)
368 #define SUCCESS (0)
369
370 #define SUFF_S 1
371 #define SUFF_D 2
372 #define SUFF_E 3
373 #define SUFF_P 4
374
375 #define CP_T_X 0x00008000
376 #define CP_T_Y 0x00400000
377
378 #define CONDS_BIT 0x00100000
379 #define LOAD_BIT 0x00100000
380
381 #define DOUBLE_LOAD_FLAG 0x00000001
382
383 struct asm_cond
384 {
385 const char * template;
386 unsigned long value;
387 };
388
389 #define COND_ALWAYS 0xE
390
391 struct asm_psr
392 {
393 const char *template;
394 unsigned long field;
395 };
396
397 struct asm_barrier_opt
398 {
399 const char *template;
400 unsigned long value;
401 };
402
403 /* The bit that distinguishes CPSR and SPSR. */
404 #define SPSR_BIT (1 << 22)
405
406 /* The individual PSR flag bits. */
407 #define PSR_c (1 << 16)
408 #define PSR_x (1 << 17)
409 #define PSR_s (1 << 18)
410 #define PSR_f (1 << 19)
411
412 struct reloc_entry
413 {
414 char *name;
415 bfd_reloc_code_real_type reloc;
416 };
417
418 enum vfp_reg_pos
419 {
420 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
421 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
422 };
423
424 enum vfp_ldstm_type
425 {
426 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
427 };
428
429 /* Bits for DEFINED field in neon_typed_alias. */
430 #define NTA_HASTYPE 1
431 #define NTA_HASINDEX 2
432
433 struct neon_typed_alias
434 {
435 unsigned char defined;
436 unsigned char index;
437 struct neon_type_el eltype;
438 };
439
440 /* ARM register categories. This includes coprocessor numbers and various
441 architecture extensions' registers. */
442 enum arm_reg_type
443 {
444 REG_TYPE_RN,
445 REG_TYPE_CP,
446 REG_TYPE_CN,
447 REG_TYPE_FN,
448 REG_TYPE_VFS,
449 REG_TYPE_VFD,
450 REG_TYPE_NQ,
451 REG_TYPE_VFSD,
452 REG_TYPE_NDQ,
453 REG_TYPE_NSDQ,
454 REG_TYPE_VFC,
455 REG_TYPE_MVF,
456 REG_TYPE_MVD,
457 REG_TYPE_MVFX,
458 REG_TYPE_MVDX,
459 REG_TYPE_MVAX,
460 REG_TYPE_DSPSC,
461 REG_TYPE_MMXWR,
462 REG_TYPE_MMXWC,
463 REG_TYPE_MMXWCG,
464 REG_TYPE_XSCALE,
465 };
466
467 /* Structure for a hash table entry for a register.
468 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
469 information which states whether a vector type or index is specified (for a
470 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
471 struct reg_entry
472 {
473 const char *name;
474 unsigned char number;
475 unsigned char type;
476 unsigned char builtin;
477 struct neon_typed_alias *neon;
478 };
479
480 /* Diagnostics used when we don't get a register of the expected type. */
481 const char *const reg_expected_msgs[] =
482 {
483 N_("ARM register expected"),
484 N_("bad or missing co-processor number"),
485 N_("co-processor register expected"),
486 N_("FPA register expected"),
487 N_("VFP single precision register expected"),
488 N_("VFP/Neon double precision register expected"),
489 N_("Neon quad precision register expected"),
490 N_("VFP single or double precision register expected"),
491 N_("Neon double or quad precision register expected"),
492 N_("VFP single, double or Neon quad precision register expected"),
493 N_("VFP system register expected"),
494 N_("Maverick MVF register expected"),
495 N_("Maverick MVD register expected"),
496 N_("Maverick MVFX register expected"),
497 N_("Maverick MVDX register expected"),
498 N_("Maverick MVAX register expected"),
499 N_("Maverick DSPSC register expected"),
500 N_("iWMMXt data register expected"),
501 N_("iWMMXt control register expected"),
502 N_("iWMMXt scalar register expected"),
503 N_("XScale accumulator register expected"),
504 };
505
506 /* Some well known registers that we refer to directly elsewhere. */
507 #define REG_SP 13
508 #define REG_LR 14
509 #define REG_PC 15
510
 511 /* ARM instructions take 4 bytes in the object file, Thumb instructions
512 take 2: */
513 #define INSN_SIZE 4
514
515 struct asm_opcode
516 {
517 /* Basic string to match. */
518 const char *template;
519
520 /* Parameters to instruction. */
521 unsigned char operands[8];
522
523 /* Conditional tag - see opcode_lookup. */
524 unsigned int tag : 4;
525
526 /* Basic instruction code. */
527 unsigned int avalue : 28;
528
529 /* Thumb-format instruction code. */
530 unsigned int tvalue;
531
532 /* Which architecture variant provides this instruction. */
533 const arm_feature_set *avariant;
534 const arm_feature_set *tvariant;
535
536 /* Function to call to encode instruction in ARM format. */
537 void (* aencode) (void);
538
539 /* Function to call to encode instruction in Thumb format. */
540 void (* tencode) (void);
541 };
542
543 /* Defines for various bits that we will want to toggle. */
544 #define INST_IMMEDIATE 0x02000000
545 #define OFFSET_REG 0x02000000
546 #define HWOFFSET_IMM 0x00400000
547 #define SHIFT_BY_REG 0x00000010
548 #define PRE_INDEX 0x01000000
549 #define INDEX_UP 0x00800000
550 #define WRITE_BACK 0x00200000
551 #define LDM_TYPE_2_OR_3 0x00400000
552 #define CPSI_MMOD 0x00020000
553
554 #define LITERAL_MASK 0xf000f000
555 #define OPCODE_MASK 0xfe1fffff
556 #define V4_STR_BIT 0x00000020
557
558 #define DATA_OP_SHIFT 21
559
560 #define T2_OPCODE_MASK 0xfe1fffff
561 #define T2_DATA_OP_SHIFT 21
562
563 /* Codes to distinguish the arithmetic instructions. */
564 #define OPCODE_AND 0
565 #define OPCODE_EOR 1
566 #define OPCODE_SUB 2
567 #define OPCODE_RSB 3
568 #define OPCODE_ADD 4
569 #define OPCODE_ADC 5
570 #define OPCODE_SBC 6
571 #define OPCODE_RSC 7
572 #define OPCODE_TST 8
573 #define OPCODE_TEQ 9
574 #define OPCODE_CMP 10
575 #define OPCODE_CMN 11
576 #define OPCODE_ORR 12
577 #define OPCODE_MOV 13
578 #define OPCODE_BIC 14
579 #define OPCODE_MVN 15
580
581 #define T2_OPCODE_AND 0
582 #define T2_OPCODE_BIC 1
583 #define T2_OPCODE_ORR 2
584 #define T2_OPCODE_ORN 3
585 #define T2_OPCODE_EOR 4
586 #define T2_OPCODE_ADD 8
587 #define T2_OPCODE_ADC 10
588 #define T2_OPCODE_SBC 11
589 #define T2_OPCODE_SUB 13
590 #define T2_OPCODE_RSB 14
591
592 #define T_OPCODE_MUL 0x4340
593 #define T_OPCODE_TST 0x4200
594 #define T_OPCODE_CMN 0x42c0
595 #define T_OPCODE_NEG 0x4240
596 #define T_OPCODE_MVN 0x43c0
597
598 #define T_OPCODE_ADD_R3 0x1800
599 #define T_OPCODE_SUB_R3 0x1a00
600 #define T_OPCODE_ADD_HI 0x4400
601 #define T_OPCODE_ADD_ST 0xb000
602 #define T_OPCODE_SUB_ST 0xb080
603 #define T_OPCODE_ADD_SP 0xa800
604 #define T_OPCODE_ADD_PC 0xa000
605 #define T_OPCODE_ADD_I8 0x3000
606 #define T_OPCODE_SUB_I8 0x3800
607 #define T_OPCODE_ADD_I3 0x1c00
608 #define T_OPCODE_SUB_I3 0x1e00
609
610 #define T_OPCODE_ASR_R 0x4100
611 #define T_OPCODE_LSL_R 0x4080
612 #define T_OPCODE_LSR_R 0x40c0
613 #define T_OPCODE_ROR_R 0x41c0
614 #define T_OPCODE_ASR_I 0x1000
615 #define T_OPCODE_LSL_I 0x0000
616 #define T_OPCODE_LSR_I 0x0800
617
618 #define T_OPCODE_MOV_I8 0x2000
619 #define T_OPCODE_CMP_I8 0x2800
620 #define T_OPCODE_CMP_LR 0x4280
621 #define T_OPCODE_MOV_HR 0x4600
622 #define T_OPCODE_CMP_HR 0x4500
623
624 #define T_OPCODE_LDR_PC 0x4800
625 #define T_OPCODE_LDR_SP 0x9800
626 #define T_OPCODE_STR_SP 0x9000
627 #define T_OPCODE_LDR_IW 0x6800
628 #define T_OPCODE_STR_IW 0x6000
629 #define T_OPCODE_LDR_IH 0x8800
630 #define T_OPCODE_STR_IH 0x8000
631 #define T_OPCODE_LDR_IB 0x7800
632 #define T_OPCODE_STR_IB 0x7000
633 #define T_OPCODE_LDR_RW 0x5800
634 #define T_OPCODE_STR_RW 0x5000
635 #define T_OPCODE_LDR_RH 0x5a00
636 #define T_OPCODE_STR_RH 0x5200
637 #define T_OPCODE_LDR_RB 0x5c00
638 #define T_OPCODE_STR_RB 0x5400
639
640 #define T_OPCODE_PUSH 0xb400
641 #define T_OPCODE_POP 0xbc00
642
643 #define T_OPCODE_BRANCH 0xe000
644
645 #define THUMB_SIZE 2 /* Size of thumb instruction. */
646 #define THUMB_PP_PC_LR 0x0100
647 #define THUMB_LOAD_BIT 0x0800
648 #define THUMB2_LOAD_BIT 0x00100000
649
650 #define BAD_ARGS _("bad arguments to instruction")
651 #define BAD_PC _("r15 not allowed here")
652 #define BAD_COND _("instruction cannot be conditional")
653 #define BAD_OVERLAP _("registers may not be the same")
654 #define BAD_HIREG _("lo register required")
655 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
 656 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
657 #define BAD_BRANCH _("branch must be last instruction in IT block")
658 #define BAD_NOT_IT _("instruction not allowed in IT block")
659 #define BAD_FPU _("selected FPU does not support instruction")
660
661 static struct hash_control *arm_ops_hsh;
662 static struct hash_control *arm_cond_hsh;
663 static struct hash_control *arm_shift_hsh;
664 static struct hash_control *arm_psr_hsh;
665 static struct hash_control *arm_v7m_psr_hsh;
666 static struct hash_control *arm_reg_hsh;
667 static struct hash_control *arm_reloc_hsh;
668 static struct hash_control *arm_barrier_opt_hsh;
669
670 /* Stuff needed to resolve the label ambiguity
671 As:
672 ...
673 label: <insn>
674 may differ from:
675 ...
676 label:
677 <insn>
678 */
679
680 symbolS * last_label_seen;
681 static int label_is_thumb_function_name = FALSE;
682 \f
683 /* Literal pool structure. Held on a per-section
684 and per-sub-section basis. */
685
686 #define MAX_LITERAL_POOL_SIZE 1024
687 typedef struct literal_pool
688 {
689 expressionS literals [MAX_LITERAL_POOL_SIZE];
690 unsigned int next_free_entry;
691 unsigned int id;
692 symbolS * symbol;
693 segT section;
694 subsegT sub_section;
695 struct literal_pool * next;
696 } literal_pool;
697
698 /* Pointer to a linked list of literal pools. */
699 literal_pool * list_of_pools = NULL;
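/* For illustration, a pool is typically filled by pseudo-loads and dumped
   by .ltorg (or at the end of the section):

       ldr   r0, =0x12345678    @ constant deposited in the current pool
       ldr   r1, =some_symbol   @ likewise, with a relocation
       .ltorg                   @ pool emitted here, within reach of the ldrs  */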
700
701 /* State variables for IT block handling. */
 702 static int current_it_mask = 0;
703 static int current_cc;
704
705 \f
706 /* Pure syntax. */
707
708 /* This array holds the chars that always start a comment. If the
709 pre-processor is disabled, these aren't very useful. */
710 const char comment_chars[] = "@";
711
712 /* This array holds the chars that only start a comment at the beginning of
713 a line. If the line seems to have the form '# 123 filename'
714 .line and .file directives will appear in the pre-processed output. */
715 /* Note that input_file.c hand checks for '#' at the beginning of the
716 first line of the input file. This is because the compiler outputs
717 #NO_APP at the beginning of its output. */
718 /* Also note that comments like this one will always work. */
719 const char line_comment_chars[] = "#";
720
721 const char line_separator_chars[] = ";";
722
723 /* Chars that can be used to separate mant
724 from exp in floating point numbers. */
725 const char EXP_CHARS[] = "eE";
726
727 /* Chars that mean this number is a floating point constant. */
728 /* As in 0f12.456 */
729 /* or 0d1.2345e12 */
730
731 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
732
733 /* Prefix characters that indicate the start of an immediate
734 value. */
735 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
736
737 /* Separator character handling. */
738
739 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
740
741 static inline int
742 skip_past_char (char ** str, char c)
743 {
744 if (**str == c)
745 {
746 (*str)++;
747 return SUCCESS;
748 }
749 else
750 return FAIL;
751 }
752 #define skip_past_comma(str) skip_past_char (str, ',')
753
754 /* Arithmetic expressions (possibly involving symbols). */
755
756 /* Return TRUE if anything in the expression is a bignum. */
757
758 static int
759 walk_no_bignums (symbolS * sp)
760 {
761 if (symbol_get_value_expression (sp)->X_op == O_big)
762 return 1;
763
764 if (symbol_get_value_expression (sp)->X_add_symbol)
765 {
766 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
767 || (symbol_get_value_expression (sp)->X_op_symbol
768 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
769 }
770
771 return 0;
772 }
773
774 static int in_my_get_expression = 0;
775
776 /* Third argument to my_get_expression. */
777 #define GE_NO_PREFIX 0
778 #define GE_IMM_PREFIX 1
779 #define GE_OPT_PREFIX 2
780 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
781 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
782 #define GE_OPT_PREFIX_BIG 3
783
784 static int
785 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
786 {
787 char * save_in;
788 segT seg;
789
790 /* In unified syntax, all prefixes are optional. */
791 if (unified_syntax)
792 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
793 : GE_OPT_PREFIX;
794
795 switch (prefix_mode)
796 {
797 case GE_NO_PREFIX: break;
798 case GE_IMM_PREFIX:
799 if (!is_immediate_prefix (**str))
800 {
801 inst.error = _("immediate expression requires a # prefix");
802 return FAIL;
803 }
804 (*str)++;
805 break;
806 case GE_OPT_PREFIX:
807 case GE_OPT_PREFIX_BIG:
808 if (is_immediate_prefix (**str))
809 (*str)++;
810 break;
811 default: abort ();
812 }
813
814 memset (ep, 0, sizeof (expressionS));
815
816 save_in = input_line_pointer;
817 input_line_pointer = *str;
818 in_my_get_expression = 1;
819 seg = expression (ep);
820 in_my_get_expression = 0;
821
822 if (ep->X_op == O_illegal)
823 {
824 /* We found a bad expression in md_operand(). */
825 *str = input_line_pointer;
826 input_line_pointer = save_in;
827 if (inst.error == NULL)
828 inst.error = _("bad expression");
829 return 1;
830 }
831
832 #ifdef OBJ_AOUT
833 if (seg != absolute_section
834 && seg != text_section
835 && seg != data_section
836 && seg != bss_section
837 && seg != undefined_section)
838 {
839 inst.error = _("bad segment");
840 *str = input_line_pointer;
841 input_line_pointer = save_in;
842 return 1;
843 }
844 #endif
845
846 /* Get rid of any bignums now, so that we don't generate an error for which
847 we can't establish a line number later on. Big numbers are never valid
848 in instructions, which is where this routine is always called. */
849 if (prefix_mode != GE_OPT_PREFIX_BIG
850 && (ep->X_op == O_big
851 || (ep->X_add_symbol
852 && (walk_no_bignums (ep->X_add_symbol)
853 || (ep->X_op_symbol
854 && walk_no_bignums (ep->X_op_symbol))))))
855 {
856 inst.error = _("invalid constant");
857 *str = input_line_pointer;
858 input_line_pointer = save_in;
859 return 1;
860 }
861
862 *str = input_line_pointer;
863 input_line_pointer = save_in;
864 return 0;
865 }
866
867 /* Turn a string in input_line_pointer into a floating point constant
868 of type TYPE, and store the appropriate bytes in *LITP. The number
869 of LITTLENUMS emitted is stored in *SIZEP. An error message is
870 returned, or NULL on OK.
871
 872 Note that fp constants aren't represented in the normal way on the ARM.
873 In big endian mode, things are as expected. However, in little endian
874 mode fp constants are big-endian word-wise, and little-endian byte-wise
875 within the words. For example, (double) 1.1 in big endian mode is
876 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
877 the byte sequence 99 99 f1 3f 9a 99 99 99.
878
879 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
880
881 char *
882 md_atof (int type, char * litP, int * sizeP)
883 {
884 int prec;
885 LITTLENUM_TYPE words[MAX_LITTLENUMS];
886 char *t;
887 int i;
888
889 switch (type)
890 {
891 case 'f':
892 case 'F':
893 case 's':
894 case 'S':
895 prec = 2;
896 break;
897
898 case 'd':
899 case 'D':
900 case 'r':
901 case 'R':
902 prec = 4;
903 break;
904
905 case 'x':
906 case 'X':
907 prec = 6;
908 break;
909
910 case 'p':
911 case 'P':
912 prec = 6;
913 break;
914
915 default:
916 *sizeP = 0;
917 return _("bad call to MD_ATOF()");
918 }
919
920 t = atof_ieee (input_line_pointer, type, words);
921 if (t)
922 input_line_pointer = t;
923 *sizeP = prec * 2;
924
925 if (target_big_endian)
926 {
927 for (i = 0; i < prec; i++)
928 {
929 md_number_to_chars (litP, (valueT) words[i], 2);
930 litP += 2;
931 }
932 }
933 else
934 {
935 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
936 for (i = prec - 1; i >= 0; i--)
937 {
938 md_number_to_chars (litP, (valueT) words[i], 2);
939 litP += 2;
940 }
941 else
942 /* For a 4 byte float the order of elements in `words' is 1 0.
943 For an 8 byte float the order is 1 0 3 2. */
944 for (i = 0; i < prec; i += 2)
945 {
946 md_number_to_chars (litP, (valueT) words[i + 1], 2);
947 md_number_to_chars (litP + 2, (valueT) words[i], 2);
948 litP += 4;
949 }
950 }
951
952 return 0;
953 }
954
955 /* We handle all bad expressions here, so that we can report the faulty
956 instruction in the error message. */
957 void
958 md_operand (expressionS * expr)
959 {
960 if (in_my_get_expression)
961 expr->X_op = O_illegal;
962 }
963
964 /* Immediate values. */
965
966 /* Generic immediate-value read function for use in directives.
967 Accepts anything that 'expression' can fold to a constant.
968 *val receives the number. */
969 #ifdef OBJ_ELF
970 static int
971 immediate_for_directive (int *val)
972 {
973 expressionS exp;
974 exp.X_op = O_illegal;
975
976 if (is_immediate_prefix (*input_line_pointer))
977 {
978 input_line_pointer++;
979 expression (&exp);
980 }
981
982 if (exp.X_op != O_constant)
983 {
984 as_bad (_("expected #constant"));
985 ignore_rest_of_line ();
986 return FAIL;
987 }
988 *val = exp.X_add_number;
989 return SUCCESS;
990 }
991 #endif
992
993 /* Register parsing. */
994
995 /* Generic register parser. CCP points to what should be the
996 beginning of a register name. If it is indeed a valid register
997 name, advance CCP over it and return the reg_entry structure;
998 otherwise return NULL. Does not issue diagnostics. */
999
1000 static struct reg_entry *
1001 arm_reg_parse_multi (char **ccp)
1002 {
1003 char *start = *ccp;
1004 char *p;
1005 struct reg_entry *reg;
1006
1007 #ifdef REGISTER_PREFIX
1008 if (*start != REGISTER_PREFIX)
1009 return NULL;
1010 start++;
1011 #endif
1012 #ifdef OPTIONAL_REGISTER_PREFIX
1013 if (*start == OPTIONAL_REGISTER_PREFIX)
1014 start++;
1015 #endif
1016
1017 p = start;
1018 if (!ISALPHA (*p) || !is_name_beginner (*p))
1019 return NULL;
1020
1021 do
1022 p++;
1023 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1024
1025 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1026
1027 if (!reg)
1028 return NULL;
1029
1030 *ccp = p;
1031 return reg;
1032 }
1033
1034 static int
1035 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1036 enum arm_reg_type type)
1037 {
1038 /* Alternative syntaxes are accepted for a few register classes. */
1039 switch (type)
1040 {
1041 case REG_TYPE_MVF:
1042 case REG_TYPE_MVD:
1043 case REG_TYPE_MVFX:
1044 case REG_TYPE_MVDX:
1045 /* Generic coprocessor register names are allowed for these. */
1046 if (reg && reg->type == REG_TYPE_CN)
1047 return reg->number;
1048 break;
1049
1050 case REG_TYPE_CP:
1051 /* For backward compatibility, a bare number is valid here. */
1052 {
1053 unsigned long processor = strtoul (start, ccp, 10);
1054 if (*ccp != start && processor <= 15)
1055 return processor;
1056 }
1057
1058 case REG_TYPE_MMXWC:
1059 /* WC includes WCG. ??? I'm not sure this is true for all
1060 instructions that take WC registers. */
1061 if (reg && reg->type == REG_TYPE_MMXWCG)
1062 return reg->number;
1063 break;
1064
1065 default:
1066 break;
1067 }
1068
1069 return FAIL;
1070 }
1071
1072 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1073 return value is the register number or FAIL. */
1074
1075 static int
1076 arm_reg_parse (char **ccp, enum arm_reg_type type)
1077 {
1078 char *start = *ccp;
1079 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1080 int ret;
1081
1082 /* Do not allow a scalar (reg+index) to parse as a register. */
1083 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1084 return FAIL;
1085
1086 if (reg && reg->type == type)
1087 return reg->number;
1088
1089 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1090 return ret;
1091
1092 *ccp = start;
1093 return FAIL;
1094 }
1095
1096 /* Parse a Neon type specifier. *STR should point at the leading '.'
1097 character. Does no verification at this stage that the type fits the opcode
1098 properly. E.g.,
1099
1100 .i32.i32.s16
1101 .s32.f32
1102 .u16
1103
1104 Can all be legally parsed by this function.
1105
1106 Fills in neon_type struct pointer with parsed information, and updates STR
1107 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1108 type, FAIL if not. */
1109
1110 static int
1111 parse_neon_type (struct neon_type *type, char **str)
1112 {
1113 char *ptr = *str;
1114
1115 if (type)
1116 type->elems = 0;
1117
1118 while (type->elems < NEON_MAX_TYPE_ELS)
1119 {
1120 enum neon_el_type thistype = NT_untyped;
1121 unsigned thissize = -1u;
1122
1123 if (*ptr != '.')
1124 break;
1125
1126 ptr++;
1127
1128 /* Just a size without an explicit type. */
1129 if (ISDIGIT (*ptr))
1130 goto parsesize;
1131
1132 switch (TOLOWER (*ptr))
1133 {
1134 case 'i': thistype = NT_integer; break;
1135 case 'f': thistype = NT_float; break;
1136 case 'p': thistype = NT_poly; break;
1137 case 's': thistype = NT_signed; break;
1138 case 'u': thistype = NT_unsigned; break;
1139 case 'd':
1140 thistype = NT_float;
1141 thissize = 64;
1142 ptr++;
1143 goto done;
1144 default:
1145 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1146 return FAIL;
1147 }
1148
1149 ptr++;
1150
1151 /* .f is an abbreviation for .f32. */
1152 if (thistype == NT_float && !ISDIGIT (*ptr))
1153 thissize = 32;
1154 else
1155 {
1156 parsesize:
1157 thissize = strtoul (ptr, &ptr, 10);
1158
1159 if (thissize != 8 && thissize != 16 && thissize != 32
1160 && thissize != 64)
1161 {
1162 as_bad (_("bad size %d in type specifier"), thissize);
1163 return FAIL;
1164 }
1165 }
1166
1167 done:
1168 if (type)
1169 {
1170 type->el[type->elems].type = thistype;
1171 type->el[type->elems].size = thissize;
1172 type->elems++;
1173 }
1174 }
1175
1176 /* Empty/missing type is not a successful parse. */
1177 if (type->elems == 0)
1178 return FAIL;
1179
1180 *str = ptr;
1181
1182 return SUCCESS;
1183 }
1184
1185 /* Errors may be set multiple times during parsing or bit encoding
1186 (particularly in the Neon bits), but usually the earliest error which is set
1187 will be the most meaningful. Avoid overwriting it with later (cascading)
1188 errors by calling this function. */
1189
1190 static void
1191 first_error (const char *err)
1192 {
1193 if (!inst.error)
1194 inst.error = err;
1195 }
1196
1197 /* Parse a single type, e.g. ".s32", leading period included. */
1198 static int
1199 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1200 {
1201 char *str = *ccp;
1202 struct neon_type optype;
1203
1204 if (*str == '.')
1205 {
1206 if (parse_neon_type (&optype, &str) == SUCCESS)
1207 {
1208 if (optype.elems == 1)
1209 *vectype = optype.el[0];
1210 else
1211 {
1212 first_error (_("only one type should be specified for operand"));
1213 return FAIL;
1214 }
1215 }
1216 else
1217 {
1218 first_error (_("vector type expected"));
1219 return FAIL;
1220 }
1221 }
1222 else
1223 return FAIL;
1224
1225 *ccp = str;
1226
1227 return SUCCESS;
1228 }
1229
 1230 /* Special meanings for indices (ordinary indices range from 0 to 7); both
 1231 special values still fit in the 4-bit index field. */
1232
1233 #define NEON_ALL_LANES 15
1234 #define NEON_INTERLEAVE_LANES 14
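/* Illustrative examples of how these values arise:
       vld1.8  {d0[2]}, [r0]    @ explicit lane index 2
       vld1.8  {d0[]},  [r0]    @ all lanes      -> NEON_ALL_LANES
       vld1.8  {d0},    [r0]    @ no lane given  -> NEON_INTERLEAVE_LANES  */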
1235
1236 /* Parse either a register or a scalar, with an optional type. Return the
1237 register number, and optionally fill in the actual type of the register
1238 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1239 type/index information in *TYPEINFO. */
1240
1241 static int
1242 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1243 enum arm_reg_type *rtype,
1244 struct neon_typed_alias *typeinfo)
1245 {
1246 char *str = *ccp;
1247 struct reg_entry *reg = arm_reg_parse_multi (&str);
1248 struct neon_typed_alias atype;
1249 struct neon_type_el parsetype;
1250
1251 atype.defined = 0;
1252 atype.index = -1;
1253 atype.eltype.type = NT_invtype;
1254 atype.eltype.size = -1;
1255
1256 /* Try alternate syntax for some types of register. Note these are mutually
1257 exclusive with the Neon syntax extensions. */
1258 if (reg == NULL)
1259 {
1260 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1261 if (altreg != FAIL)
1262 *ccp = str;
1263 if (typeinfo)
1264 *typeinfo = atype;
1265 return altreg;
1266 }
1267
1268 /* Undo polymorphism when a set of register types may be accepted. */
1269 if ((type == REG_TYPE_NDQ
1270 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1271 || (type == REG_TYPE_VFSD
1272 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1273 || (type == REG_TYPE_NSDQ
1274 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1275 || reg->type == REG_TYPE_NQ))
1276 || (type == REG_TYPE_MMXWC
1277 && (reg->type == REG_TYPE_MMXWCG)))
1278 type = reg->type;
1279
1280 if (type != reg->type)
1281 return FAIL;
1282
1283 if (reg->neon)
1284 atype = *reg->neon;
1285
1286 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1287 {
1288 if ((atype.defined & NTA_HASTYPE) != 0)
1289 {
1290 first_error (_("can't redefine type for operand"));
1291 return FAIL;
1292 }
1293 atype.defined |= NTA_HASTYPE;
1294 atype.eltype = parsetype;
1295 }
1296
1297 if (skip_past_char (&str, '[') == SUCCESS)
1298 {
1299 if (type != REG_TYPE_VFD)
1300 {
1301 first_error (_("only D registers may be indexed"));
1302 return FAIL;
1303 }
1304
1305 if ((atype.defined & NTA_HASINDEX) != 0)
1306 {
1307 first_error (_("can't change index for operand"));
1308 return FAIL;
1309 }
1310
1311 atype.defined |= NTA_HASINDEX;
1312
1313 if (skip_past_char (&str, ']') == SUCCESS)
1314 atype.index = NEON_ALL_LANES;
1315 else
1316 {
1317 expressionS exp;
1318
1319 my_get_expression (&exp, &str, GE_NO_PREFIX);
1320
1321 if (exp.X_op != O_constant)
1322 {
1323 first_error (_("constant expression required"));
1324 return FAIL;
1325 }
1326
1327 if (skip_past_char (&str, ']') == FAIL)
1328 return FAIL;
1329
1330 atype.index = exp.X_add_number;
1331 }
1332 }
1333
1334 if (typeinfo)
1335 *typeinfo = atype;
1336
1337 if (rtype)
1338 *rtype = type;
1339
1340 *ccp = str;
1341
1342 return reg->number;
1343 }
1344
 1345 /* Like arm_reg_parse, but allow the following extra features:
1346 - If RTYPE is non-zero, return the (possibly restricted) type of the
1347 register (e.g. Neon double or quad reg when either has been requested).
1348 - If this is a Neon vector type with additional type information, fill
1349 in the struct pointed to by VECTYPE (if non-NULL).
1350 This function will fault on encountering a scalar.
1351 */
1352
1353 static int
1354 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1355 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1356 {
1357 struct neon_typed_alias atype;
1358 char *str = *ccp;
1359 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1360
1361 if (reg == FAIL)
1362 return FAIL;
1363
1364 /* Do not allow a scalar (reg+index) to parse as a register. */
1365 if ((atype.defined & NTA_HASINDEX) != 0)
1366 {
1367 first_error (_("register operand expected, but got scalar"));
1368 return FAIL;
1369 }
1370
1371 if (vectype)
1372 *vectype = atype.eltype;
1373
1374 *ccp = str;
1375
1376 return reg;
1377 }
1378
1379 #define NEON_SCALAR_REG(X) ((X) >> 4)
1380 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1381
1382 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1383 have enough information to be able to do a good job bounds-checking. So, we
1384 just do easy checks here, and do further checks later. */
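/* Worked example of the encoding: "d5[1]" parses to 5 * 16 + 1 == 0x51,
   so NEON_SCALAR_REG gives 5 and NEON_SCALAR_INDEX gives 1.  */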
1385
1386 static int
1387 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1388 {
1389 int reg;
1390 char *str = *ccp;
1391 struct neon_typed_alias atype;
1392
1393 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1394
1395 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1396 return FAIL;
1397
1398 if (atype.index == NEON_ALL_LANES)
1399 {
1400 first_error (_("scalar must have an index"));
1401 return FAIL;
1402 }
1403 else if (atype.index >= 64 / elsize)
1404 {
1405 first_error (_("scalar index out of range"));
1406 return FAIL;
1407 }
1408
1409 if (type)
1410 *type = atype.eltype;
1411
1412 *ccp = str;
1413
1414 return reg * 16 + atype.index;
1415 }
1416
1417 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
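/* Worked example: "{r0-r3, r5}" yields the bitmask 0x2f (bits 0-3 and 5).  */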
1418 static long
1419 parse_reg_list (char ** strp)
1420 {
1421 char * str = * strp;
1422 long range = 0;
1423 int another_range;
1424
1425 /* We come back here if we get ranges concatenated by '+' or '|'. */
1426 do
1427 {
1428 another_range = 0;
1429
1430 if (*str == '{')
1431 {
1432 int in_range = 0;
1433 int cur_reg = -1;
1434
1435 str++;
1436 do
1437 {
1438 int reg;
1439
1440 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1441 {
1442 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1443 return FAIL;
1444 }
1445
1446 if (in_range)
1447 {
1448 int i;
1449
1450 if (reg <= cur_reg)
1451 {
1452 first_error (_("bad range in register list"));
1453 return FAIL;
1454 }
1455
1456 for (i = cur_reg + 1; i < reg; i++)
1457 {
1458 if (range & (1 << i))
1459 as_tsktsk
1460 (_("Warning: duplicated register (r%d) in register list"),
1461 i);
1462 else
1463 range |= 1 << i;
1464 }
1465 in_range = 0;
1466 }
1467
1468 if (range & (1 << reg))
1469 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1470 reg);
1471 else if (reg <= cur_reg)
1472 as_tsktsk (_("Warning: register range not in ascending order"));
1473
1474 range |= 1 << reg;
1475 cur_reg = reg;
1476 }
1477 while (skip_past_comma (&str) != FAIL
1478 || (in_range = 1, *str++ == '-'));
1479 str--;
1480
1481 if (*str++ != '}')
1482 {
1483 first_error (_("missing `}'"));
1484 return FAIL;
1485 }
1486 }
1487 else
1488 {
1489 expressionS expr;
1490
1491 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1492 return FAIL;
1493
1494 if (expr.X_op == O_constant)
1495 {
1496 if (expr.X_add_number
1497 != (expr.X_add_number & 0x0000ffff))
1498 {
1499 inst.error = _("invalid register mask");
1500 return FAIL;
1501 }
1502
1503 if ((range & expr.X_add_number) != 0)
1504 {
1505 int regno = range & expr.X_add_number;
1506
1507 regno &= -regno;
1508 regno = (1 << regno) - 1;
1509 as_tsktsk
1510 (_("Warning: duplicated register (r%d) in register list"),
1511 regno);
1512 }
1513
1514 range |= expr.X_add_number;
1515 }
1516 else
1517 {
1518 if (inst.reloc.type != 0)
1519 {
1520 inst.error = _("expression too complex");
1521 return FAIL;
1522 }
1523
1524 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1525 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1526 inst.reloc.pc_rel = 0;
1527 }
1528 }
1529
1530 if (*str == '|' || *str == '+')
1531 {
1532 str++;
1533 another_range = 1;
1534 }
1535 }
1536 while (another_range);
1537
1538 *strp = str;
1539 return range;
1540 }
1541
1542 /* Types of registers in a list. */
1543
1544 enum reg_list_els
1545 {
1546 REGLIST_VFP_S,
1547 REGLIST_VFP_D,
1548 REGLIST_NEON_D
1549 };
1550
1551 /* Parse a VFP register list. If the string is invalid return FAIL.
1552 Otherwise return the number of registers, and set PBASE to the first
1553 register. Parses registers of type ETYPE.
1554 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1555 - Q registers can be used to specify pairs of D registers
1556 - { } can be omitted from around a singleton register list
1557 FIXME: This is not implemented, as it would require backtracking in
1558 some cases, e.g.:
1559 vtbl.8 d3,d4,d5
1560 This could be done (the meaning isn't really ambiguous), but doesn't
1561 fit in well with the current parsing framework.
1562 - 32 D registers may be used (also true for VFPv3).
1563 FIXME: Types are ignored in these register lists, which is probably a
1564 bug. */
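/* Illustrative lists this parser accepts (assumed, not exhaustive):
       vpush  {s0-s7}          @ REGLIST_VFP_S  -> 8 registers, base 0
       vldmia r0, {d8-d11}     @ REGLIST_VFP_D  -> 4 registers, base 8
       vpush  {q4-q7}          @ REGLIST_NEON_D -> Q regs name D-register pairs  */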
1565
1566 static int
1567 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1568 {
1569 char *str = *ccp;
1570 int base_reg;
1571 int new_base;
1572 enum arm_reg_type regtype = 0;
1573 int max_regs = 0;
1574 int count = 0;
1575 int warned = 0;
1576 unsigned long mask = 0;
1577 int i;
1578
1579 if (*str != '{')
1580 {
1581 inst.error = _("expecting {");
1582 return FAIL;
1583 }
1584
1585 str++;
1586
1587 switch (etype)
1588 {
1589 case REGLIST_VFP_S:
1590 regtype = REG_TYPE_VFS;
1591 max_regs = 32;
1592 break;
1593
1594 case REGLIST_VFP_D:
1595 regtype = REG_TYPE_VFD;
1596 break;
1597
1598 case REGLIST_NEON_D:
1599 regtype = REG_TYPE_NDQ;
1600 break;
1601 }
1602
1603 if (etype != REGLIST_VFP_S)
1604 {
1605 /* VFPv3 allows 32 D registers. */
1606 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1607 {
1608 max_regs = 32;
1609 if (thumb_mode)
1610 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1611 fpu_vfp_ext_v3);
1612 else
1613 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1614 fpu_vfp_ext_v3);
1615 }
1616 else
1617 max_regs = 16;
1618 }
1619
1620 base_reg = max_regs;
1621
1622 do
1623 {
1624 int setmask = 1, addregs = 1;
1625
1626 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1627
1628 if (new_base == FAIL)
1629 {
1630 first_error (_(reg_expected_msgs[regtype]));
1631 return FAIL;
1632 }
1633
1634 if (new_base >= max_regs)
1635 {
1636 first_error (_("register out of range in list"));
1637 return FAIL;
1638 }
1639
1640 /* Note: a value of 2 * n is returned for the register Q<n>. */
1641 if (regtype == REG_TYPE_NQ)
1642 {
1643 setmask = 3;
1644 addregs = 2;
1645 }
1646
1647 if (new_base < base_reg)
1648 base_reg = new_base;
1649
1650 if (mask & (setmask << new_base))
1651 {
1652 first_error (_("invalid register list"));
1653 return FAIL;
1654 }
1655
1656 if ((mask >> new_base) != 0 && ! warned)
1657 {
1658 as_tsktsk (_("register list not in ascending order"));
1659 warned = 1;
1660 }
1661
1662 mask |= setmask << new_base;
1663 count += addregs;
1664
1665 if (*str == '-') /* We have the start of a range expression */
1666 {
1667 int high_range;
1668
1669 str++;
1670
1671 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1672 == FAIL)
1673 {
1674 inst.error = gettext (reg_expected_msgs[regtype]);
1675 return FAIL;
1676 }
1677
1678 if (high_range >= max_regs)
1679 {
1680 first_error (_("register out of range in list"));
1681 return FAIL;
1682 }
1683
1684 if (regtype == REG_TYPE_NQ)
1685 high_range = high_range + 1;
1686
1687 if (high_range <= new_base)
1688 {
1689 inst.error = _("register range not in ascending order");
1690 return FAIL;
1691 }
1692
1693 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1694 {
1695 if (mask & (setmask << new_base))
1696 {
1697 inst.error = _("invalid register list");
1698 return FAIL;
1699 }
1700
1701 mask |= setmask << new_base;
1702 count += addregs;
1703 }
1704 }
1705 }
1706 while (skip_past_comma (&str) != FAIL);
1707
1708 str++;
1709
1710 /* Sanity check -- should have raised a parse error above. */
1711 if (count == 0 || count > max_regs)
1712 abort ();
1713
1714 *pbase = base_reg;
1715
1716 /* Final test -- the registers must be consecutive. */
1717 mask >>= base_reg;
1718 for (i = 0; i < count; i++)
1719 {
1720 if ((mask & (1u << i)) == 0)
1721 {
1722 inst.error = _("non-contiguous register range");
1723 return FAIL;
1724 }
1725 }
1726
1727 *ccp = str;
1728
1729 return count;
1730 }
1731
1732 /* True if two alias types are the same. */
1733
1734 static int
1735 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1736 {
1737 if (!a && !b)
1738 return 1;
1739
1740 if (!a || !b)
1741 return 0;
1742
1743 if (a->defined != b->defined)
1744 return 0;
1745
1746 if ((a->defined & NTA_HASTYPE) != 0
1747 && (a->eltype.type != b->eltype.type
1748 || a->eltype.size != b->eltype.size))
1749 return 0;
1750
1751 if ((a->defined & NTA_HASINDEX) != 0
1752 && (a->index != b->index))
1753 return 0;
1754
1755 return 1;
1756 }
1757
1758 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1759 The base register is put in *PBASE.
1760 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1761 the return value.
1762 The register stride (minus one) is put in bit 4 of the return value.
1763 Bits [6:5] encode the list length (minus one).
1764 The type of the list elements is put in *ELTYPE, if non-NULL. */
1765
1766 #define NEON_LANE(X) ((X) & 0xf)
1767 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1768 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
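/* Worked example of the packed return value: for "{d0[1], d2[1]}" the base
   register is 0, the lane is 1, the stride is 2 and the length is 2, giving
   1 | (1 << 4) | (1 << 5) == 0x31.  */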
1769
1770 static int
1771 parse_neon_el_struct_list (char **str, unsigned *pbase,
1772 struct neon_type_el *eltype)
1773 {
1774 char *ptr = *str;
1775 int base_reg = -1;
1776 int reg_incr = -1;
1777 int count = 0;
1778 int lane = -1;
1779 int leading_brace = 0;
1780 enum arm_reg_type rtype = REG_TYPE_NDQ;
1781 int addregs = 1;
1782 const char *const incr_error = "register stride must be 1 or 2";
1783 const char *const type_error = "mismatched element/structure types in list";
1784 struct neon_typed_alias firsttype;
1785
1786 if (skip_past_char (&ptr, '{') == SUCCESS)
1787 leading_brace = 1;
1788
1789 do
1790 {
1791 struct neon_typed_alias atype;
1792 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1793
1794 if (getreg == FAIL)
1795 {
1796 first_error (_(reg_expected_msgs[rtype]));
1797 return FAIL;
1798 }
1799
1800 if (base_reg == -1)
1801 {
1802 base_reg = getreg;
1803 if (rtype == REG_TYPE_NQ)
1804 {
1805 reg_incr = 1;
1806 addregs = 2;
1807 }
1808 firsttype = atype;
1809 }
1810 else if (reg_incr == -1)
1811 {
1812 reg_incr = getreg - base_reg;
1813 if (reg_incr < 1 || reg_incr > 2)
1814 {
1815 first_error (_(incr_error));
1816 return FAIL;
1817 }
1818 }
1819 else if (getreg != base_reg + reg_incr * count)
1820 {
1821 first_error (_(incr_error));
1822 return FAIL;
1823 }
1824
1825 if (!neon_alias_types_same (&atype, &firsttype))
1826 {
1827 first_error (_(type_error));
1828 return FAIL;
1829 }
1830
1831 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1832 modes. */
1833 if (ptr[0] == '-')
1834 {
1835 struct neon_typed_alias htype;
1836 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1837 if (lane == -1)
1838 lane = NEON_INTERLEAVE_LANES;
1839 else if (lane != NEON_INTERLEAVE_LANES)
1840 {
1841 first_error (_(type_error));
1842 return FAIL;
1843 }
1844 if (reg_incr == -1)
1845 reg_incr = 1;
1846 else if (reg_incr != 1)
1847 {
1848 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1849 return FAIL;
1850 }
1851 ptr++;
1852 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1853 if (hireg == FAIL)
1854 {
1855 first_error (_(reg_expected_msgs[rtype]));
1856 return FAIL;
1857 }
1858 if (!neon_alias_types_same (&htype, &firsttype))
1859 {
1860 first_error (_(type_error));
1861 return FAIL;
1862 }
1863 count += hireg + dregs - getreg;
1864 continue;
1865 }
1866
1867 /* If we're using Q registers, we can't use [] or [n] syntax. */
1868 if (rtype == REG_TYPE_NQ)
1869 {
1870 count += 2;
1871 continue;
1872 }
1873
1874 if ((atype.defined & NTA_HASINDEX) != 0)
1875 {
1876 if (lane == -1)
1877 lane = atype.index;
1878 else if (lane != atype.index)
1879 {
1880 first_error (_(type_error));
1881 return FAIL;
1882 }
1883 }
1884 else if (lane == -1)
1885 lane = NEON_INTERLEAVE_LANES;
1886 else if (lane != NEON_INTERLEAVE_LANES)
1887 {
1888 first_error (_(type_error));
1889 return FAIL;
1890 }
1891 count++;
1892 }
1893 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1894
1895 /* No lane set by [x]. We must be interleaving structures. */
1896 if (lane == -1)
1897 lane = NEON_INTERLEAVE_LANES;
1898
1899 /* Sanity check. */
1900 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1901 || (count > 1 && reg_incr == -1))
1902 {
1903 first_error (_("error parsing element/structure list"));
1904 return FAIL;
1905 }
1906
1907 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1908 {
1909 first_error (_("expected }"));
1910 return FAIL;
1911 }
1912
1913 if (reg_incr == -1)
1914 reg_incr = 1;
1915
1916 if (eltype)
1917 *eltype = firsttype.eltype;
1918
1919 *pbase = base_reg;
1920 *str = ptr;
1921
1922 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1923 }
1924
1925 /* Parse an explicit relocation suffix on an expression. This is
1926 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1927 arm_reloc_hsh contains no entries, so this function can only
1928 succeed if there is no () after the word. Returns -1 on error,
1929 BFD_RELOC_UNUSED if there wasn't any suffix. */
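/* For example (ELF only): ".word sym(got)" carries the suffix "(got)", which
   is looked up in arm_reloc_hsh; a plain ".word sym" has no suffix and gives
   BFD_RELOC_UNUSED.  */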
1930 static int
1931 parse_reloc (char **str)
1932 {
1933 struct reloc_entry *r;
1934 char *p, *q;
1935
1936 if (**str != '(')
1937 return BFD_RELOC_UNUSED;
1938
1939 p = *str + 1;
1940 q = p;
1941
1942 while (*q && *q != ')' && *q != ',')
1943 q++;
1944 if (*q != ')')
1945 return -1;
1946
1947 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1948 return -1;
1949
1950 *str = q + 1;
1951 return r->reloc;
1952 }
1953
1954 /* Directives: register aliases. */
1955
1956 static struct reg_entry *
1957 insert_reg_alias (char *str, int number, int type)
1958 {
1959 struct reg_entry *new;
1960 const char *name;
1961
1962 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1963 {
1964 if (new->builtin)
1965 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1966
1967 /* Only warn about a redefinition if it's not defined as the
1968 same register. */
1969 else if (new->number != number || new->type != type)
1970 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1971
1972 return 0;
1973 }
1974
1975 name = xstrdup (str);
1976 new = xmalloc (sizeof (struct reg_entry));
1977
1978 new->name = name;
1979 new->number = number;
1980 new->type = type;
1981 new->builtin = FALSE;
1982 new->neon = NULL;
1983
1984 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1985 abort ();
1986
1987 return new;
1988 }
1989
1990 static void
1991 insert_neon_reg_alias (char *str, int number, int type,
1992 struct neon_typed_alias *atype)
1993 {
1994 struct reg_entry *reg = insert_reg_alias (str, number, type);
1995
1996 if (!reg)
1997 {
1998 first_error (_("attempt to redefine typed alias"));
1999 return;
2000 }
2001
2002 if (atype)
2003 {
2004 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
2005 *reg->neon = *atype;
2006 }
2007 }
2008
2009 /* Look for the .req directive. This is of the form:
2010
2011 new_register_name .req existing_register_name
2012
2013 If we find one, or if it looks sufficiently like one that we want to
2014 handle any error here, return non-zero. Otherwise return zero. */
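/* For instance, after "acc .req r0" both "acc" and "ACC" name r0, since
   lower- and upper-case forms of the alias are also inserted below.  */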
2015
2016 static int
2017 create_register_alias (char * newname, char *p)
2018 {
2019 struct reg_entry *old;
2020 char *oldname, *nbuf;
2021 size_t nlen;
2022
2023 /* The input scrubber ensures that whitespace after the mnemonic is
2024 collapsed to single spaces. */
2025 oldname = p;
2026 if (strncmp (oldname, " .req ", 6) != 0)
2027 return 0;
2028
2029 oldname += 6;
2030 if (*oldname == '\0')
2031 return 0;
2032
2033 old = hash_find (arm_reg_hsh, oldname);
2034 if (!old)
2035 {
2036 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2037 return 1;
2038 }
2039
2040 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2041 the desired alias name, and p points to its end. If not, then
2042 the desired alias name is in the global original_case_string. */
2043 #ifdef TC_CASE_SENSITIVE
2044 nlen = p - newname;
2045 #else
2046 newname = original_case_string;
2047 nlen = strlen (newname);
2048 #endif
2049
2050 nbuf = alloca (nlen + 1);
2051 memcpy (nbuf, newname, nlen);
2052 nbuf[nlen] = '\0';
2053
2054 /* Create aliases under the new name as stated; an all-lowercase
2055 version of the new name; and an all-uppercase version of the new
2056 name. */
2057 insert_reg_alias (nbuf, old->number, old->type);
2058
2059 for (p = nbuf; *p; p++)
2060 *p = TOUPPER (*p);
2061
2062 if (strncmp (nbuf, newname, nlen))
2063 insert_reg_alias (nbuf, old->number, old->type);
2064
2065 for (p = nbuf; *p; p++)
2066 *p = TOLOWER (*p);
2067
2068 if (strncmp (nbuf, newname, nlen))
2069 insert_reg_alias (nbuf, old->number, old->type);
2070
2071 return 1;
2072 }
2073
2074 /* Create a Neon typed/indexed register alias using directives, e.g.:
2075 X .dn d5.s32[1]
2076 Y .qn 6.s16
2077 Z .dn d7
2078 T .dn Z[0]
2079 These typed registers can be used instead of the types specified after the
2080 Neon mnemonic, so long as all operands given have types. Types can also be
2081 specified directly, e.g.:
2082 vadd d0.s32, d1.s32, d2.s32
2083 */
2084
2085 static int
2086 create_neon_reg_alias (char *newname, char *p)
2087 {
2088 enum arm_reg_type basetype;
2089 struct reg_entry *basereg;
2090 struct reg_entry mybasereg;
2091 struct neon_type ntype;
2092 struct neon_typed_alias typeinfo;
2093 char *namebuf, *nameend;
2094 int namelen;
2095
2096 typeinfo.defined = 0;
2097 typeinfo.eltype.type = NT_invtype;
2098 typeinfo.eltype.size = -1;
2099 typeinfo.index = -1;
2100
2101 nameend = p;
2102
2103 if (strncmp (p, " .dn ", 5) == 0)
2104 basetype = REG_TYPE_VFD;
2105 else if (strncmp (p, " .qn ", 5) == 0)
2106 basetype = REG_TYPE_NQ;
2107 else
2108 return 0;
2109
2110 p += 5;
2111
2112 if (*p == '\0')
2113 return 0;
2114
2115 basereg = arm_reg_parse_multi (&p);
2116
2117 if (basereg && basereg->type != basetype)
2118 {
2119 as_bad (_("bad type for register"));
2120 return 0;
2121 }
2122
2123 if (basereg == NULL)
2124 {
2125 expressionS exp;
2126 /* Try parsing as an integer. */
2127 my_get_expression (&exp, &p, GE_NO_PREFIX);
2128 if (exp.X_op != O_constant)
2129 {
2130 as_bad (_("expression must be constant"));
2131 return 0;
2132 }
2133 basereg = &mybasereg;
2134 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2135 : exp.X_add_number;
2136 basereg->neon = 0;
2137 }
2138
2139 if (basereg->neon)
2140 typeinfo = *basereg->neon;
2141
2142 if (parse_neon_type (&ntype, &p) == SUCCESS)
2143 {
2144 /* We got a type. */
2145 if (typeinfo.defined & NTA_HASTYPE)
2146 {
2147 as_bad (_("can't redefine the type of a register alias"));
2148 return 0;
2149 }
2150
2151 typeinfo.defined |= NTA_HASTYPE;
2152 if (ntype.elems != 1)
2153 {
2154 as_bad (_("you must specify a single type only"));
2155 return 0;
2156 }
2157 typeinfo.eltype = ntype.el[0];
2158 }
2159
2160 if (skip_past_char (&p, '[') == SUCCESS)
2161 {
2162 expressionS exp;
2163 /* We got a scalar index. */
2164
2165 if (typeinfo.defined & NTA_HASINDEX)
2166 {
2167 as_bad (_("can't redefine the index of a scalar alias"));
2168 return 0;
2169 }
2170
2171 my_get_expression (&exp, &p, GE_NO_PREFIX);
2172
2173 if (exp.X_op != O_constant)
2174 {
2175 as_bad (_("scalar index must be constant"));
2176 return 0;
2177 }
2178
2179 typeinfo.defined |= NTA_HASINDEX;
2180 typeinfo.index = exp.X_add_number;
2181
2182 if (skip_past_char (&p, ']') == FAIL)
2183 {
2184 as_bad (_("expecting ]"));
2185 return 0;
2186 }
2187 }
2188
2189 namelen = nameend - newname;
2190 namebuf = alloca (namelen + 1);
2191 strncpy (namebuf, newname, namelen);
2192 namebuf[namelen] = '\0';
2193
2194 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2195 typeinfo.defined != 0 ? &typeinfo : NULL);
2196
2197 /* Insert name in all uppercase. */
2198 for (p = namebuf; *p; p++)
2199 *p = TOUPPER (*p);
2200
2201 if (strncmp (namebuf, newname, namelen))
2202 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2203 typeinfo.defined != 0 ? &typeinfo : NULL);
2204
2205 /* Insert name in all lowercase. */
2206 for (p = namebuf; *p; p++)
2207 *p = TOLOWER (*p);
2208
2209 if (strncmp (namebuf, newname, namelen))
2210 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2211 typeinfo.defined != 0 ? &typeinfo : NULL);
2212
2213 return 1;
2214 }
2215
2216 /* Should never be called, as .req goes between the alias and the
2217 register name, not at the beginning of the line. */
2218 static void
2219 s_req (int a ATTRIBUTE_UNUSED)
2220 {
2221 as_bad (_("invalid syntax for .req directive"));
2222 }
2223
2224 static void
2225 s_dn (int a ATTRIBUTE_UNUSED)
2226 {
2227 as_bad (_("invalid syntax for .dn directive"));
2228 }
2229
2230 static void
2231 s_qn (int a ATTRIBUTE_UNUSED)
2232 {
2233 as_bad (_("invalid syntax for .qn directive"));
2234 }
2235
2236 /* The .unreq directive deletes an alias which was previously defined
2237 by .req. For example:
2238
2239 my_alias .req r11
2240 .unreq my_alias */
2241
2242 static void
2243 s_unreq (int a ATTRIBUTE_UNUSED)
2244 {
2245 char * name;
2246 char saved_char;
2247
2248 name = input_line_pointer;
2249
2250 while (*input_line_pointer != 0
2251 && *input_line_pointer != ' '
2252 && *input_line_pointer != '\n')
2253 ++input_line_pointer;
2254
2255 saved_char = *input_line_pointer;
2256 *input_line_pointer = 0;
2257
2258 if (!*name)
2259 as_bad (_("invalid syntax for .unreq directive"));
2260 else
2261 {
2262 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2263
2264 if (!reg)
2265 as_bad (_("unknown register alias '%s'"), name);
2266 else if (reg->builtin)
2267 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2268 name);
2269 else
2270 {
2271 hash_delete (arm_reg_hsh, name);
2272 free ((char *) reg->name);
2273 if (reg->neon)
2274 free (reg->neon);
2275 free (reg);
2276 }
2277 }
2278
2279 *input_line_pointer = saved_char;
2280 demand_empty_rest_of_line ();
2281 }
2282
2283 /* Directives: Instruction set selection. */
2284
2285 #ifdef OBJ_ELF
2286 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2287 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2288 Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
2289 and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
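/* For example, an ARM-mode function followed by a literal pool gets a
   $a mapping symbol at its first instruction and a $d symbol at the
   start of the pool data, letting tools distinguish code from data. */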
2290
2291 static enum mstate mapstate = MAP_UNDEFINED;
2292
2293 void
2294 mapping_state (enum mstate state)
2295 {
2296 symbolS * symbolP;
2297 const char * symname;
2298 int type;
2299
2300 if (mapstate == state)
2301 /* The mapping symbol has already been emitted.
2302 There is nothing else to do. */
2303 return;
2304
2305 mapstate = state;
2306
2307 switch (state)
2308 {
2309 case MAP_DATA:
2310 symname = "$d";
2311 type = BSF_NO_FLAGS;
2312 break;
2313 case MAP_ARM:
2314 symname = "$a";
2315 type = BSF_NO_FLAGS;
2316 break;
2317 case MAP_THUMB:
2318 symname = "$t";
2319 type = BSF_NO_FLAGS;
2320 break;
2321 case MAP_UNDEFINED:
2322 return;
2323 default:
2324 abort ();
2325 }
2326
2327 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2328
2329 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2330 symbol_table_insert (symbolP);
2331 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2332
2333 switch (state)
2334 {
2335 case MAP_ARM:
2336 THUMB_SET_FUNC (symbolP, 0);
2337 ARM_SET_THUMB (symbolP, 0);
2338 ARM_SET_INTERWORK (symbolP, support_interwork);
2339 break;
2340
2341 case MAP_THUMB:
2342 THUMB_SET_FUNC (symbolP, 1);
2343 ARM_SET_THUMB (symbolP, 1);
2344 ARM_SET_INTERWORK (symbolP, support_interwork);
2345 break;
2346
2347 case MAP_DATA:
2348 default:
2349 return;
2350 }
2351 }
2352 #else
2353 #define mapping_state(x) /* nothing */
2354 #endif
2355
2356 /* Find the real, Thumb encoded start of a Thumb function. */
2357
2358 static symbolS *
2359 find_real_start (symbolS * symbolP)
2360 {
2361 char * real_start;
2362 const char * name = S_GET_NAME (symbolP);
2363 symbolS * new_target;
2364
2365 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2366 #define STUB_NAME ".real_start_of"
2367
2368 if (name == NULL)
2369 abort ();
2370
2371 /* The compiler may generate BL instructions to local labels because
2372 it needs to perform a branch to a far away location. These labels
2373 do not have a corresponding ".real_start_of" label. We check
2374 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2375 the ".real_start_of" convention for nonlocal branches. */
2376 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2377 return symbolP;
2378
2379 real_start = ACONCAT ((STUB_NAME, name, NULL));
2380 new_target = symbol_find (real_start);
2381
2382 if (new_target == NULL)
2383 {
2384 as_warn ("Failed to find real start of function: %s\n", name);
2385 new_target = symbolP;
2386 }
2387
2388 return new_target;
2389 }
2390
2391 static void
2392 opcode_select (int width)
2393 {
2394 switch (width)
2395 {
2396 case 16:
2397 if (! thumb_mode)
2398 {
2399 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2400 as_bad (_("selected processor does not support THUMB opcodes"));
2401
2402 thumb_mode = 1;
2403 /* No need to force the alignment, since we will have been
2404 coming from ARM mode, which is word-aligned. */
2405 record_alignment (now_seg, 1);
2406 }
2407 mapping_state (MAP_THUMB);
2408 break;
2409
2410 case 32:
2411 if (thumb_mode)
2412 {
2413 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2414 as_bad (_("selected processor does not support ARM opcodes"));
2415
2416 thumb_mode = 0;
2417
2418 if (!need_pass_2)
2419 frag_align (2, 0, 0);
2420
2421 record_alignment (now_seg, 1);
2422 }
2423 mapping_state (MAP_ARM);
2424 break;
2425
2426 default:
2427 as_bad (_("invalid instruction size selected (%d)"), width);
2428 }
2429 }
2430
2431 static void
2432 s_arm (int ignore ATTRIBUTE_UNUSED)
2433 {
2434 opcode_select (32);
2435 demand_empty_rest_of_line ();
2436 }
2437
2438 static void
2439 s_thumb (int ignore ATTRIBUTE_UNUSED)
2440 {
2441 opcode_select (16);
2442 demand_empty_rest_of_line ();
2443 }
2444
2445 static void
2446 s_code (int unused ATTRIBUTE_UNUSED)
2447 {
2448 int temp;
2449
2450 temp = get_absolute_expression ();
2451 switch (temp)
2452 {
2453 case 16:
2454 case 32:
2455 opcode_select (temp);
2456 break;
2457
2458 default:
2459 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2460 }
2461 }
2462
2463 static void
2464 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2465 {
2466 /* If we are not already in Thumb mode go into it, EVEN if
2467 the target processor does not support Thumb instructions.
2468 This is used by gcc/config/arm/lib1funcs.asm, for example,
2469 to compile interworking support functions even if the
2470 target processor does not support interworking. */
2471 if (! thumb_mode)
2472 {
2473 thumb_mode = 2;
2474 record_alignment (now_seg, 1);
2475 }
2476
2477 demand_empty_rest_of_line ();
2478 }
2479
2480 static void
2481 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2482 {
2483 s_thumb (0);
2484
2485 /* The following label is the name/address of the start of a Thumb function.
2486 We need to know this for the interworking support. */
2487 label_is_thumb_function_name = TRUE;
2488 }
2489
2490 /* Perform a .set directive, but also mark the alias as
2491 being a thumb function. */
2492
2493 static void
2494 s_thumb_set (int equiv)
2495 {
2496 /* XXX the following is a duplicate of the code for s_set() in read.c
2497 We cannot just call that code as we need to get at the symbol that
2498 is created. */
2499 char * name;
2500 char delim;
2501 char * end_name;
2502 symbolS * symbolP;
2503
2504 /* Especial apologies for the random logic:
2505 This just grew, and could be parsed much more simply!
2506 Dean - in haste. */
2507 name = input_line_pointer;
2508 delim = get_symbol_end ();
2509 end_name = input_line_pointer;
2510 *end_name = delim;
2511
2512 if (*input_line_pointer != ',')
2513 {
2514 *end_name = 0;
2515 as_bad (_("expected comma after name \"%s\""), name);
2516 *end_name = delim;
2517 ignore_rest_of_line ();
2518 return;
2519 }
2520
2521 input_line_pointer++;
2522 *end_name = 0;
2523
2524 if (name[0] == '.' && name[1] == '\0')
2525 {
2526 /* XXX - this should not happen to .thumb_set. */
2527 abort ();
2528 }
2529
2530 if ((symbolP = symbol_find (name)) == NULL
2531 && (symbolP = md_undefined_symbol (name)) == NULL)
2532 {
2533 #ifndef NO_LISTING
2534 /* When doing symbol listings, play games with dummy fragments living
2535 outside the normal fragment chain to record the file and line info
2536 for this symbol. */
2537 if (listing & LISTING_SYMBOLS)
2538 {
2539 extern struct list_info_struct * listing_tail;
2540 fragS * dummy_frag = xmalloc (sizeof (fragS));
2541
2542 memset (dummy_frag, 0, sizeof (fragS));
2543 dummy_frag->fr_type = rs_fill;
2544 dummy_frag->line = listing_tail;
2545 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2546 dummy_frag->fr_symbol = symbolP;
2547 }
2548 else
2549 #endif
2550 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2551
2552 #ifdef OBJ_COFF
2553 /* "set" symbols are local unless otherwise specified. */
2554 SF_SET_LOCAL (symbolP);
2555 #endif /* OBJ_COFF */
2556 } /* Make a new symbol. */
2557
2558 symbol_table_insert (symbolP);
2559
2560 * end_name = delim;
2561
2562 if (equiv
2563 && S_IS_DEFINED (symbolP)
2564 && S_GET_SEGMENT (symbolP) != reg_section)
2565 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2566
2567 pseudo_set (symbolP);
2568
2569 demand_empty_rest_of_line ();
2570
2571 /* XXX Now we come to the Thumb specific bit of code. */
2572
2573 THUMB_SET_FUNC (symbolP, 1);
2574 ARM_SET_THUMB (symbolP, 1);
2575 #if defined OBJ_ELF || defined OBJ_COFF
2576 ARM_SET_INTERWORK (symbolP, support_interwork);
2577 #endif
2578 }
2579
2580 /* Directives: Mode selection. */
2581
2582 /* .syntax [unified|divided] - choose the new unified syntax
2583 (same for Arm and Thumb encoding, modulo slight differences in what
2584 can be represented) or the old divergent syntax for each mode. */
2585 static void
2586 s_syntax (int unused ATTRIBUTE_UNUSED)
2587 {
2588 char *name, delim;
2589
2590 name = input_line_pointer;
2591 delim = get_symbol_end ();
2592
2593 if (!strcasecmp (name, "unified"))
2594 unified_syntax = TRUE;
2595 else if (!strcasecmp (name, "divided"))
2596 unified_syntax = FALSE;
2597 else
2598 {
2599 as_bad (_("unrecognized syntax mode \"%s\""), name);
2600 return;
2601 }
2602 *input_line_pointer = delim;
2603 demand_empty_rest_of_line ();
2604 }
2605
2606 /* Directives: sectioning and alignment. */
2607
2608 /* Same as s_align_ptwo but align 0 => align 2. */
2609
2610 static void
2611 s_align (int unused ATTRIBUTE_UNUSED)
2612 {
2613 int temp;
2614 long temp_fill;
2615 long max_alignment = 15;
2616
2617 temp = get_absolute_expression ();
2618 if (temp > max_alignment)
2619 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2620 else if (temp < 0)
2621 {
2622 as_bad (_("alignment negative. 0 assumed."));
2623 temp = 0;
2624 }
2625
2626 if (*input_line_pointer == ',')
2627 {
2628 input_line_pointer++;
2629 temp_fill = get_absolute_expression ();
2630 }
2631 else
2632 temp_fill = 0;
2633
2634 if (!temp)
2635 temp = 2;
2636
2637 /* Only make a frag if we HAVE to. */
2638 if (temp && !need_pass_2)
2639 frag_align (temp, (int) temp_fill, 0);
2640 demand_empty_rest_of_line ();
2641
2642 record_alignment (now_seg, temp);
2643 }
2644
2645 static void
2646 s_bss (int ignore ATTRIBUTE_UNUSED)
2647 {
2648 /* We don't support putting frags in the BSS segment: we fake it by
2649 marking in_bss, then looking at s_skip for clues. */
2650 subseg_set (bss_section, 0);
2651 demand_empty_rest_of_line ();
2652 mapping_state (MAP_DATA);
2653 }
2654
2655 static void
2656 s_even (int ignore ATTRIBUTE_UNUSED)
2657 {
2658 /* Never make a frag if we expect an extra pass. */
2659 if (!need_pass_2)
2660 frag_align (1, 0, 0);
2661
2662 record_alignment (now_seg, 1);
2663
2664 demand_empty_rest_of_line ();
2665 }
2666
2667 /* Directives: Literal pools. */
2668
2669 static literal_pool *
2670 find_literal_pool (void)
2671 {
2672 literal_pool * pool;
2673
2674 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2675 {
2676 if (pool->section == now_seg
2677 && pool->sub_section == now_subseg)
2678 break;
2679 }
2680
2681 return pool;
2682 }
2683
2684 static literal_pool *
2685 find_or_make_literal_pool (void)
2686 {
2687 /* Next literal pool ID number. */
2688 static unsigned int latest_pool_num = 1;
2689 literal_pool * pool;
2690
2691 pool = find_literal_pool ();
2692
2693 if (pool == NULL)
2694 {
2695 /* Create a new pool. */
2696 pool = xmalloc (sizeof (* pool));
2697 if (! pool)
2698 return NULL;
2699
2700 pool->next_free_entry = 0;
2701 pool->section = now_seg;
2702 pool->sub_section = now_subseg;
2703 pool->next = list_of_pools;
2704 pool->symbol = NULL;
2705
2706 /* Add it to the list. */
2707 list_of_pools = pool;
2708 }
2709
2710 /* New pools, and emptied pools, will have a NULL symbol. */
2711 if (pool->symbol == NULL)
2712 {
2713 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2714 (valueT) 0, &zero_address_frag);
2715 pool->id = latest_pool_num ++;
2716 }
2717
2718 /* Done. */
2719 return pool;
2720 }
2721
2722 /* Add the literal in the global 'inst'
2723 structure to the relevant literal pool. */
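/* For example, "ldr r0, =0x12345678" will typically route its constant
   through here; identical expressions are shared, and the pool is
   flushed by ".ltorg" or ".pool" (see s_ltorg below). */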
2724
2725 static int
2726 add_to_lit_pool (void)
2727 {
2728 literal_pool * pool;
2729 unsigned int entry;
2730
2731 pool = find_or_make_literal_pool ();
2732
2733 /* Check if this literal value is already in the pool. */
2734 for (entry = 0; entry < pool->next_free_entry; entry ++)
2735 {
2736 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2737 && (inst.reloc.exp.X_op == O_constant)
2738 && (pool->literals[entry].X_add_number
2739 == inst.reloc.exp.X_add_number)
2740 && (pool->literals[entry].X_unsigned
2741 == inst.reloc.exp.X_unsigned))
2742 break;
2743
2744 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2745 && (inst.reloc.exp.X_op == O_symbol)
2746 && (pool->literals[entry].X_add_number
2747 == inst.reloc.exp.X_add_number)
2748 && (pool->literals[entry].X_add_symbol
2749 == inst.reloc.exp.X_add_symbol)
2750 && (pool->literals[entry].X_op_symbol
2751 == inst.reloc.exp.X_op_symbol))
2752 break;
2753 }
2754
2755 /* Do we need to create a new entry? */
2756 if (entry == pool->next_free_entry)
2757 {
2758 if (entry >= MAX_LITERAL_POOL_SIZE)
2759 {
2760 inst.error = _("literal pool overflow");
2761 return FAIL;
2762 }
2763
2764 pool->literals[entry] = inst.reloc.exp;
2765 pool->next_free_entry += 1;
2766 }
2767
2768 inst.reloc.exp.X_op = O_symbol;
2769 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2770 inst.reloc.exp.X_add_symbol = pool->symbol;
2771
2772 return SUCCESS;
2773 }
2774
2775 /* Can't use symbol_new here, so have to create a symbol and then at
2776 a later date assign it a value. That's what these functions do. */
2777
2778 static void
2779 symbol_locate (symbolS * symbolP,
2780 const char * name, /* It is copied, the caller can modify. */
2781 segT segment, /* Segment identifier (SEG_<something>). */
2782 valueT valu, /* Symbol value. */
2783 fragS * frag) /* Associated fragment. */
2784 {
2785 unsigned int name_length;
2786 char * preserved_copy_of_name;
2787
2788 name_length = strlen (name) + 1; /* +1 for \0. */
2789 obstack_grow (&notes, name, name_length);
2790 preserved_copy_of_name = obstack_finish (&notes);
2791
2792 #ifdef tc_canonicalize_symbol_name
2793 preserved_copy_of_name =
2794 tc_canonicalize_symbol_name (preserved_copy_of_name);
2795 #endif
2796
2797 S_SET_NAME (symbolP, preserved_copy_of_name);
2798
2799 S_SET_SEGMENT (symbolP, segment);
2800 S_SET_VALUE (symbolP, valu);
2801 symbol_clear_list_pointers (symbolP);
2802
2803 symbol_set_frag (symbolP, frag);
2804
2805 /* Link to end of symbol chain. */
2806 {
2807 extern int symbol_table_frozen;
2808
2809 if (symbol_table_frozen)
2810 abort ();
2811 }
2812
2813 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2814
2815 obj_symbol_new_hook (symbolP);
2816
2817 #ifdef tc_symbol_new_hook
2818 tc_symbol_new_hook (symbolP);
2819 #endif
2820
2821 #ifdef DEBUG_SYMS
2822 verify_symbol_chain (symbol_rootP, symbol_lastP);
2823 #endif /* DEBUG_SYMS */
2824 }
2825
2826
2827 static void
2828 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2829 {
2830 unsigned int entry;
2831 literal_pool * pool;
2832 char sym_name[20];
2833
2834 pool = find_literal_pool ();
2835 if (pool == NULL
2836 || pool->symbol == NULL
2837 || pool->next_free_entry == 0)
2838 return;
2839
2840 mapping_state (MAP_DATA);
2841
2842 /* Align the pool, since it will be accessed with word loads.
2843 Only make a frag if we have to. */
2844 if (!need_pass_2)
2845 frag_align (2, 0, 0);
2846
2847 record_alignment (now_seg, 2);
2848
2849 sprintf (sym_name, "$$lit_\002%x", pool->id);
2850
2851 symbol_locate (pool->symbol, sym_name, now_seg,
2852 (valueT) frag_now_fix (), frag_now);
2853 symbol_table_insert (pool->symbol);
2854
2855 ARM_SET_THUMB (pool->symbol, thumb_mode);
2856
2857 #if defined OBJ_COFF || defined OBJ_ELF
2858 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2859 #endif
2860
2861 for (entry = 0; entry < pool->next_free_entry; entry ++)
2862 /* First output the expression in the instruction to the pool. */
2863 emit_expr (&(pool->literals[entry]), 4); /* .word */
2864
2865 /* Mark the pool as empty. */
2866 pool->next_free_entry = 0;
2867 pool->symbol = NULL;
2868 }
2869
2870 #ifdef OBJ_ELF
2871 /* Forward declarations for functions below, in the MD interface
2872 section. */
2873 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2874 static valueT create_unwind_entry (int);
2875 static void start_unwind_section (const segT, int);
2876 static void add_unwind_opcode (valueT, int);
2877 static void flush_pending_unwind (void);
2878
2879 /* Directives: Data. */
2880
2881 static void
2882 s_arm_elf_cons (int nbytes)
2883 {
2884 expressionS exp;
2885
2886 #ifdef md_flush_pending_output
2887 md_flush_pending_output ();
2888 #endif
2889
2890 if (is_it_end_of_statement ())
2891 {
2892 demand_empty_rest_of_line ();
2893 return;
2894 }
2895
2896 #ifdef md_cons_align
2897 md_cons_align (nbytes);
2898 #endif
2899
2900 mapping_state (MAP_DATA);
2901 do
2902 {
2903 int reloc;
2904 char *base = input_line_pointer;
2905
2906 expression (& exp);
2907
2908 if (exp.X_op != O_symbol)
2909 emit_expr (&exp, (unsigned int) nbytes);
2910 else
2911 {
2912 char *before_reloc = input_line_pointer;
2913 reloc = parse_reloc (&input_line_pointer);
2914 if (reloc == -1)
2915 {
2916 as_bad (_("unrecognized relocation suffix"));
2917 ignore_rest_of_line ();
2918 return;
2919 }
2920 else if (reloc == BFD_RELOC_UNUSED)
2921 emit_expr (&exp, (unsigned int) nbytes);
2922 else
2923 {
2924 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2925 int size = bfd_get_reloc_size (howto);
2926
2927 if (reloc == BFD_RELOC_ARM_PLT32)
2928 {
2929 as_bad (_("(plt) is only valid on branch targets"));
2930 reloc = BFD_RELOC_UNUSED;
2931 size = 0;
2932 }
2933
2934 if (size > nbytes)
2935 as_bad (_("%s relocations do not fit in %d bytes"),
2936 howto->name, nbytes);
2937 else
2938 {
2939 /* We've parsed an expression stopping at O_symbol.
2940 But there may be more expression left now that we
2941 have parsed the relocation marker. Parse it again.
2942 XXX Surely there is a cleaner way to do this. */
2943 char *p = input_line_pointer;
2944 int offset;
2945 char *save_buf = alloca (input_line_pointer - base);
2946 memcpy (save_buf, base, input_line_pointer - base);
2947 memmove (base + (input_line_pointer - before_reloc),
2948 base, before_reloc - base);
2949
2950 input_line_pointer = base + (input_line_pointer-before_reloc);
2951 expression (&exp);
2952 memcpy (base, save_buf, p - base);
2953
2954 offset = nbytes - size;
2955 p = frag_more ((int) nbytes);
2956 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2957 size, &exp, 0, reloc);
2958 }
2959 }
2960 }
2961 }
2962 while (*input_line_pointer++ == ',');
2963
2964 /* Put terminator back into stream. */
2965 input_line_pointer --;
2966 demand_empty_rest_of_line ();
2967 }
2968
2969
2970 /* Parse a .rel31 directive. */
2971
2972 static void
2973 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2974 {
2975 expressionS exp;
2976 char *p;
2977 valueT highbit;
2978
2979 highbit = 0;
2980 if (*input_line_pointer == '1')
2981 highbit = 0x80000000;
2982 else if (*input_line_pointer != '0')
2983 as_bad (_("expected 0 or 1"));
2984
2985 input_line_pointer++;
2986 if (*input_line_pointer != ',')
2987 as_bad (_("missing comma"));
2988 input_line_pointer++;
2989
2990 #ifdef md_flush_pending_output
2991 md_flush_pending_output ();
2992 #endif
2993
2994 #ifdef md_cons_align
2995 md_cons_align (4);
2996 #endif
2997
2998 mapping_state (MAP_DATA);
2999
3000 expression (&exp);
3001
3002 p = frag_more (4);
3003 md_number_to_chars (p, highbit, 4);
3004 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3005 BFD_RELOC_ARM_PREL31);
3006
3007 demand_empty_rest_of_line ();
3008 }
3009
3010 /* Directives: AEABI stack-unwind tables. */
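/* A typical (purely illustrative) use of these directives is:

       .fnstart
       .save   {r4, lr}
       push    {r4, lr}
       ...
       pop     {r4, pc}
       .fnend

   The handlers below only accumulate unwind opcodes; the index and
   exception table entries are emitted by .fnend and .handlerdata. */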
3011
3012 /* Parse an unwind_fnstart directive. Simply records the current location. */
3013
3014 static void
3015 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3016 {
3017 demand_empty_rest_of_line ();
3018 /* Mark the start of the function. */
3019 unwind.proc_start = expr_build_dot ();
3020
3021 /* Reset the rest of the unwind info. */
3022 unwind.opcode_count = 0;
3023 unwind.table_entry = NULL;
3024 unwind.personality_routine = NULL;
3025 unwind.personality_index = -1;
3026 unwind.frame_size = 0;
3027 unwind.fp_offset = 0;
3028 unwind.fp_reg = 13;
3029 unwind.fp_used = 0;
3030 unwind.sp_restored = 0;
3031 }
3032
3033
3034 /* Parse a handlerdata directive. Creates the exception handling table entry
3035 for the function. */
3036
3037 static void
3038 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3039 {
3040 demand_empty_rest_of_line ();
3041 if (unwind.table_entry)
3042 as_bad (_("dupicate .handlerdata directive"));
3043
3044 create_unwind_entry (1);
3045 }
3046
3047 /* Parse an unwind_fnend directive. Generates the index table entry. */
3048
3049 static void
3050 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3051 {
3052 long where;
3053 char *ptr;
3054 valueT val;
3055
3056 demand_empty_rest_of_line ();
3057
3058 /* Add eh table entry. */
3059 if (unwind.table_entry == NULL)
3060 val = create_unwind_entry (0);
3061 else
3062 val = 0;
3063
3064 /* Add index table entry. This is two words. */
3065 start_unwind_section (unwind.saved_seg, 1);
3066 frag_align (2, 0, 0);
3067 record_alignment (now_seg, 2);
3068
3069 ptr = frag_more (8);
3070 where = frag_now_fix () - 8;
3071
3072 /* Self relative offset of the function start. */
3073 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3074 BFD_RELOC_ARM_PREL31);
3075
3076 /* Indicate dependency on EHABI-defined personality routines to the
3077 linker, if it hasn't been done already. */
3078 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3079 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3080 {
3081 static const char *const name[] = {
3082 "__aeabi_unwind_cpp_pr0",
3083 "__aeabi_unwind_cpp_pr1",
3084 "__aeabi_unwind_cpp_pr2"
3085 };
3086 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3087 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3088 marked_pr_dependency |= 1 << unwind.personality_index;
3089 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3090 = marked_pr_dependency;
3091 }
3092
3093 if (val)
3094 /* Inline exception table entry. */
3095 md_number_to_chars (ptr + 4, val, 4);
3096 else
3097 /* Self relative offset of the table entry. */
3098 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3099 BFD_RELOC_ARM_PREL31);
3100
3101 /* Restore the original section. */
3102 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3103 }
3104
3105
3106 /* Parse an unwind_cantunwind directive. */
3107
3108 static void
3109 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3110 {
3111 demand_empty_rest_of_line ();
3112 if (unwind.personality_routine || unwind.personality_index != -1)
3113 as_bad (_("personality routine specified for cantunwind frame"));
3114
3115 unwind.personality_index = -2;
3116 }
3117
3118
3119 /* Parse a personalityindex directive. */
3120
3121 static void
3122 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3123 {
3124 expressionS exp;
3125
3126 if (unwind.personality_routine || unwind.personality_index != -1)
3127 as_bad (_("duplicate .personalityindex directive"));
3128
3129 expression (&exp);
3130
3131 if (exp.X_op != O_constant
3132 || exp.X_add_number < 0 || exp.X_add_number > 15)
3133 {
3134 as_bad (_("bad personality routine number"));
3135 ignore_rest_of_line ();
3136 return;
3137 }
3138
3139 unwind.personality_index = exp.X_add_number;
3140
3141 demand_empty_rest_of_line ();
3142 }
3143
3144
3145 /* Parse a personality directive. */
3146
3147 static void
3148 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3149 {
3150 char *name, *p, c;
3151
3152 if (unwind.personality_routine || unwind.personality_index != -1)
3153 as_bad (_("duplicate .personality directive"));
3154
3155 name = input_line_pointer;
3156 c = get_symbol_end ();
3157 p = input_line_pointer;
3158 unwind.personality_routine = symbol_find_or_make (name);
3159 *p = c;
3160 demand_empty_rest_of_line ();
3161 }
3162
3163
3164 /* Parse a directive saving core registers. */
3165
3166 static void
3167 s_arm_unwind_save_core (void)
3168 {
3169 valueT op;
3170 long range;
3171 int n;
3172
3173 range = parse_reg_list (&input_line_pointer);
3174 if (range == FAIL)
3175 {
3176 as_bad (_("expected register list"));
3177 ignore_rest_of_line ();
3178 return;
3179 }
3180
3181 demand_empty_rest_of_line ();
3182
3183 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3184 into .unwind_save {..., sp, ...}. We aren't bothered about the value of
3185 ip because it is clobbered by calls. */
3186 if (unwind.sp_restored && unwind.fp_reg == 12
3187 && (range & 0x3000) == 0x1000)
3188 {
3189 unwind.opcode_count--;
3190 unwind.sp_restored = 0;
3191 range = (range | 0x2000) & ~0x1000;
3192 unwind.pending_offset = 0;
3193 }
3194
3195 /* Pop r4-r15. */
3196 if (range & 0xfff0)
3197 {
3198 /* See if we can use the short opcodes. These pop a block of up to 8
3199 registers starting with r4, plus maybe r14. */
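/* Worked example: for ".save {r4-r7, lr}" the loop below finds n == 4
   (r4-r7 are contiguous from r4) and no other high registers except r14
   are set, so the short form produces the single opcode byte
   0xa8 | 3 == 0xab. */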
3200 for (n = 0; n < 8; n++)
3201 {
3202 /* Break at the first non-saved register. */
3203 if ((range & (1 << (n + 4))) == 0)
3204 break;
3205 }
3206 /* See if there are any other bits set. */
3207 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3208 {
3209 /* Use the long form. */
3210 op = 0x8000 | ((range >> 4) & 0xfff);
3211 add_unwind_opcode (op, 2);
3212 }
3213 else
3214 {
3215 /* Use the short form. */
3216 if (range & 0x4000)
3217 op = 0xa8; /* Pop r14. */
3218 else
3219 op = 0xa0; /* Do not pop r14. */
3220 op |= (n - 1);
3221 add_unwind_opcode (op, 1);
3222 }
3223 }
3224
3225 /* Pop r0-r3. */
3226 if (range & 0xf)
3227 {
3228 op = 0xb100 | (range & 0xf);
3229 add_unwind_opcode (op, 2);
3230 }
3231
3232 /* Record the number of bytes pushed. */
3233 for (n = 0; n < 16; n++)
3234 {
3235 if (range & (1 << n))
3236 unwind.frame_size += 4;
3237 }
3238 }
3239
3240
3241 /* Parse a directive saving FPA registers. */
3242
3243 static void
3244 s_arm_unwind_save_fpa (int reg)
3245 {
3246 expressionS exp;
3247 int num_regs;
3248 valueT op;
3249
3250 /* Get Number of registers to transfer. */
3251 if (skip_past_comma (&input_line_pointer) != FAIL)
3252 expression (&exp);
3253 else
3254 exp.X_op = O_illegal;
3255
3256 if (exp.X_op != O_constant)
3257 {
3258 as_bad (_("expected , <constant>"));
3259 ignore_rest_of_line ();
3260 return;
3261 }
3262
3263 num_regs = exp.X_add_number;
3264
3265 if (num_regs < 1 || num_regs > 4)
3266 {
3267 as_bad (_("number of registers must be in the range [1:4]"));
3268 ignore_rest_of_line ();
3269 return;
3270 }
3271
3272 demand_empty_rest_of_line ();
3273
3274 if (reg == 4)
3275 {
3276 /* Short form. */
3277 op = 0xb4 | (num_regs - 1);
3278 add_unwind_opcode (op, 1);
3279 }
3280 else
3281 {
3282 /* Long form. */
3283 op = 0xc800 | (reg << 4) | (num_regs - 1);
3284 add_unwind_opcode (op, 2);
3285 }
3286 unwind.frame_size += num_regs * 12;
3287 }
3288
3289
3290 /* Parse a directive saving VFP registers for ARMv6 and above. */
3291
3292 static void
3293 s_arm_unwind_save_vfp_armv6 (void)
3294 {
3295 int count;
3296 unsigned int start;
3297 valueT op;
3298 int num_vfpv3_regs = 0;
3299 int num_regs_below_16;
3300
3301 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3302 if (count == FAIL)
3303 {
3304 as_bad (_("expected register list"));
3305 ignore_rest_of_line ();
3306 return;
3307 }
3308
3309 demand_empty_rest_of_line ();
3310
3311 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3312 than FSTMX/FLDMX-style ones). */
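/* Worked example: ".vsave {d8-d11}" gives start == 8 and count == 4,
   entirely below D16, so only the 0xc900 form below is used and the
   two-byte unwind opcode 0xc9 0x83 (pop D8-D11) is emitted. */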
3313
3314 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3315 if (start >= 16)
3316 num_vfpv3_regs = count;
3317 else if (start + count > 16)
3318 num_vfpv3_regs = start + count - 16;
3319
3320 if (num_vfpv3_regs > 0)
3321 {
3322 int start_offset = start > 16 ? start - 16 : 0;
3323 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3324 add_unwind_opcode (op, 2);
3325 }
3326
3327 /* Generate opcode for registers numbered in the range 0 .. 15. */
3328 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3329 assert (num_regs_below_16 + num_vfpv3_regs == count);
3330 if (num_regs_below_16 > 0)
3331 {
3332 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3333 add_unwind_opcode (op, 2);
3334 }
3335
3336 unwind.frame_size += count * 8;
3337 }
3338
3339
3340 /* Parse a directive saving VFP registers for pre-ARMv6. */
3341
3342 static void
3343 s_arm_unwind_save_vfp (void)
3344 {
3345 int count;
3346 unsigned int reg;
3347 valueT op;
3348
3349 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
3350 if (count == FAIL)
3351 {
3352 as_bad (_("expected register list"));
3353 ignore_rest_of_line ();
3354 return;
3355 }
3356
3357 demand_empty_rest_of_line ();
3358
3359 if (reg == 8)
3360 {
3361 /* Short form. */
3362 op = 0xb8 | (count - 1);
3363 add_unwind_opcode (op, 1);
3364 }
3365 else
3366 {
3367 /* Long form. */
3368 op = 0xb300 | (reg << 4) | (count - 1);
3369 add_unwind_opcode (op, 2);
3370 }
3371 unwind.frame_size += count * 8 + 4;
3372 }
3373
3374
3375 /* Parse a directive saving iWMMXt data registers. */
3376
3377 static void
3378 s_arm_unwind_save_mmxwr (void)
3379 {
3380 int reg;
3381 int hi_reg;
3382 int i;
3383 unsigned mask = 0;
3384 valueT op;
3385
3386 if (*input_line_pointer == '{')
3387 input_line_pointer++;
3388
3389 do
3390 {
3391 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3392
3393 if (reg == FAIL)
3394 {
3395 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3396 goto error;
3397 }
3398
3399 if (mask >> reg)
3400 as_tsktsk (_("register list not in ascending order"));
3401 mask |= 1 << reg;
3402
3403 if (*input_line_pointer == '-')
3404 {
3405 input_line_pointer++;
3406 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3407 if (hi_reg == FAIL)
3408 {
3409 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3410 goto error;
3411 }
3412 else if (reg >= hi_reg)
3413 {
3414 as_bad (_("bad register range"));
3415 goto error;
3416 }
3417 for (; reg < hi_reg; reg++)
3418 mask |= 1 << reg;
3419 }
3420 }
3421 while (skip_past_comma (&input_line_pointer) != FAIL);
3422
3423 if (*input_line_pointer == '}')
3424 input_line_pointer++;
3425
3426 demand_empty_rest_of_line ();
3427
3428 /* Generate any deferred opcodes because we're going to be looking at
3429 the list. */
3430 flush_pending_unwind ();
3431
3432 for (i = 0; i < 16; i++)
3433 {
3434 if (mask & (1 << i))
3435 unwind.frame_size += 8;
3436 }
3437
3438 /* Attempt to combine with a previous opcode. We do this because gcc
3439 likes to output separate unwind directives for a single block of
3440 registers. */
3441 if (unwind.opcode_count > 0)
3442 {
3443 i = unwind.opcodes[unwind.opcode_count - 1];
3444 if ((i & 0xf8) == 0xc0)
3445 {
3446 i &= 7;
3447 /* Only merge if the blocks are contiguous. */
3448 if (i < 6)
3449 {
3450 if ((mask & 0xfe00) == (1 << 9))
3451 {
3452 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3453 unwind.opcode_count--;
3454 }
3455 }
3456 else if (i == 6 && unwind.opcode_count >= 2)
3457 {
3458 i = unwind.opcodes[unwind.opcode_count - 2];
3459 reg = i >> 4;
3460 i &= 0xf;
3461
3462 op = 0xffff << (reg - 1);
3463 if (reg > 0
3464 && ((mask & op) == (1u << (reg - 1))))
3465 {
3466 op = (1 << (reg + i + 1)) - 1;
3467 op &= ~((1 << reg) - 1);
3468 mask |= op;
3469 unwind.opcode_count -= 2;
3470 }
3471 }
3472 }
3473 }
3474
3475 hi_reg = 15;
3476 /* We want to generate opcodes in the order the registers have been
3477 saved, i.e. descending order. */
3478 for (reg = 15; reg >= -1; reg--)
3479 {
3480 /* Save registers in blocks. */
3481 if (reg < 0
3482 || !(mask & (1 << reg)))
3483 {
3484 /* We found an unsaved reg. Generate opcodes to save the
3485 preceding block. */
3486 if (reg != hi_reg)
3487 {
3488 if (reg == 9)
3489 {
3490 /* Short form. */
3491 op = 0xc0 | (hi_reg - 10);
3492 add_unwind_opcode (op, 1);
3493 }
3494 else
3495 {
3496 /* Long form. */
3497 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3498 add_unwind_opcode (op, 2);
3499 }
3500 }
3501 hi_reg = reg - 1;
3502 }
3503 }
3504
3505 return;
3506 error:
3507 ignore_rest_of_line ();
3508 }
3509
3510 static void
3511 s_arm_unwind_save_mmxwcg (void)
3512 {
3513 int reg;
3514 int hi_reg;
3515 unsigned mask = 0;
3516 valueT op;
3517
3518 if (*input_line_pointer == '{')
3519 input_line_pointer++;
3520
3521 do
3522 {
3523 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3524
3525 if (reg == FAIL)
3526 {
3527 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3528 goto error;
3529 }
3530
3531 reg -= 8;
3532 if (mask >> reg)
3533 as_tsktsk (_("register list not in ascending order"));
3534 mask |= 1 << reg;
3535
3536 if (*input_line_pointer == '-')
3537 {
3538 input_line_pointer++;
3539 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3540 if (hi_reg == FAIL)
3541 {
3542 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3543 goto error;
3544 }
3545 else if (reg >= hi_reg)
3546 {
3547 as_bad (_("bad register range"));
3548 goto error;
3549 }
3550 for (; reg < hi_reg; reg++)
3551 mask |= 1 << reg;
3552 }
3553 }
3554 while (skip_past_comma (&input_line_pointer) != FAIL);
3555
3556 if (*input_line_pointer == '}')
3557 input_line_pointer++;
3558
3559 demand_empty_rest_of_line ();
3560
3561 /* Generate any deferred opcodes because we're going to be looking at
3562 the list. */
3563 flush_pending_unwind ();
3564
3565 for (reg = 0; reg < 16; reg++)
3566 {
3567 if (mask & (1 << reg))
3568 unwind.frame_size += 4;
3569 }
3570 op = 0xc700 | mask;
3571 add_unwind_opcode (op, 2);
3572 return;
3573 error:
3574 ignore_rest_of_line ();
3575 }
3576
3577
3578 /* Parse an unwind_save directive.
3579 If the argument is non-zero, this is a .vsave directive. */
3580
3581 static void
3582 s_arm_unwind_save (int arch_v6)
3583 {
3584 char *peek;
3585 struct reg_entry *reg;
3586 bfd_boolean had_brace = FALSE;
3587
3588 /* Figure out what sort of save we have. */
3589 peek = input_line_pointer;
3590
3591 if (*peek == '{')
3592 {
3593 had_brace = TRUE;
3594 peek++;
3595 }
3596
3597 reg = arm_reg_parse_multi (&peek);
3598
3599 if (!reg)
3600 {
3601 as_bad (_("register expected"));
3602 ignore_rest_of_line ();
3603 return;
3604 }
3605
3606 switch (reg->type)
3607 {
3608 case REG_TYPE_FN:
3609 if (had_brace)
3610 {
3611 as_bad (_("FPA .unwind_save does not take a register list"));
3612 ignore_rest_of_line ();
3613 return;
3614 }
3615 s_arm_unwind_save_fpa (reg->number);
3616 return;
3617
3618 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3619 case REG_TYPE_VFD:
3620 if (arch_v6)
3621 s_arm_unwind_save_vfp_armv6 ();
3622 else
3623 s_arm_unwind_save_vfp ();
3624 return;
3625 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3626 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3627
3628 default:
3629 as_bad (_(".unwind_save does not support this kind of register"));
3630 ignore_rest_of_line ();
3631 }
3632 }
3633
3634
3635 /* Parse an unwind_movsp directive. */
3636
3637 static void
3638 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3639 {
3640 int reg;
3641 valueT op;
3642 int offset;
3643
3644 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3645 if (reg == FAIL)
3646 {
3647 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3648 ignore_rest_of_line ();
3649 return;
3650 }
3651
3652 /* Optional constant. */
3653 if (skip_past_comma (&input_line_pointer) != FAIL)
3654 {
3655 if (immediate_for_directive (&offset) == FAIL)
3656 return;
3657 }
3658 else
3659 offset = 0;
3660
3661 demand_empty_rest_of_line ();
3662
3663 if (reg == REG_SP || reg == REG_PC)
3664 {
3665 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3666 return;
3667 }
3668
3669 if (unwind.fp_reg != REG_SP)
3670 as_bad (_("unexpected .unwind_movsp directive"));
3671
3672 /* Generate opcode to restore the value. */
3673 op = 0x90 | reg;
3674 add_unwind_opcode (op, 1);
3675
3676 /* Record the information for later. */
3677 unwind.fp_reg = reg;
3678 unwind.fp_offset = unwind.frame_size - offset;
3679 unwind.sp_restored = 1;
3680 }
3681
3682 /* Parse an unwind_pad directive. */
3683
3684 static void
3685 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3686 {
3687 int offset;
3688
3689 if (immediate_for_directive (&offset) == FAIL)
3690 return;
3691
3692 if (offset & 3)
3693 {
3694 as_bad (_("stack increment must be multiple of 4"));
3695 ignore_rest_of_line ();
3696 return;
3697 }
3698
3699 /* Don't generate any opcodes, just record the details for later. */
3700 unwind.frame_size += offset;
3701 unwind.pending_offset += offset;
3702
3703 demand_empty_rest_of_line ();
3704 }
3705
3706 /* Parse an unwind_setfp directive. */
3707
3708 static void
3709 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3710 {
3711 int sp_reg;
3712 int fp_reg;
3713 int offset;
3714
3715 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3716 if (skip_past_comma (&input_line_pointer) == FAIL)
3717 sp_reg = FAIL;
3718 else
3719 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3720
3721 if (fp_reg == FAIL || sp_reg == FAIL)
3722 {
3723 as_bad (_("expected <reg>, <reg>"));
3724 ignore_rest_of_line ();
3725 return;
3726 }
3727
3728 /* Optional constant. */
3729 if (skip_past_comma (&input_line_pointer) != FAIL)
3730 {
3731 if (immediate_for_directive (&offset) == FAIL)
3732 return;
3733 }
3734 else
3735 offset = 0;
3736
3737 demand_empty_rest_of_line ();
3738
3739 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3740 {
3741 as_bad (_("register must be either sp or set by a previous"
3742 "unwind_movsp directive"));
3743 return;
3744 }
3745
3746 /* Don't generate any opcodes, just record the information for later. */
3747 unwind.fp_reg = fp_reg;
3748 unwind.fp_used = 1;
3749 if (sp_reg == 13)
3750 unwind.fp_offset = unwind.frame_size - offset;
3751 else
3752 unwind.fp_offset -= offset;
3753 }
3754
3755 /* Parse an unwind_raw directive. */
3756
3757 static void
3758 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3759 {
3760 expressionS exp;
3761 /* This is an arbitrary limit. */
3762 unsigned char op[16];
3763 int count;
3764
3765 expression (&exp);
3766 if (exp.X_op == O_constant
3767 && skip_past_comma (&input_line_pointer) != FAIL)
3768 {
3769 unwind.frame_size += exp.X_add_number;
3770 expression (&exp);
3771 }
3772 else
3773 exp.X_op = O_illegal;
3774
3775 if (exp.X_op != O_constant)
3776 {
3777 as_bad (_("expected <offset>, <opcode>"));
3778 ignore_rest_of_line ();
3779 return;
3780 }
3781
3782 count = 0;
3783
3784 /* Parse the opcode. */
3785 for (;;)
3786 {
3787 if (count >= 16)
3788 {
3789 as_bad (_("unwind opcode too long"));
3790 ignore_rest_of_line ();
/* Stop here; otherwise op[] below could be written one past its end. */
return;
3791 }
3792 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3793 {
3794 as_bad (_("invalid unwind opcode"));
3795 ignore_rest_of_line ();
3796 return;
3797 }
3798 op[count++] = exp.X_add_number;
3799
3800 /* Parse the next byte. */
3801 if (skip_past_comma (&input_line_pointer) == FAIL)
3802 break;
3803
3804 expression (&exp);
3805 }
3806
3807 /* Add the opcode bytes in reverse order. */
3808 while (count--)
3809 add_unwind_opcode (op[count], 1);
3810
3811 demand_empty_rest_of_line ();
3812 }
3813
3814
3815 /* Parse a .eabi_attribute directive. */
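/* For illustration: ".eabi_attribute 24, 1" supplies an integer value,
   while tags 4, 5, 32 and odd-numbered tags above 32 take a string,
   e.g. ".eabi_attribute 5, "cortex-a8"". Tag 32 (Tag_compatibility)
   takes an integer followed by a string. */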
3816
3817 static void
3818 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3819 {
3820 expressionS exp;
3821 bfd_boolean is_string;
3822 int tag;
3823 unsigned int i = 0;
3824 char *s = NULL;
3825 char saved_char;
3826
3827 expression (& exp);
3828 if (exp.X_op != O_constant)
3829 goto bad;
3830
3831 tag = exp.X_add_number;
3832 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3833 is_string = 1;
3834 else
3835 is_string = 0;
3836
3837 if (skip_past_comma (&input_line_pointer) == FAIL)
3838 goto bad;
3839 if (tag == 32 || !is_string)
3840 {
3841 expression (& exp);
3842 if (exp.X_op != O_constant)
3843 {
3844 as_bad (_("expected numeric constant"));
3845 ignore_rest_of_line ();
3846 return;
3847 }
3848 i = exp.X_add_number;
3849 }
3850 if (tag == Tag_compatibility
3851 && skip_past_comma (&input_line_pointer) == FAIL)
3852 {
3853 as_bad (_("expected comma"));
3854 ignore_rest_of_line ();
3855 return;
3856 }
3857 if (is_string)
3858 {
3859 skip_whitespace (input_line_pointer);
3860 if (*input_line_pointer != '"')
3861 goto bad_string;
3862 input_line_pointer++;
3863 s = input_line_pointer;
3864 while (*input_line_pointer && *input_line_pointer != '"')
3865 input_line_pointer++;
3866 if (*input_line_pointer != '"')
3867 goto bad_string;
3868 saved_char = *input_line_pointer;
3869 *input_line_pointer = 0;
3870 }
3871 else
3872 {
3873 s = NULL;
3874 saved_char = 0;
3875 }
3876
3877 if (tag == Tag_compatibility)
3878 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3879 else if (is_string)
3880 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3881 else
3882 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3883
3884 if (s)
3885 {
3886 *input_line_pointer = saved_char;
3887 input_line_pointer++;
3888 }
3889 demand_empty_rest_of_line ();
3890 return;
3891 bad_string:
3892 as_bad (_("bad string constant"));
3893 ignore_rest_of_line ();
3894 return;
3895 bad:
3896 as_bad (_("expected <tag> , <value>"));
3897 ignore_rest_of_line ();
3898 }
3899 #endif /* OBJ_ELF */
3900
3901 static void s_arm_arch (int);
3902 static void s_arm_object_arch (int);
3903 static void s_arm_cpu (int);
3904 static void s_arm_fpu (int);
3905
3906 #ifdef TE_PE
3907
3908 static void
3909 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
3910 {
3911 expressionS exp;
3912
3913 do
3914 {
3915 expression (&exp);
3916 if (exp.X_op == O_symbol)
3917 exp.X_op = O_secrel;
3918
3919 emit_expr (&exp, 4);
3920 }
3921 while (*input_line_pointer++ == ',');
3922
3923 input_line_pointer--;
3924 demand_empty_rest_of_line ();
3925 }
3926 #endif /* TE_PE */
3927
3928 /* This table describes all the machine specific pseudo-ops the assembler
3929 has to support. The fields are:
3930 pseudo-op name without dot
3931 function to call to execute this pseudo-op
3932 Integer arg to pass to the function. */
3933
3934 const pseudo_typeS md_pseudo_table[] =
3935 {
3936 /* Never called because '.req' does not start a line. */
3937 { "req", s_req, 0 },
3938 /* Following two are likewise never called. */
3939 { "dn", s_dn, 0 },
3940 { "qn", s_qn, 0 },
3941 { "unreq", s_unreq, 0 },
3942 { "bss", s_bss, 0 },
3943 { "align", s_align, 0 },
3944 { "arm", s_arm, 0 },
3945 { "thumb", s_thumb, 0 },
3946 { "code", s_code, 0 },
3947 { "force_thumb", s_force_thumb, 0 },
3948 { "thumb_func", s_thumb_func, 0 },
3949 { "thumb_set", s_thumb_set, 0 },
3950 { "even", s_even, 0 },
3951 { "ltorg", s_ltorg, 0 },
3952 { "pool", s_ltorg, 0 },
3953 { "syntax", s_syntax, 0 },
3954 { "cpu", s_arm_cpu, 0 },
3955 { "arch", s_arm_arch, 0 },
3956 { "object_arch", s_arm_object_arch, 0 },
3957 { "fpu", s_arm_fpu, 0 },
3958 #ifdef OBJ_ELF
3959 { "word", s_arm_elf_cons, 4 },
3960 { "long", s_arm_elf_cons, 4 },
3961 { "rel31", s_arm_rel31, 0 },
3962 { "fnstart", s_arm_unwind_fnstart, 0 },
3963 { "fnend", s_arm_unwind_fnend, 0 },
3964 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3965 { "personality", s_arm_unwind_personality, 0 },
3966 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3967 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3968 { "save", s_arm_unwind_save, 0 },
3969 { "vsave", s_arm_unwind_save, 1 },
3970 { "movsp", s_arm_unwind_movsp, 0 },
3971 { "pad", s_arm_unwind_pad, 0 },
3972 { "setfp", s_arm_unwind_setfp, 0 },
3973 { "unwind_raw", s_arm_unwind_raw, 0 },
3974 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3975 #else
3976 { "word", cons, 4},
3977
3978 /* These are used for dwarf. */
3979 {"2byte", cons, 2},
3980 {"4byte", cons, 4},
3981 {"8byte", cons, 8},
3982 /* These are used for dwarf2. */
3983 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
3984 { "loc", dwarf2_directive_loc, 0 },
3985 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
3986 #endif
3987 { "extend", float_cons, 'x' },
3988 { "ldouble", float_cons, 'x' },
3989 { "packed", float_cons, 'p' },
3990 #ifdef TE_PE
3991 {"secrel32", pe_directive_secrel, 0},
3992 #endif
3993 { 0, 0, 0 }
3994 };
3995 \f
3996 /* Parser functions used exclusively in instruction operands. */
3997
3998 /* Generic immediate-value read function for use in insn parsing.
3999 STR points to the beginning of the immediate (the leading #);
4000 VAL receives the value; if the value is outside [MIN, MAX]
4001 issue an error. PREFIX_OPT is true if the immediate prefix is
4002 optional. */
4003
4004 static int
4005 parse_immediate (char **str, int *val, int min, int max,
4006 bfd_boolean prefix_opt)
4007 {
4008 expressionS exp;
4009 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4010 if (exp.X_op != O_constant)
4011 {
4012 inst.error = _("constant expression required");
4013 return FAIL;
4014 }
4015
4016 if (exp.X_add_number < min || exp.X_add_number > max)
4017 {
4018 inst.error = _("immediate value out of range");
4019 return FAIL;
4020 }
4021
4022 *val = exp.X_add_number;
4023 return SUCCESS;
4024 }
4025
4026 /* Less-generic immediate-value read function with the possibility of loading a
4027 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4028 instructions. Puts the result directly in inst.operands[i]. */
4029
4030 static int
4031 parse_big_immediate (char **str, int i)
4032 {
4033 expressionS exp;
4034 char *ptr = *str;
4035
4036 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4037
4038 if (exp.X_op == O_constant)
4039 {
4040 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4041 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4042 O_constant. We have to be careful not to break compilation for
4043 32-bit X_add_number, though. */
4044 if ((exp.X_add_number & ~0xffffffffl) != 0)
4045 {
4046 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4047 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4048 inst.operands[i].regisimm = 1;
4049 }
4050 }
4051 else if (exp.X_op == O_big
4052 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
4053 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
4054 {
4055 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4056 /* Bignums have their least significant bits in
4057 generic_bignum[0]. Make sure we put 32 bits in imm and
4058 32 bits in reg, in a (hopefully) portable way. */
4059 assert (parts != 0);
4060 inst.operands[i].imm = 0;
4061 for (j = 0; j < parts; j++, idx++)
4062 inst.operands[i].imm |= generic_bignum[idx]
4063 << (LITTLENUM_NUMBER_OF_BITS * j);
4064 inst.operands[i].reg = 0;
4065 for (j = 0; j < parts; j++, idx++)
4066 inst.operands[i].reg |= generic_bignum[idx]
4067 << (LITTLENUM_NUMBER_OF_BITS * j);
4068 inst.operands[i].regisimm = 1;
4069 }
4070 else
4071 return FAIL;
4072
4073 *str = ptr;
4074
4075 return SUCCESS;
4076 }
4077
4078 /* Returns the pseudo-register number of an FPA immediate constant,
4079 or FAIL if there isn't a valid constant here. */
4080
4081 static int
4082 parse_fpa_immediate (char ** str)
4083 {
4084 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4085 char * save_in;
4086 expressionS exp;
4087 int i;
4088 int j;
4089
4090 /* First try to match exact strings; this guarantees that some
4091 formats will work even for cross assembly. */
4092
4093 for (i = 0; fp_const[i]; i++)
4094 {
4095 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4096 {
4097 char *start = *str;
4098
4099 *str += strlen (fp_const[i]);
4100 if (is_end_of_line[(unsigned char) **str])
4101 return i + 8;
4102 *str = start;
4103 }
4104 }
4105
4106 /* Just because we didn't get a match doesn't mean that the constant
4107 isn't valid, just that it is in a format that we don't
4108 automatically recognize. Try parsing it with the standard
4109 expression routines. */
4110
4111 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4112
4113 /* Look for a raw floating point number. */
4114 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4115 && is_end_of_line[(unsigned char) *save_in])
4116 {
4117 for (i = 0; i < NUM_FLOAT_VALS; i++)
4118 {
4119 for (j = 0; j < MAX_LITTLENUMS; j++)
4120 {
4121 if (words[j] != fp_values[i][j])
4122 break;
4123 }
4124
4125 if (j == MAX_LITTLENUMS)
4126 {
4127 *str = save_in;
4128 return i + 8;
4129 }
4130 }
4131 }
4132
4133 /* Try to parse a more complex expression; this will probably fail
4134 unless the code uses a floating point prefix (e.g. "0f"). */
4135 save_in = input_line_pointer;
4136 input_line_pointer = *str;
4137 if (expression (&exp) == absolute_section
4138 && exp.X_op == O_big
4139 && exp.X_add_number < 0)
4140 {
4141 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4142 Ditto for 15. */
4143 if (gen_to_words (words, 5, (long) 15) == 0)
4144 {
4145 for (i = 0; i < NUM_FLOAT_VALS; i++)
4146 {
4147 for (j = 0; j < MAX_LITTLENUMS; j++)
4148 {
4149 if (words[j] != fp_values[i][j])
4150 break;
4151 }
4152
4153 if (j == MAX_LITTLENUMS)
4154 {
4155 *str = input_line_pointer;
4156 input_line_pointer = save_in;
4157 return i + 8;
4158 }
4159 }
4160 }
4161 }
4162
4163 *str = input_line_pointer;
4164 input_line_pointer = save_in;
4165 inst.error = _("invalid FPA immediate expression");
4166 return FAIL;
4167 }
4168
4169 /* Returns 1 if a number has "quarter-precision" float format
4170 0baBbbbbbc defgh000 00000000 00000000. */
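/* For example, 1.0f (0x3f800000) and -2.5f (0xc0200000) have zero
   low-order 19 bits and the required exponent pattern, so they pass
   this test; 0.1f (0x3dcccccd) does not. */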
4171
4172 static int
4173 is_quarter_float (unsigned imm)
4174 {
4175 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4176 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4177 }
4178
4179 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4180 0baBbbbbbc defgh000 00000000 00000000.
4181 The zero and minus-zero cases need special handling, since they can't be
4182 encoded in the "quarter-precision" float format, but can nonetheless be
4183 loaded as integer constants. */
4184
4185 static unsigned
4186 parse_qfloat_immediate (char **ccp, int *immed)
4187 {
4188 char *str = *ccp;
4189 char *fpnum;
4190 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4191 int found_fpchar = 0;
4192
4193 skip_past_char (&str, '#');
4194
4195 /* We must not accidentally parse an integer as a floating-point number. Make
4196 sure that the value we parse is not an integer by checking for special
4197 characters '.' or 'e'.
4198 FIXME: This is a horrible hack, but doing better is tricky because type
4199 information isn't in a very usable state at parse time. */
4200 fpnum = str;
4201 skip_whitespace (fpnum);
4202
4203 if (strncmp (fpnum, "0x", 2) == 0)
4204 return FAIL;
4205 else
4206 {
4207 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4208 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4209 {
4210 found_fpchar = 1;
4211 break;
4212 }
4213
4214 if (!found_fpchar)
4215 return FAIL;
4216 }
4217
4218 if ((str = atof_ieee (str, 's', words)) != NULL)
4219 {
4220 unsigned fpword = 0;
4221 int i;
4222
4223 /* Our FP word must be 32 bits (single-precision FP). */
4224 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4225 {
4226 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4227 fpword |= words[i];
4228 }
4229
4230 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4231 *immed = fpword;
4232 else
4233 return FAIL;
4234
4235 *ccp = str;
4236
4237 return SUCCESS;
4238 }
4239
4240 return FAIL;
4241 }
4242
4243 /* Shift operands. */
4244 enum shift_kind
4245 {
4246 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4247 };
4248
4249 struct asm_shift_name
4250 {
4251 const char *name;
4252 enum shift_kind kind;
4253 };
4254
4255 /* Third argument to parse_shift. */
4256 enum parse_shift_mode
4257 {
4258 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4259 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4260 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4261 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4262 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4263 };
4264
4265 /* Parse a <shift> specifier on an ARM data processing instruction.
4266 This has three forms:
4267
4268 (LSL|LSR|ASL|ASR|ROR) Rs
4269 (LSL|LSR|ASL|ASR|ROR) #imm
4270 RRX
4271
4272 Note that ASL is assimilated to LSL in the instruction encoding, and
4273 RRX to ROR #0 (which cannot be written as such). */
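/* For example, in "ADD r0, r1, r2, LSL #3" the shift operand parses with
   shift_kind == SHIFT_LSL and the count 3 left in inst.reloc.exp, whereas
   "LSL r4" sets .imm = 4 and .immisreg = 1 (register-specified shift).  */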
4274
4275 static int
4276 parse_shift (char **str, int i, enum parse_shift_mode mode)
4277 {
4278 const struct asm_shift_name *shift_name;
4279 enum shift_kind shift;
4280 char *s = *str;
4281 char *p = s;
4282 int reg;
4283
4284 for (p = *str; ISALPHA (*p); p++)
4285 ;
4286
4287 if (p == *str)
4288 {
4289 inst.error = _("shift expression expected");
4290 return FAIL;
4291 }
4292
4293 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4294
4295 if (shift_name == NULL)
4296 {
4297 inst.error = _("shift expression expected");
4298 return FAIL;
4299 }
4300
4301 shift = shift_name->kind;
4302
4303 switch (mode)
4304 {
4305 case NO_SHIFT_RESTRICT:
4306 case SHIFT_IMMEDIATE: break;
4307
4308 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4309 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4310 {
4311 inst.error = _("'LSL' or 'ASR' required");
4312 return FAIL;
4313 }
4314 break;
4315
4316 case SHIFT_LSL_IMMEDIATE:
4317 if (shift != SHIFT_LSL)
4318 {
4319 inst.error = _("'LSL' required");
4320 return FAIL;
4321 }
4322 break;
4323
4324 case SHIFT_ASR_IMMEDIATE:
4325 if (shift != SHIFT_ASR)
4326 {
4327 inst.error = _("'ASR' required");
4328 return FAIL;
4329 }
4330 break;
4331
4332 default: abort ();
4333 }
4334
4335 if (shift != SHIFT_RRX)
4336 {
4337 /* Whitespace can appear here if the next thing is a bare digit. */
4338 skip_whitespace (p);
4339
4340 if (mode == NO_SHIFT_RESTRICT
4341 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4342 {
4343 inst.operands[i].imm = reg;
4344 inst.operands[i].immisreg = 1;
4345 }
4346 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4347 return FAIL;
4348 }
4349 inst.operands[i].shift_kind = shift;
4350 inst.operands[i].shifted = 1;
4351 *str = p;
4352 return SUCCESS;
4353 }
4354
4355 /* Parse a <shifter_operand> for an ARM data processing instruction:
4356
4357 #<immediate>
4358 #<immediate>, <rotate>
4359 <Rm>
4360 <Rm>, <shift>
4361
4362 where <shift> is defined by parse_shift above, and <rotate> is a
4363 multiple of 2 between 0 and 30. Validation of immediate operands
4364 is deferred to md_apply_fix. */
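/* For example, "#0xff, 8" denotes 0xff rotated right by 8, i.e. 0xff000000;
   the code below pre-rotates X_add_number to that decoded value so that
   md_apply_fix can re-encode it.  */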
4365
4366 static int
4367 parse_shifter_operand (char **str, int i)
4368 {
4369 int value;
4370 expressionS expr;
4371
4372 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4373 {
4374 inst.operands[i].reg = value;
4375 inst.operands[i].isreg = 1;
4376
4377 /* parse_shift will override this if appropriate */
4378 inst.reloc.exp.X_op = O_constant;
4379 inst.reloc.exp.X_add_number = 0;
4380
4381 if (skip_past_comma (str) == FAIL)
4382 return SUCCESS;
4383
4384 /* Shift operation on register. */
4385 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4386 }
4387
4388 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4389 return FAIL;
4390
4391 if (skip_past_comma (str) == SUCCESS)
4392 {
4393 /* #x, y -- ie explicit rotation by Y. */
4394 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4395 return FAIL;
4396
4397 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4398 {
4399 inst.error = _("constant expression expected");
4400 return FAIL;
4401 }
4402
4403 value = expr.X_add_number;
4404 if (value < 0 || value > 30 || value % 2 != 0)
4405 {
4406 inst.error = _("invalid rotation");
4407 return FAIL;
4408 }
4409 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4410 {
4411 inst.error = _("invalid constant");
4412 return FAIL;
4413 }
4414
4415 /* Convert to decoded value. md_apply_fix will put it back. */
4416 inst.reloc.exp.X_add_number
4417 = (((inst.reloc.exp.X_add_number << (32 - value))
4418 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4419 }
4420
4421 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4422 inst.reloc.pc_rel = 0;
4423 return SUCCESS;
4424 }
4425
4426 /* Group relocation information. Each entry in the table contains the
4427    textual name of the relocation, as it may appear in assembler source,
4428    which must be followed by a colon.
4429 Along with this textual name are the relocation codes to be used if
4430 the corresponding instruction is an ALU instruction (ADD or SUB only),
4431 an LDR, an LDRS, or an LDC. */
4432
4433 struct group_reloc_table_entry
4434 {
4435 const char *name;
4436 int alu_code;
4437 int ldr_code;
4438 int ldrs_code;
4439 int ldc_code;
4440 };
4441
4442 typedef enum
4443 {
4444 /* Varieties of non-ALU group relocation. */
4445
4446 GROUP_LDR,
4447 GROUP_LDRS,
4448 GROUP_LDC
4449 } group_reloc_type;
4450
4451 static struct group_reloc_table_entry group_reloc_table[] =
4452 { /* Program counter relative: */
4453 { "pc_g0_nc",
4454 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4455 0, /* LDR */
4456 0, /* LDRS */
4457 0 }, /* LDC */
4458 { "pc_g0",
4459 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4460 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4461 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4462 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4463 { "pc_g1_nc",
4464 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4465 0, /* LDR */
4466 0, /* LDRS */
4467 0 }, /* LDC */
4468 { "pc_g1",
4469 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4470 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4471 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4472 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4473 { "pc_g2",
4474 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4475 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4476 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4477 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4478 /* Section base relative */
4479 { "sb_g0_nc",
4480 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4481 0, /* LDR */
4482 0, /* LDRS */
4483 0 }, /* LDC */
4484 { "sb_g0",
4485 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4486 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4487 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4488 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4489 { "sb_g1_nc",
4490 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4491 0, /* LDR */
4492 0, /* LDRS */
4493 0 }, /* LDC */
4494 { "sb_g1",
4495 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4496 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4497 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4498 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4499 { "sb_g2",
4500 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4501 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4502 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4503 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
4504
4505 /* Given the address of a pointer pointing to the textual name of a group
4506 relocation as may appear in assembler source, attempt to find its details
4507 in group_reloc_table. The pointer will be updated to the character after
4508 the trailing colon. On failure, FAIL will be returned; SUCCESS
4509 otherwise. On success, *entry will be updated to point at the relevant
4510 group_reloc_table entry. */
4511
4512 static int
4513 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4514 {
4515 unsigned int i;
4516 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4517 {
4518 int length = strlen (group_reloc_table[i].name);
4519
4520       if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4521           && (*str)[length] == ':')
4522 {
4523 *out = &group_reloc_table[i];
4524 *str += (length + 1);
4525 return SUCCESS;
4526 }
4527 }
4528
4529 return FAIL;
4530 }
4531
4532 /* Parse a <shifter_operand> for an ARM data processing instruction
4533 (as for parse_shifter_operand) where group relocations are allowed:
4534
4535 #<immediate>
4536 #<immediate>, <rotate>
4537 #:<group_reloc>:<expression>
4538 <Rm>
4539 <Rm>, <shift>
4540
4541 where <group_reloc> is one of the strings defined in group_reloc_table.
4542 The hashes are optional.
4543
4544 Everything else is as for parse_shifter_operand. */
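/* An illustrative ALU form is "ADD r0, r0, #:pc_g0_nc:(expr)", which selects
   the BFD_RELOC_ARM_ALU_PC_G0_NC entry of group_reloc_table and leaves the
   expression in inst.reloc.exp.  */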
4545
4546 static parse_operand_result
4547 parse_shifter_operand_group_reloc (char **str, int i)
4548 {
4549 /* Determine if we have the sequence of characters #: or just :
4550 coming next. If we do, then we check for a group relocation.
4551 If we don't, punt the whole lot to parse_shifter_operand. */
4552
4553 if (((*str)[0] == '#' && (*str)[1] == ':')
4554 || (*str)[0] == ':')
4555 {
4556 struct group_reloc_table_entry *entry;
4557
4558 if ((*str)[0] == '#')
4559 (*str) += 2;
4560 else
4561 (*str)++;
4562
4563 /* Try to parse a group relocation. Anything else is an error. */
4564 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4565 {
4566 inst.error = _("unknown group relocation");
4567 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4568 }
4569
4570 /* We now have the group relocation table entry corresponding to
4571 the name in the assembler source. Next, we parse the expression. */
4572 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4573 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4574
4575 /* Record the relocation type (always the ALU variant here). */
4576 inst.reloc.type = entry->alu_code;
4577 assert (inst.reloc.type != 0);
4578
4579 return PARSE_OPERAND_SUCCESS;
4580 }
4581 else
4582 return parse_shifter_operand (str, i) == SUCCESS
4583 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4584
4585 /* Never reached. */
4586 }
4587
4588 /* Parse all forms of an ARM address expression. Information is written
4589 to inst.operands[i] and/or inst.reloc.
4590
4591 Preindexed addressing (.preind=1):
4592
4593 [Rn, #offset] .reg=Rn .reloc.exp=offset
4594 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4595 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4596 .shift_kind=shift .reloc.exp=shift_imm
4597
4598 These three may have a trailing ! which causes .writeback to be set also.
4599
4600 Postindexed addressing (.postind=1, .writeback=1):
4601
4602 [Rn], #offset .reg=Rn .reloc.exp=offset
4603 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4604 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4605 .shift_kind=shift .reloc.exp=shift_imm
4606
4607 Unindexed addressing (.preind=0, .postind=0):
4608
4609 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4610
4611 Other:
4612
4613 [Rn]{!} shorthand for [Rn,#0]{!}
4614 =immediate .isreg=0 .reloc.exp=immediate
4615 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4616
4617 It is the caller's responsibility to check for addressing modes not
4618 supported by the instruction, and to set inst.reloc.type. */
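/* For example, "[r3, r4, LSL #2]!" parses to .reg=3, .imm=4, .immisreg=1,
   .shifted=1, .shift_kind=SHIFT_LSL, .preind=1 and .writeback=1, with the
   shift count left in inst.reloc.exp.  */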
4619
4620 static parse_operand_result
4621 parse_address_main (char **str, int i, int group_relocations,
4622 group_reloc_type group_type)
4623 {
4624 char *p = *str;
4625 int reg;
4626
4627 if (skip_past_char (&p, '[') == FAIL)
4628 {
4629 if (skip_past_char (&p, '=') == FAIL)
4630 {
4631 /* bare address - translate to PC-relative offset */
4632 inst.reloc.pc_rel = 1;
4633 inst.operands[i].reg = REG_PC;
4634 inst.operands[i].isreg = 1;
4635 inst.operands[i].preind = 1;
4636 }
4637 /* else a load-constant pseudo op, no special treatment needed here */
4638
4639 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4640 return PARSE_OPERAND_FAIL;
4641
4642 *str = p;
4643 return PARSE_OPERAND_SUCCESS;
4644 }
4645
4646 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4647 {
4648 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4649 return PARSE_OPERAND_FAIL;
4650 }
4651 inst.operands[i].reg = reg;
4652 inst.operands[i].isreg = 1;
4653
4654 if (skip_past_comma (&p) == SUCCESS)
4655 {
4656 inst.operands[i].preind = 1;
4657
4658 if (*p == '+') p++;
4659 else if (*p == '-') p++, inst.operands[i].negative = 1;
4660
4661 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4662 {
4663 inst.operands[i].imm = reg;
4664 inst.operands[i].immisreg = 1;
4665
4666 if (skip_past_comma (&p) == SUCCESS)
4667 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4668 return PARSE_OPERAND_FAIL;
4669 }
4670 else if (skip_past_char (&p, ':') == SUCCESS)
4671 {
4672 /* FIXME: '@' should be used here, but it's filtered out by generic
4673 code before we get to see it here. This may be subject to
4674 change. */
4675 expressionS exp;
4676 my_get_expression (&exp, &p, GE_NO_PREFIX);
4677 if (exp.X_op != O_constant)
4678 {
4679 inst.error = _("alignment must be constant");
4680 return PARSE_OPERAND_FAIL;
4681 }
4682 inst.operands[i].imm = exp.X_add_number << 8;
4683 inst.operands[i].immisalign = 1;
4684 /* Alignments are not pre-indexes. */
4685 inst.operands[i].preind = 0;
4686 }
4687 else
4688 {
4689 if (inst.operands[i].negative)
4690 {
4691 inst.operands[i].negative = 0;
4692 p--;
4693 }
4694
4695     if (group_relocations
4696         && ((*p == '#' && *(p + 1) == ':')
4697             || *p == ':'))
4698       {
4699 struct group_reloc_table_entry *entry;
4700
4701 /* Skip over the #: or : sequence. */
4702 if (*p == '#')
4703 p += 2;
4704 else
4705 p++;
4706
4707 /* Try to parse a group relocation. Anything else is an
4708 error. */
4709 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
4710 {
4711 inst.error = _("unknown group relocation");
4712 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4713 }
4714
4715 /* We now have the group relocation table entry corresponding to
4716 the name in the assembler source. Next, we parse the
4717 expression. */
4718 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4719 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4720
4721 /* Record the relocation type. */
4722 switch (group_type)
4723 {
4724 case GROUP_LDR:
4725 inst.reloc.type = entry->ldr_code;
4726 break;
4727
4728 case GROUP_LDRS:
4729 inst.reloc.type = entry->ldrs_code;
4730 break;
4731
4732 case GROUP_LDC:
4733 inst.reloc.type = entry->ldc_code;
4734 break;
4735
4736 default:
4737 assert (0);
4738 }
4739
4740 if (inst.reloc.type == 0)
4741 {
4742 inst.error = _("this group relocation is not allowed on this instruction");
4743 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4744 }
4745 }
4746 else
4747 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4748 return PARSE_OPERAND_FAIL;
4749 }
4750 }
4751
4752 if (skip_past_char (&p, ']') == FAIL)
4753 {
4754 inst.error = _("']' expected");
4755 return PARSE_OPERAND_FAIL;
4756 }
4757
4758 if (skip_past_char (&p, '!') == SUCCESS)
4759 inst.operands[i].writeback = 1;
4760
4761 else if (skip_past_comma (&p) == SUCCESS)
4762 {
4763 if (skip_past_char (&p, '{') == SUCCESS)
4764 {
4765 /* [Rn], {expr} - unindexed, with option */
4766 if (parse_immediate (&p, &inst.operands[i].imm,
4767 0, 255, TRUE) == FAIL)
4768 return PARSE_OPERAND_FAIL;
4769
4770 if (skip_past_char (&p, '}') == FAIL)
4771 {
4772 inst.error = _("'}' expected at end of 'option' field");
4773 return PARSE_OPERAND_FAIL;
4774 }
4775 if (inst.operands[i].preind)
4776 {
4777 inst.error = _("cannot combine index with option");
4778 return PARSE_OPERAND_FAIL;
4779 }
4780 *str = p;
4781 return PARSE_OPERAND_SUCCESS;
4782 }
4783 else
4784 {
4785 inst.operands[i].postind = 1;
4786 inst.operands[i].writeback = 1;
4787
4788 if (inst.operands[i].preind)
4789 {
4790 inst.error = _("cannot combine pre- and post-indexing");
4791 return PARSE_OPERAND_FAIL;
4792 }
4793
4794 if (*p == '+') p++;
4795 else if (*p == '-') p++, inst.operands[i].negative = 1;
4796
4797 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4798 {
4799 /* We might be using the immediate for alignment already. If we
4800 are, OR the register number into the low-order bits. */
4801 if (inst.operands[i].immisalign)
4802 inst.operands[i].imm |= reg;
4803 else
4804 inst.operands[i].imm = reg;
4805 inst.operands[i].immisreg = 1;
4806
4807 if (skip_past_comma (&p) == SUCCESS)
4808 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4809 return PARSE_OPERAND_FAIL;
4810 }
4811 else
4812 {
4813 if (inst.operands[i].negative)
4814 {
4815 inst.operands[i].negative = 0;
4816 p--;
4817 }
4818 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4819 return PARSE_OPERAND_FAIL;
4820 }
4821 }
4822 }
4823
4824 /* If at this point neither .preind nor .postind is set, we have a
4825 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4826 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4827 {
4828 inst.operands[i].preind = 1;
4829 inst.reloc.exp.X_op = O_constant;
4830 inst.reloc.exp.X_add_number = 0;
4831 }
4832 *str = p;
4833 return PARSE_OPERAND_SUCCESS;
4834 }
4835
4836 static int
4837 parse_address (char **str, int i)
4838 {
4839 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
4840 ? SUCCESS : FAIL;
4841 }
4842
4843 static parse_operand_result
4844 parse_address_group_reloc (char **str, int i, group_reloc_type type)
4845 {
4846 return parse_address_main (str, i, 1, type);
4847 }
4848
4849 /* Parse an operand for a MOVW or MOVT instruction. */
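/* For example, "MOVW r0, #:lower16:foo" selects BFD_RELOC_ARM_MOVW and
   leaves "foo" in inst.reloc.exp; a plain constant such as "#0x1234" must
   lie in the range 0..0xffff.  */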
4850 static int
4851 parse_half (char **str)
4852 {
4853 char * p;
4854
4855 p = *str;
4856 skip_past_char (&p, '#');
4857 if (strncasecmp (p, ":lower16:", 9) == 0)
4858 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4859 else if (strncasecmp (p, ":upper16:", 9) == 0)
4860 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4861
4862 if (inst.reloc.type != BFD_RELOC_UNUSED)
4863 {
4864 p += 9;
4865       skip_whitespace (p);
4866 }
4867
4868 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4869 return FAIL;
4870
4871 if (inst.reloc.type == BFD_RELOC_UNUSED)
4872 {
4873 if (inst.reloc.exp.X_op != O_constant)
4874 {
4875 inst.error = _("constant expression expected");
4876 return FAIL;
4877 }
4878 if (inst.reloc.exp.X_add_number < 0
4879 || inst.reloc.exp.X_add_number > 0xffff)
4880 {
4881 inst.error = _("immediate value out of range");
4882 return FAIL;
4883 }
4884 }
4885 *str = p;
4886 return SUCCESS;
4887 }
4888
4889 /* Miscellaneous. */
4890
4891 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4892 or a bitmask suitable to be or-ed into the ARM msr instruction. */
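/* For example, "CPSR_fc" looks up the "fc" suffix in arm_psr_hsh, while a
   bare "CPSR" defaults to the c and f fields; an "SPSR" form additionally
   sets SPSR_BIT.  */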
4893 static int
4894 parse_psr (char **str)
4895 {
4896 char *p;
4897 unsigned long psr_field;
4898 const struct asm_psr *psr;
4899 char *start;
4900
4901   /* CPSR and SPSR may now be written in lowercase.  This is just a
4902      convenience, for ease of use and backwards compatibility.  */
4903 p = *str;
4904 if (strncasecmp (p, "SPSR", 4) == 0)
4905 psr_field = SPSR_BIT;
4906 else if (strncasecmp (p, "CPSR", 4) == 0)
4907 psr_field = 0;
4908 else
4909 {
4910 start = p;
4911 do
4912 p++;
4913 while (ISALNUM (*p) || *p == '_');
4914
4915 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4916 if (!psr)
4917 return FAIL;
4918
4919 *str = p;
4920 return psr->field;
4921 }
4922
4923 p += 4;
4924 if (*p == '_')
4925 {
4926 /* A suffix follows. */
4927 p++;
4928 start = p;
4929
4930 do
4931 p++;
4932 while (ISALNUM (*p) || *p == '_');
4933
4934 psr = hash_find_n (arm_psr_hsh, start, p - start);
4935 if (!psr)
4936 goto error;
4937
4938 psr_field |= psr->field;
4939 }
4940 else
4941 {
4942 if (ISALNUM (*p))
4943 goto error; /* Garbage after "[CS]PSR". */
4944
4945 psr_field |= (PSR_c | PSR_f);
4946 }
4947 *str = p;
4948 return psr_field;
4949
4950 error:
4951 inst.error = _("flag for {c}psr instruction expected");
4952 return FAIL;
4953 }
4954
4955 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4956 value suitable for splatting into the AIF field of the instruction. */
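/* For example, the "if" in "CPSIE if" yields 0x2 | 0x1 == 0x3 (the I and F
   bits).  */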
4957
4958 static int
4959 parse_cps_flags (char **str)
4960 {
4961 int val = 0;
4962 int saw_a_flag = 0;
4963 char *s = *str;
4964
4965 for (;;)
4966 switch (*s++)
4967 {
4968 case '\0': case ',':
4969 goto done;
4970
4971 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4972 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4973 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4974
4975 default:
4976 inst.error = _("unrecognized CPS flag");
4977 return FAIL;
4978 }
4979
4980 done:
4981 if (saw_a_flag == 0)
4982 {
4983 inst.error = _("missing CPS flags");
4984 return FAIL;
4985 }
4986
4987 *str = s - 1;
4988 return val;
4989 }
4990
4991 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4992 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4993
4994 static int
4995 parse_endian_specifier (char **str)
4996 {
4997 int little_endian;
4998 char *s = *str;
4999
5000 if (strncasecmp (s, "BE", 2))
5001 little_endian = 0;
5002 else if (strncasecmp (s, "LE", 2))
5003 little_endian = 1;
5004 else
5005 {
5006 inst.error = _("valid endian specifiers are be or le");
5007 return FAIL;
5008 }
5009
5010 if (ISALNUM (s[2]) || s[2] == '_')
5011 {
5012 inst.error = _("valid endian specifiers are be or le");
5013 return FAIL;
5014 }
5015
5016 *str = s + 2;
5017 return little_endian;
5018 }
5019
5020 /* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a
5021    value suitable for poking into the rotate field of an sxt or sxta
5022    instruction, or FAIL on error.  */
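/* For example, "ROR #16" returns 0x2, the value used in the rotate field of
   the sxt/sxta encodings.  */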
5023
5024 static int
5025 parse_ror (char **str)
5026 {
5027 int rot;
5028 char *s = *str;
5029
5030 if (strncasecmp (s, "ROR", 3) == 0)
5031 s += 3;
5032 else
5033 {
5034 inst.error = _("missing rotation field after comma");
5035 return FAIL;
5036 }
5037
5038 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5039 return FAIL;
5040
5041 switch (rot)
5042 {
5043 case 0: *str = s; return 0x0;
5044 case 8: *str = s; return 0x1;
5045 case 16: *str = s; return 0x2;
5046 case 24: *str = s; return 0x3;
5047
5048 default:
5049 inst.error = _("rotation can only be 0, 8, 16, or 24");
5050 return FAIL;
5051 }
5052 }
5053
5054 /* Parse a conditional code (from conds[] below). The value returned is in the
5055 range 0 .. 14, or FAIL. */
5056 static int
5057 parse_cond (char **str)
5058 {
5059 char *p, *q;
5060 const struct asm_cond *c;
5061
5062 p = q = *str;
5063 while (ISALPHA (*q))
5064 q++;
5065
5066 c = hash_find_n (arm_cond_hsh, p, q - p);
5067 if (!c)
5068 {
5069 inst.error = _("condition required");
5070 return FAIL;
5071 }
5072
5073 *str = q;
5074 return c->value;
5075 }
5076
5077 /* Parse an option for a barrier instruction. Returns the encoding for the
5078 option, or FAIL. */
5079 static int
5080 parse_barrier (char **str)
5081 {
5082 char *p, *q;
5083 const struct asm_barrier_opt *o;
5084
5085 p = q = *str;
5086 while (ISALPHA (*q))
5087 q++;
5088
5089 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5090 if (!o)
5091 return FAIL;
5092
5093 *str = q;
5094 return o->value;
5095 }
5096
5097 /* Parse the operands of a table branch instruction. Similar to a memory
5098 operand. */
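/* For example, "[r0, r1]" for TBB, or "[r0, r1, LSL #1]" for TBH; LSL #1 is
   the only shift accepted, as checked below.  */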
5099 static int
5100 parse_tb (char **str)
5101 {
5102 char * p = *str;
5103 int reg;
5104
5105 if (skip_past_char (&p, '[') == FAIL)
5106 {
5107 inst.error = _("'[' expected");
5108 return FAIL;
5109 }
5110
5111 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5112 {
5113 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5114 return FAIL;
5115 }
5116 inst.operands[0].reg = reg;
5117
5118 if (skip_past_comma (&p) == FAIL)
5119 {
5120 inst.error = _("',' expected");
5121 return FAIL;
5122 }
5123
5124 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5125 {
5126 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5127 return FAIL;
5128 }
5129 inst.operands[0].imm = reg;
5130
5131 if (skip_past_comma (&p) == SUCCESS)
5132 {
5133 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5134 return FAIL;
5135 if (inst.reloc.exp.X_add_number != 1)
5136 {
5137 inst.error = _("invalid shift");
5138 return FAIL;
5139 }
5140 inst.operands[0].shifted = 1;
5141 }
5142
5143 if (skip_past_char (&p, ']') == FAIL)
5144 {
5145 inst.error = _("']' expected");
5146 return FAIL;
5147 }
5148 *str = p;
5149 return SUCCESS;
5150 }
5151
5152 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5153 information on the types the operands can take and how they are encoded.
5154 Up to four operands may be read; this function handles setting the
5155 ".present" field for each read operand itself.
5156 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5157 else returns FAIL. */
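/* For example, "d0, r2, r3" is case 5 below (two ARM registers transferred
   to a doubleword register), while "VMOV.F32 s0, #1.0" takes the
   quarter-precision immediate path (case 10).  */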
5158
5159 static int
5160 parse_neon_mov (char **str, int *which_operand)
5161 {
5162 int i = *which_operand, val;
5163 enum arm_reg_type rtype;
5164 char *ptr = *str;
5165 struct neon_type_el optype;
5166
5167 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5168 {
5169 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5170 inst.operands[i].reg = val;
5171 inst.operands[i].isscalar = 1;
5172 inst.operands[i].vectype = optype;
5173 inst.operands[i++].present = 1;
5174
5175 if (skip_past_comma (&ptr) == FAIL)
5176 goto wanted_comma;
5177
5178 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5179 goto wanted_arm;
5180
5181 inst.operands[i].reg = val;
5182 inst.operands[i].isreg = 1;
5183 inst.operands[i].present = 1;
5184 }
5185 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5186 != FAIL)
5187 {
5188 /* Cases 0, 1, 2, 3, 5 (D only). */
5189 if (skip_past_comma (&ptr) == FAIL)
5190 goto wanted_comma;
5191
5192 inst.operands[i].reg = val;
5193 inst.operands[i].isreg = 1;
5194 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5195 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5196 inst.operands[i].isvec = 1;
5197 inst.operands[i].vectype = optype;
5198 inst.operands[i++].present = 1;
5199
5200 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5201 {
5202 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5203 Case 13: VMOV <Sd>, <Rm> */
5204 inst.operands[i].reg = val;
5205 inst.operands[i].isreg = 1;
5206 inst.operands[i].present = 1;
5207
5208 if (rtype == REG_TYPE_NQ)
5209 {
5210 first_error (_("can't use Neon quad register here"));
5211 return FAIL;
5212 }
5213 else if (rtype != REG_TYPE_VFS)
5214 {
5215 i++;
5216 if (skip_past_comma (&ptr) == FAIL)
5217 goto wanted_comma;
5218 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5219 goto wanted_arm;
5220 inst.operands[i].reg = val;
5221 inst.operands[i].isreg = 1;
5222 inst.operands[i].present = 1;
5223 }
5224 }
5225 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5226 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5227 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5228 Case 10: VMOV.F32 <Sd>, #<imm>
5229 Case 11: VMOV.F64 <Dd>, #<imm> */
5230 inst.operands[i].immisfloat = 1;
5231 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5232 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5233 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5234 ;
5235 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5236 &optype)) != FAIL)
5237 {
5238 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5239 Case 1: VMOV<c><q> <Dd>, <Dm>
5240 Case 8: VMOV.F32 <Sd>, <Sm>
5241 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5242
5243 inst.operands[i].reg = val;
5244 inst.operands[i].isreg = 1;
5245 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5246 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5247 inst.operands[i].isvec = 1;
5248 inst.operands[i].vectype = optype;
5249 inst.operands[i].present = 1;
5250
5251 if (skip_past_comma (&ptr) == SUCCESS)
5252 {
5253 /* Case 15. */
5254 i++;
5255
5256 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5257 goto wanted_arm;
5258
5259 inst.operands[i].reg = val;
5260 inst.operands[i].isreg = 1;
5261 inst.operands[i++].present = 1;
5262
5263 if (skip_past_comma (&ptr) == FAIL)
5264 goto wanted_comma;
5265
5266 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5267 goto wanted_arm;
5268
5269 inst.operands[i].reg = val;
5270 inst.operands[i].isreg = 1;
5271 inst.operands[i++].present = 1;
5272 }
5273 }
5274 else
5275 {
5276 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5277 return FAIL;
5278 }
5279 }
5280 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5281 {
5282 /* Cases 6, 7. */
5283 inst.operands[i].reg = val;
5284 inst.operands[i].isreg = 1;
5285 inst.operands[i++].present = 1;
5286
5287 if (skip_past_comma (&ptr) == FAIL)
5288 goto wanted_comma;
5289
5290 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5291 {
5292 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5293 inst.operands[i].reg = val;
5294 inst.operands[i].isscalar = 1;
5295 inst.operands[i].present = 1;
5296 inst.operands[i].vectype = optype;
5297 }
5298 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5299 {
5300 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5301 inst.operands[i].reg = val;
5302 inst.operands[i].isreg = 1;
5303 inst.operands[i++].present = 1;
5304
5305 if (skip_past_comma (&ptr) == FAIL)
5306 goto wanted_comma;
5307
5308 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5309 == FAIL)
5310 {
5311 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5312 return FAIL;
5313 }
5314
5315 inst.operands[i].reg = val;
5316 inst.operands[i].isreg = 1;
5317 inst.operands[i].isvec = 1;
5318 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5319 inst.operands[i].vectype = optype;
5320 inst.operands[i].present = 1;
5321
5322 if (rtype == REG_TYPE_VFS)
5323 {
5324 /* Case 14. */
5325 i++;
5326 if (skip_past_comma (&ptr) == FAIL)
5327 goto wanted_comma;
5328 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
5329 &optype)) == FAIL)
5330 {
5331 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
5332 return FAIL;
5333 }
5334 inst.operands[i].reg = val;
5335 inst.operands[i].isreg = 1;
5336 inst.operands[i].isvec = 1;
5337 inst.operands[i].issingle = 1;
5338 inst.operands[i].vectype = optype;
5339 inst.operands[i].present = 1;
5340 }
5341 }
5342 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
5343 != FAIL)
5344 {
5345 /* Case 13. */
5346 inst.operands[i].reg = val;
5347 inst.operands[i].isreg = 1;
5348 inst.operands[i].isvec = 1;
5349 inst.operands[i].issingle = 1;
5350 inst.operands[i].vectype = optype;
5351 inst.operands[i++].present = 1;
5352 }
5353 }
5354 else
5355 {
5356 first_error (_("parse error"));
5357 return FAIL;
5358 }
5359
5360 /* Successfully parsed the operands. Update args. */
5361 *which_operand = i;
5362 *str = ptr;
5363 return SUCCESS;
5364
5365 wanted_comma:
5366 first_error (_("expected comma"));
5367 return FAIL;
5368
5369 wanted_arm:
5370 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
5371 return FAIL;
5372 }
5373
5374 /* Matcher codes for parse_operands. */
5375 enum operand_parse_code
5376 {
5377 OP_stop, /* end of line */
5378
5379 OP_RR, /* ARM register */
5380 OP_RRnpc, /* ARM register, not r15 */
5381 OP_RRnpcb, /* ARM register, not r15, in square brackets */
5382 OP_RRw, /* ARM register, not r15, optional trailing ! */
5383 OP_RCP, /* Coprocessor number */
5384 OP_RCN, /* Coprocessor register */
5385 OP_RF, /* FPA register */
5386 OP_RVS, /* VFP single precision register */
5387 OP_RVD, /* VFP double precision register (0..15) */
5388 OP_RND, /* Neon double precision register (0..31) */
5389 OP_RNQ, /* Neon quad precision register */
5390 OP_RVSD, /* VFP single or double precision register */
5391 OP_RNDQ, /* Neon double or quad precision register */
5392 OP_RNSDQ, /* Neon single, double or quad precision register */
5393 OP_RNSC, /* Neon scalar D[X] */
5394 OP_RVC, /* VFP control register */
5395 OP_RMF, /* Maverick F register */
5396 OP_RMD, /* Maverick D register */
5397 OP_RMFX, /* Maverick FX register */
5398 OP_RMDX, /* Maverick DX register */
5399 OP_RMAX, /* Maverick AX register */
5400 OP_RMDS, /* Maverick DSPSC register */
5401 OP_RIWR, /* iWMMXt wR register */
5402 OP_RIWC, /* iWMMXt wC register */
5403 OP_RIWG, /* iWMMXt wCG register */
5404 OP_RXA, /* XScale accumulator register */
5405
5406 OP_REGLST, /* ARM register list */
5407 OP_VRSLST, /* VFP single-precision register list */
5408 OP_VRDLST, /* VFP double-precision register list */
5409 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
5410 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
5411 OP_NSTRLST, /* Neon element/structure list */
5412
5413 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5414 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
5415 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
5416 OP_RR_RNSC, /* ARM reg or Neon scalar. */
5417 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
5418 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
5419 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
5420 OP_VMOV, /* Neon VMOV operands. */
5421 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
5422 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
5423 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
5424
5425 OP_I0, /* immediate zero */
5426 OP_I7, /* immediate value 0 .. 7 */
5427 OP_I15, /* 0 .. 15 */
5428 OP_I16, /* 1 .. 16 */
5429 OP_I16z, /* 0 .. 16 */
5430 OP_I31, /* 0 .. 31 */
5431 OP_I31w, /* 0 .. 31, optional trailing ! */
5432 OP_I32, /* 1 .. 32 */
5433 OP_I32z, /* 0 .. 32 */
5434 OP_I63, /* 0 .. 63 */
5435 OP_I63s, /* -64 .. 63 */
5436 OP_I64, /* 1 .. 64 */
5437 OP_I64z, /* 0 .. 64 */
5438 OP_I255, /* 0 .. 255 */
5439
5440 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
5441 OP_I7b, /* 0 .. 7 */
5442 OP_I15b, /* 0 .. 15 */
5443 OP_I31b, /* 0 .. 31 */
5444
5445 OP_SH, /* shifter operand */
5446 OP_SHG, /* shifter operand with possible group relocation */
5447 OP_ADDR, /* Memory address expression (any mode) */
5448 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
5449 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
5450 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
5451 OP_EXP, /* arbitrary expression */
5452 OP_EXPi, /* same, with optional immediate prefix */
5453 OP_EXPr, /* same, with optional relocation suffix */
5454 OP_HALF, /* 0 .. 65535 or low/high reloc. */
5455
5456 OP_CPSF, /* CPS flags */
5457 OP_ENDI, /* Endianness specifier */
5458 OP_PSR, /* CPSR/SPSR mask for msr */
5459 OP_COND, /* conditional code */
5460 OP_TB, /* Table branch. */
5461
5462 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */
5463 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
5464
5465 OP_RRnpc_I0, /* ARM register or literal 0 */
5466 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
5467 OP_RR_EXi, /* ARM register or expression with imm prefix */
5468 OP_RF_IF, /* FPA register or immediate */
5469 OP_RIWR_RIWC, /* iWMMXt R or C reg */
5470 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
5471
5472 /* Optional operands. */
5473 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
5474 OP_oI31b, /* 0 .. 31 */
5475 OP_oI32b, /* 1 .. 32 */
5476 OP_oIffffb, /* 0 .. 65535 */
5477 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5478
5479 OP_oRR, /* ARM register */
5480 OP_oRRnpc, /* ARM register, not the PC */
5481 OP_oRRw, /* ARM register, not r15, optional trailing ! */
5482 OP_oRND, /* Optional Neon double precision register */
5483 OP_oRNQ, /* Optional Neon quad precision register */
5484 OP_oRNDQ, /* Optional Neon double or quad precision register */
5485 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
5486 OP_oSHll, /* LSL immediate */
5487 OP_oSHar, /* ASR immediate */
5488 OP_oSHllar, /* LSL or ASR immediate */
5489 OP_oROR, /* ROR 0/8/16/24 */
5490 OP_oBARRIER, /* Option argument for a barrier instruction. */
5491
5492 OP_FIRST_OPTIONAL = OP_oI7b
5493 };
5494
5495 /* Generic instruction operand parser. This does no encoding and no
5496 semantic validation; it merely squirrels values away in the inst
5497 structure. Returns SUCCESS or FAIL depending on whether the
5498 specified grammar matched. */
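/* For illustration only: a pattern is a byte string of operand_parse_code
   values terminated by OP_stop, so a hypothetical pattern such as
   { OP_RRnpc, OP_RRnpc, OP_SH, OP_stop } would match "r0, r1, r2, LSL #2".
   The real patterns are built by macros later in this file.  */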
5499 static int
5500 parse_operands (char *str, const unsigned char *pattern)
5501 {
5502 unsigned const char *upat = pattern;
5503 char *backtrack_pos = 0;
5504 const char *backtrack_error = 0;
5505 int i, val, backtrack_index = 0;
5506 enum arm_reg_type rtype;
5507 parse_operand_result result;
5508
5509 #define po_char_or_fail(chr) do { \
5510 if (skip_past_char (&str, chr) == FAIL) \
5511 goto bad_args; \
5512 } while (0)
5513
5514 #define po_reg_or_fail(regtype) do { \
5515 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5516 &inst.operands[i].vectype); \
5517 if (val == FAIL) \
5518 { \
5519 first_error (_(reg_expected_msgs[regtype])); \
5520 goto failure; \
5521 } \
5522 inst.operands[i].reg = val; \
5523 inst.operands[i].isreg = 1; \
5524 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5525 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5526 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5527 || rtype == REG_TYPE_VFD \
5528 || rtype == REG_TYPE_NQ); \
5529 } while (0)
5530
5531 #define po_reg_or_goto(regtype, label) do { \
5532 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5533 &inst.operands[i].vectype); \
5534 if (val == FAIL) \
5535 goto label; \
5536 \
5537 inst.operands[i].reg = val; \
5538 inst.operands[i].isreg = 1; \
5539 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5540 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5541 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5542 || rtype == REG_TYPE_VFD \
5543 || rtype == REG_TYPE_NQ); \
5544 } while (0)
5545
5546 #define po_imm_or_fail(min, max, popt) do { \
5547 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5548 goto failure; \
5549 inst.operands[i].imm = val; \
5550 } while (0)
5551
5552 #define po_scalar_or_goto(elsz, label) do { \
5553 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5554 if (val == FAIL) \
5555 goto label; \
5556 inst.operands[i].reg = val; \
5557 inst.operands[i].isscalar = 1; \
5558 } while (0)
5559
5560 #define po_misc_or_fail(expr) do { \
5561 if (expr) \
5562 goto failure; \
5563 } while (0)
5564
5565 #define po_misc_or_fail_no_backtrack(expr) do { \
5566 result = expr; \
5567 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5568 backtrack_pos = 0; \
5569 if (result != PARSE_OPERAND_SUCCESS) \
5570 goto failure; \
5571 } while (0)
5572
5573 skip_whitespace (str);
5574
5575 for (i = 0; upat[i] != OP_stop; i++)
5576 {
5577 if (upat[i] >= OP_FIRST_OPTIONAL)
5578 {
5579 /* Remember where we are in case we need to backtrack. */
5580 assert (!backtrack_pos);
5581 backtrack_pos = str;
5582 backtrack_error = inst.error;
5583 backtrack_index = i;
5584 }
5585
5586 if (i > 0 && (i > 1 || inst.operands[0].present))
5587 po_char_or_fail (',');
5588
5589 switch (upat[i])
5590 {
5591 /* Registers */
5592 case OP_oRRnpc:
5593 case OP_RRnpc:
5594 case OP_oRR:
5595 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5596 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5597 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5598 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5599 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5600 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5601 case OP_oRND:
5602 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5603 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5604 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5605 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5606 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5607 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5608 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5609 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5610 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5611 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5612 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5613 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5614 case OP_oRNQ:
5615 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5616 case OP_oRNDQ:
5617 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5618 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5619 case OP_oRNSDQ:
5620 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5621
5622 /* Neon scalar. Using an element size of 8 means that some invalid
5623 scalars are accepted here, so deal with those in later code. */
5624 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5625
5626 /* WARNING: We can expand to two operands here. This has the potential
5627 to totally confuse the backtracking mechanism! It will be OK at
5628 least as long as we don't try to use optional args as well,
5629 though. */
5630 case OP_NILO:
5631 {
5632 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5633 inst.operands[i].present = 1;
5634 i++;
5635 skip_past_comma (&str);
5636 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5637 break;
5638 one_reg_only:
5639 /* Optional register operand was omitted. Unfortunately, it's in
5640 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5641 here (this is a bit grotty). */
5642 inst.operands[i] = inst.operands[i-1];
5643 inst.operands[i-1].present = 0;
5644 break;
5645 try_imm:
5646 /* There's a possibility of getting a 64-bit immediate here, so
5647 we need special handling. */
5648 if (parse_big_immediate (&str, i) == FAIL)
5649 {
5650 inst.error = _("immediate value is out of range");
5651 goto failure;
5652 }
5653 }
5654 break;
5655
5656 case OP_RNDQ_I0:
5657 {
5658 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5659 break;
5660 try_imm0:
5661 po_imm_or_fail (0, 0, TRUE);
5662 }
5663 break;
5664
5665 case OP_RVSD_I0:
5666 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5667 break;
5668
5669 case OP_RR_RNSC:
5670 {
5671 po_scalar_or_goto (8, try_rr);
5672 break;
5673 try_rr:
5674 po_reg_or_fail (REG_TYPE_RN);
5675 }
5676 break;
5677
5678 case OP_RNSDQ_RNSC:
5679 {
5680 po_scalar_or_goto (8, try_nsdq);
5681 break;
5682 try_nsdq:
5683 po_reg_or_fail (REG_TYPE_NSDQ);
5684 }
5685 break;
5686
5687 case OP_RNDQ_RNSC:
5688 {
5689 po_scalar_or_goto (8, try_ndq);
5690 break;
5691 try_ndq:
5692 po_reg_or_fail (REG_TYPE_NDQ);
5693 }
5694 break;
5695
5696 case OP_RND_RNSC:
5697 {
5698 po_scalar_or_goto (8, try_vfd);
5699 break;
5700 try_vfd:
5701 po_reg_or_fail (REG_TYPE_VFD);
5702 }
5703 break;
5704
5705 case OP_VMOV:
5706 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5707 not careful then bad things might happen. */
5708 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5709 break;
5710
5711 case OP_RNDQ_IMVNb:
5712 {
5713 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5714 break;
5715 try_mvnimm:
5716 /* There's a possibility of getting a 64-bit immediate here, so
5717 we need special handling. */
5718 if (parse_big_immediate (&str, i) == FAIL)
5719 {
5720 inst.error = _("immediate value is out of range");
5721 goto failure;
5722 }
5723 }
5724 break;
5725
5726 case OP_RNDQ_I63b:
5727 {
5728 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5729 break;
5730 try_shimm:
5731 po_imm_or_fail (0, 63, TRUE);
5732 }
5733 break;
5734
5735 case OP_RRnpcb:
5736 po_char_or_fail ('[');
5737 po_reg_or_fail (REG_TYPE_RN);
5738 po_char_or_fail (']');
5739 break;
5740
5741 case OP_RRw:
5742 case OP_oRRw:
5743 po_reg_or_fail (REG_TYPE_RN);
5744 if (skip_past_char (&str, '!') == SUCCESS)
5745 inst.operands[i].writeback = 1;
5746 break;
5747
5748 /* Immediates */
5749 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5750 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5751 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5752 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5753 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5754 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5755 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5756 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5757 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5758 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5759 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5760 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5761
5762 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5763 case OP_oI7b:
5764 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5765 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5766 case OP_oI31b:
5767 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5768 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5769 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5770
5771 /* Immediate variants */
5772 case OP_oI255c:
5773 po_char_or_fail ('{');
5774 po_imm_or_fail (0, 255, TRUE);
5775 po_char_or_fail ('}');
5776 break;
5777
5778 case OP_I31w:
5779 /* The expression parser chokes on a trailing !, so we have
5780 to find it first and zap it. */
5781 {
5782 char *s = str;
5783 while (*s && *s != ',')
5784 s++;
5785 if (s[-1] == '!')
5786 {
5787 s[-1] = '\0';
5788 inst.operands[i].writeback = 1;
5789 }
5790 po_imm_or_fail (0, 31, TRUE);
5791 if (str == s - 1)
5792 str = s;
5793 }
5794 break;
5795
5796 /* Expressions */
5797 case OP_EXPi: EXPi:
5798 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5799 GE_OPT_PREFIX));
5800 break;
5801
5802 case OP_EXP:
5803 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5804 GE_NO_PREFIX));
5805 break;
5806
5807 case OP_EXPr: EXPr:
5808 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5809 GE_NO_PREFIX));
5810 if (inst.reloc.exp.X_op == O_symbol)
5811 {
5812 val = parse_reloc (&str);
5813 if (val == -1)
5814 {
5815 inst.error = _("unrecognized relocation suffix");
5816 goto failure;
5817 }
5818 else if (val != BFD_RELOC_UNUSED)
5819 {
5820 inst.operands[i].imm = val;
5821 inst.operands[i].hasreloc = 1;
5822 }
5823 }
5824 break;
5825
5826 /* Operand for MOVW or MOVT. */
5827 case OP_HALF:
5828 po_misc_or_fail (parse_half (&str));
5829 break;
5830
5831 /* Register or expression */
5832 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5833 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5834
5835 /* Register or immediate */
5836 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5837 I0: po_imm_or_fail (0, 0, FALSE); break;
5838
5839 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5840 IF:
5841 if (!is_immediate_prefix (*str))
5842 goto bad_args;
5843 str++;
5844 val = parse_fpa_immediate (&str);
5845 if (val == FAIL)
5846 goto failure;
5847 /* FPA immediates are encoded as registers 8-15.
5848 parse_fpa_immediate has already applied the offset. */
5849 inst.operands[i].reg = val;
5850 inst.operands[i].isreg = 1;
5851 break;
5852
5853 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
5854 I32z: po_imm_or_fail (0, 32, FALSE); break;
5855
5856 /* Two kinds of register */
5857 case OP_RIWR_RIWC:
5858 {
5859 struct reg_entry *rege = arm_reg_parse_multi (&str);
5860 if (!rege
5861 || (rege->type != REG_TYPE_MMXWR
5862 && rege->type != REG_TYPE_MMXWC
5863 && rege->type != REG_TYPE_MMXWCG))
5864 {
5865 inst.error = _("iWMMXt data or control register expected");
5866 goto failure;
5867 }
5868 inst.operands[i].reg = rege->number;
5869 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5870 }
5871 break;
5872
5873 case OP_RIWC_RIWG:
5874 {
5875 struct reg_entry *rege = arm_reg_parse_multi (&str);
5876 if (!rege
5877 || (rege->type != REG_TYPE_MMXWC
5878 && rege->type != REG_TYPE_MMXWCG))
5879 {
5880 inst.error = _("iWMMXt control register expected");
5881 goto failure;
5882 }
5883 inst.operands[i].reg = rege->number;
5884 inst.operands[i].isreg = 1;
5885 }
5886 break;
5887
5888 /* Misc */
5889 case OP_CPSF: val = parse_cps_flags (&str); break;
5890 case OP_ENDI: val = parse_endian_specifier (&str); break;
5891 case OP_oROR: val = parse_ror (&str); break;
5892 case OP_PSR: val = parse_psr (&str); break;
5893 case OP_COND: val = parse_cond (&str); break;
5894 case OP_oBARRIER:val = parse_barrier (&str); break;
5895
5896 case OP_RVC_PSR:
5897 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5898 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5899 break;
5900 try_psr:
5901 val = parse_psr (&str);
5902 break;
5903
5904 case OP_APSR_RR:
5905 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5906 break;
5907 try_apsr:
5908           /* Parse an "APSR_nzcv" operand (for the FMSTAT-equivalent MRS
5909              instruction); the flag letters may appear in any order.  */
5910 if (strncasecmp (str, "APSR_", 5) == 0)
5911 {
5912 unsigned found = 0;
5913 str += 5;
5914 while (found < 15)
5915 switch (*str++)
5916 {
5917 case 'c': found = (found & 1) ? 16 : found | 1; break;
5918 case 'n': found = (found & 2) ? 16 : found | 2; break;
5919 case 'z': found = (found & 4) ? 16 : found | 4; break;
5920 case 'v': found = (found & 8) ? 16 : found | 8; break;
5921 default: found = 16;
5922 }
5923 if (found != 15)
5924 goto failure;
5925 inst.operands[i].isvec = 1;
5926 }
5927 else
5928 goto failure;
5929 break;
5930
5931 case OP_TB:
5932 po_misc_or_fail (parse_tb (&str));
5933 break;
5934
5935 /* Register lists */
5936 case OP_REGLST:
5937 val = parse_reg_list (&str);
5938 if (*str == '^')
5939 {
5940 inst.operands[1].writeback = 1;
5941 str++;
5942 }
5943 break;
5944
5945 case OP_VRSLST:
5946 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5947 break;
5948
5949 case OP_VRDLST:
5950 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5951 break;
5952
5953 case OP_VRSDLST:
5954 /* Allow Q registers too. */
5955 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5956 REGLIST_NEON_D);
5957 if (val == FAIL)
5958 {
5959 inst.error = NULL;
5960 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5961 REGLIST_VFP_S);
5962 inst.operands[i].issingle = 1;
5963 }
5964 break;
5965
5966 case OP_NRDLST:
5967 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5968 REGLIST_NEON_D);
5969 break;
5970
5971 case OP_NSTRLST:
5972 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5973 &inst.operands[i].vectype);
5974 break;
5975
5976 /* Addressing modes */
5977 case OP_ADDR:
5978 po_misc_or_fail (parse_address (&str, i));
5979 break;
5980
5981 case OP_ADDRGLDR:
5982 po_misc_or_fail_no_backtrack (
5983 parse_address_group_reloc (&str, i, GROUP_LDR));
5984 break;
5985
5986 case OP_ADDRGLDRS:
5987 po_misc_or_fail_no_backtrack (
5988 parse_address_group_reloc (&str, i, GROUP_LDRS));
5989 break;
5990
5991 case OP_ADDRGLDC:
5992 po_misc_or_fail_no_backtrack (
5993 parse_address_group_reloc (&str, i, GROUP_LDC));
5994 break;
5995
5996 case OP_SH:
5997 po_misc_or_fail (parse_shifter_operand (&str, i));
5998 break;
5999
6000 case OP_SHG:
6001 po_misc_or_fail_no_backtrack (
6002 parse_shifter_operand_group_reloc (&str, i));
6003 break;
6004
6005 case OP_oSHll:
6006 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6007 break;
6008
6009 case OP_oSHar:
6010 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6011 break;
6012
6013 case OP_oSHllar:
6014 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6015 break;
6016
6017 default:
6018 as_fatal ("unhandled operand code %d", upat[i]);
6019 }
6020
6021 /* Various value-based sanity checks and shared operations. We
6022 do not signal immediate failures for the register constraints;
6023 this allows a syntax error to take precedence. */
6024 switch (upat[i])
6025 {
6026 case OP_oRRnpc:
6027 case OP_RRnpc:
6028 case OP_RRnpcb:
6029 case OP_RRw:
6030 case OP_oRRw:
6031 case OP_RRnpc_I0:
6032 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6033 inst.error = BAD_PC;
6034 break;
6035
6036 case OP_CPSF:
6037 case OP_ENDI:
6038 case OP_oROR:
6039 case OP_PSR:
6040 case OP_RVC_PSR:
6041 case OP_COND:
6042 case OP_oBARRIER:
6043 case OP_REGLST:
6044 case OP_VRSLST:
6045 case OP_VRDLST:
6046 case OP_VRSDLST:
6047 case OP_NRDLST:
6048 case OP_NSTRLST:
6049 if (val == FAIL)
6050 goto failure;
6051 inst.operands[i].imm = val;
6052 break;
6053
6054 default:
6055 break;
6056 }
6057
6058 /* If we get here, this operand was successfully parsed. */
6059 inst.operands[i].present = 1;
6060 continue;
6061
6062 bad_args:
6063 inst.error = BAD_ARGS;
6064
6065 failure:
6066 if (!backtrack_pos)
6067 {
6068 /* The parse routine should already have set inst.error, but set a
6069          default here just in case.  */
6070 if (!inst.error)
6071 inst.error = _("syntax error");
6072 return FAIL;
6073 }
6074
6075 /* Do not backtrack over a trailing optional argument that
6076 absorbed some text. We will only fail again, with the
6077 'garbage following instruction' error message, which is
6078 probably less helpful than the current one. */
6079 if (backtrack_index == i && backtrack_pos != str
6080 && upat[i+1] == OP_stop)
6081 {
6082 if (!inst.error)
6083 inst.error = _("syntax error");
6084 return FAIL;
6085 }
6086
6087 /* Try again, skipping the optional argument at backtrack_pos. */
6088 str = backtrack_pos;
6089 inst.error = backtrack_error;
6090 inst.operands[backtrack_index].present = 0;
6091 i = backtrack_index;
6092 backtrack_pos = 0;
6093 }
6094
6095 /* Check that we have parsed all the arguments. */
6096 if (*str != '\0' && !inst.error)
6097 inst.error = _("garbage following instruction");
6098
6099 return inst.error ? FAIL : SUCCESS;
6100 }
6101
6102 #undef po_char_or_fail
6103 #undef po_reg_or_fail
6104 #undef po_reg_or_goto
6105 #undef po_imm_or_fail
6106 #undef po_scalar_or_goto
6107 \f
6108 /* Shorthand macro for instruction encoding functions issuing errors. */
6109 #define constraint(expr, err) do { \
6110 if (expr) \
6111 { \
6112 inst.error = err; \
6113 return; \
6114 } \
6115 } while (0)
6116
6117 /* Functions for operand encoding. ARM, then Thumb. */
6118
6119 #define rotate_left(v, n) (v << n | v >> (32 - n))
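/* As used in encode_arm_immediate below, n is an even value from 0 to 30.
   For n == 0 the right shift is by 32, which is formally undefined in ISO C,
   although the OR with (v << 0) still gives v on typical compilers.  */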
6120
6121 /* If VAL can be encoded in the immediate field of an ARM instruction,
6122 return the encoded form. Otherwise, return FAIL. */
6123
6124 static unsigned int
6125 encode_arm_immediate (unsigned int val)
6126 {
6127 unsigned int a, i;
6128
6129 for (i = 0; i < 32; i += 2)
6130 if ((a = rotate_left (val, i)) <= 0xff)
6131       return a | (i << 7); /* 12-bit pack: rotate i/2 in bits 11:8, constant in bits 7:0.  */
6132
6133 return FAIL;
6134 }
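/* For example, 0xff000000 is 0xff rotated right by 8, so it encodes as
   0xff | (8 << 7) == 0x4ff (rotate field 4, constant 0xff), whereas 0x1ff
   (nine consecutive set bits) cannot fit in eight bits under any even
   rotation and yields FAIL.  */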
6135
6136 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6137 return the encoded form. Otherwise, return FAIL. */
6138 static unsigned int
6139 encode_thumb32_immediate (unsigned int val)
6140 {
6141 unsigned int a, i;
6142
6143 if (val <= 0xff)
6144 return val;
6145
6146 for (i = 1; i <= 24; i++)
6147 {
6148       a = val >> i;
6149       if ((val & ~(0xff << i)) == 0)
6150         return (a & 0x7f) | ((32 - i) << 7);
6151 }
6152
6153 a = val & 0xff;
6154 if (val == ((a << 16) | a))
6155 return 0x100 | a;
6156 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6157 return 0x300 | a;
6158
6159 a = val & 0xff00;
6160 if (val == ((a << 16) | a))
6161 return 0x200 | (a >> 8);
6162
6163 return FAIL;
6164 }
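
/* Worked examples of the patterns above: 0x00120012 encodes as 0x112 (byte
   replicated in both halfwords), 0x12121212 as 0x312 (byte replicated in all
   four bytes), 0x34003400 as 0x234, and 0xff000000 as 0x47f (0xff shifted
   left by 24, the leading 1 of the 8-bit value being implied).  */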
6165 /* Encode a VFP SP or DP register number into inst.instruction. */
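/* For instance, s3 in the Sd position becomes ((3 >> 1) << 12) | ((3 & 1) << 22);
   a D register above d15 (e.g. d17 in the Dd position) puts bit 4 of the
   register number in the D bit and is only accepted when VFPv3 is available,
   as the check below enforces.  */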
6166
6167 static void
6168 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6169 {
6170 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6171 && reg > 15)
6172 {
6173 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
6174 {
6175 if (thumb_mode)
6176 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6177 fpu_vfp_ext_v3);
6178 else
6179 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6180 fpu_vfp_ext_v3);
6181 }
6182 else
6183 {
6184 first_error (_("D register out of range for selected VFP version"));
6185 return;
6186 }
6187 }
6188
6189 switch (pos)
6190 {
6191 case VFP_REG_Sd:
6192 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6193 break;
6194
6195 case VFP_REG_Sn:
6196 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6197 break;
6198
6199 case VFP_REG_Sm:
6200 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6201 break;
6202
6203 case VFP_REG_Dd:
6204 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6205 break;
6206
6207 case VFP_REG_Dn:
6208 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6209 break;
6210
6211 case VFP_REG_Dm:
6212 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6213 break;
6214
6215 default:
6216 abort ();
6217 }
6218 }
6219
6220 /* Encode a <shift> in an ARM-format instruction. The immediate,
6221 if any, is handled by md_apply_fix. */
6222 static void
6223 encode_arm_shift (int i)
6224 {
6225 if (inst.operands[i].shift_kind == SHIFT_RRX)
6226 inst.instruction |= SHIFT_ROR << 5;
6227 else
6228 {
6229 inst.instruction |= inst.operands[i].shift_kind << 5;
6230 if (inst.operands[i].immisreg)
6231 {
6232 inst.instruction |= SHIFT_BY_REG;
6233 inst.instruction |= inst.operands[i].imm << 8;
6234 }
6235 else
6236 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6237 }
6238 }
6239
6240 static void
6241 encode_arm_shifter_operand (int i)
6242 {
6243 if (inst.operands[i].isreg)
6244 {
6245 inst.instruction |= inst.operands[i].reg;
6246 encode_arm_shift (i);
6247 }
6248 else
6249 inst.instruction |= INST_IMMEDIATE;
6250 }
6251
6252 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6253 static void
6254 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
6255 {
6256 assert (inst.operands[i].isreg);
6257 inst.instruction |= inst.operands[i].reg << 16;
6258
6259 if (inst.operands[i].preind)
6260 {
6261 if (is_t)
6262 {
6263 inst.error = _("instruction does not accept preindexed addressing");
6264 return;
6265 }
6266 inst.instruction |= PRE_INDEX;
6267 if (inst.operands[i].writeback)
6268 inst.instruction |= WRITE_BACK;
6269
6270 }
6271 else if (inst.operands[i].postind)
6272 {
6273 assert (inst.operands[i].writeback);
6274 if (is_t)
6275 inst.instruction |= WRITE_BACK;
6276 }
6277 else /* unindexed - only for coprocessor */
6278 {
6279 inst.error = _("instruction does not accept unindexed addressing");
6280 return;
6281 }
6282
6283 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
6284 && (((inst.instruction & 0x000f0000) >> 16)
6285 == ((inst.instruction & 0x0000f000) >> 12)))
6286 as_warn ((inst.instruction & LOAD_BIT)
6287 ? _("destination register same as write-back base")
6288 : _("source register same as write-back base"));
6289 }
6290
6291 /* inst.operands[i] was set up by parse_address. Encode it into an
6292 ARM-format mode 2 load or store instruction. If is_t is true,
6293 reject forms that cannot be used with a T instruction (i.e. not
6294 post-indexed). */
6295 static void
6296 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6297 {
6298 encode_arm_addr_mode_common (i, is_t);
6299
6300 if (inst.operands[i].immisreg)
6301 {
6302 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
6303 inst.instruction |= inst.operands[i].imm;
6304 if (!inst.operands[i].negative)
6305 inst.instruction |= INDEX_UP;
6306 if (inst.operands[i].shifted)
6307 {
6308 if (inst.operands[i].shift_kind == SHIFT_RRX)
6309 inst.instruction |= SHIFT_ROR << 5;
6310 else
6311 {
6312 inst.instruction |= inst.operands[i].shift_kind << 5;
6313 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6314 }
6315 }
6316 }
6317 else /* immediate offset in inst.reloc */
6318 {
6319 if (inst.reloc.type == BFD_RELOC_UNUSED)
6320 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6321 }
6322 }
6323
6324 /* inst.operands[i] was set up by parse_address. Encode it into an
6325 ARM-format mode 3 load or store instruction. Reject forms that
6326 cannot be used with such instructions. If is_t is true, reject
6327 forms that cannot be used with a T instruction (i.e. not
6328 post-indexed). */
6329 static void
6330 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6331 {
6332 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6333 {
6334 inst.error = _("instruction does not accept scaled register index");
6335 return;
6336 }
6337
6338 encode_arm_addr_mode_common (i, is_t);
6339
6340 if (inst.operands[i].immisreg)
6341 {
6342 inst.instruction |= inst.operands[i].imm;
6343 if (!inst.operands[i].negative)
6344 inst.instruction |= INDEX_UP;
6345 }
6346 else /* immediate offset in inst.reloc */
6347 {
6348 inst.instruction |= HWOFFSET_IMM;
6349 if (inst.reloc.type == BFD_RELOC_UNUSED)
6350 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6351 }
6352 }
6353
6354 /* inst.operands[i] was set up by parse_address. Encode it into an
6355 ARM-format instruction. Reject all forms which cannot be encoded
6356 into a coprocessor load/store instruction. If wb_ok is false,
6357 reject use of writeback; if unind_ok is false, reject use of
6358 unindexed addressing. If reloc_override is not 0, use it instead
6359 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6360 (in which case it is preserved). */
6361
6362 static int
6363 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
6364 {
6365 inst.instruction |= inst.operands[i].reg << 16;
6366
6367 assert (!(inst.operands[i].preind && inst.operands[i].postind));
6368
6369 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
6370 {
6371 assert (!inst.operands[i].writeback);
6372 if (!unind_ok)
6373 {
6374 inst.error = _("instruction does not support unindexed addressing");
6375 return FAIL;
6376 }
6377 inst.instruction |= inst.operands[i].imm;
6378 inst.instruction |= INDEX_UP;
6379 return SUCCESS;
6380 }
6381
6382 if (inst.operands[i].preind)
6383 inst.instruction |= PRE_INDEX;
6384
6385 if (inst.operands[i].writeback)
6386 {
6387 if (inst.operands[i].reg == REG_PC)
6388 {
6389 inst.error = _("pc may not be used with write-back");
6390 return FAIL;
6391 }
6392 if (!wb_ok)
6393 {
6394 inst.error = _("instruction does not support writeback");
6395 return FAIL;
6396 }
6397 inst.instruction |= WRITE_BACK;
6398 }
6399
6400 if (reloc_override)
6401 inst.reloc.type = reloc_override;
6402 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
6403 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
6404 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
6405 {
6406 if (thumb_mode)
6407 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
6408 else
6409 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
6410 }
6411
6412 return SUCCESS;
6413 }
6414
6415 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6416 Determine whether it can be performed with a move instruction; if
6417 it can, convert inst.instruction to that move instruction and
6418 return 1; if it can't, convert inst.instruction to a literal-pool
6419 load and return 0. If this is not a valid thing to do in the
6420 current context, set inst.error and return 1.
6421
6422 inst.operands[i] describes the destination register. */
6423
6424 static int
6425 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
6426 {
6427 unsigned long tbit;
6428
6429 if (thumb_p)
6430 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
6431 else
6432 tbit = LOAD_BIT;
6433
6434 if ((inst.instruction & tbit) == 0)
6435 {
6436 inst.error = _("invalid pseudo operation");
6437 return 1;
6438 }
6439 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
6440 {
6441 inst.error = _("constant expression expected");
6442 return 1;
6443 }
6444 if (inst.reloc.exp.X_op == O_constant)
6445 {
6446 if (thumb_p)
6447 {
6448 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
6449 {
6450 /* This can be done with a mov(1) instruction. */
6451 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
6452 inst.instruction |= inst.reloc.exp.X_add_number;
6453 return 1;
6454 }
6455 }
6456 else
6457 {
6458 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
6459 if (value != FAIL)
6460 {
6461 /* This can be done with a mov instruction. */
6462 inst.instruction &= LITERAL_MASK;
6463 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
6464 inst.instruction |= value & 0xfff;
6465 return 1;
6466 }
6467
6468 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
6469 if (value != FAIL)
6470 {
6471 /* This can be done with a mvn instruction. */
6472 inst.instruction &= LITERAL_MASK;
6473 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
6474 inst.instruction |= value & 0xfff;
6475 return 1;
6476 }
6477 }
6478 }
6479
6480 if (add_to_lit_pool () == FAIL)
6481 {
6482 inst.error = _("literal pool insertion failed");
6483 return 1;
6484 }
6485 inst.operands[1].reg = REG_PC;
6486 inst.operands[1].isreg = 1;
6487 inst.operands[1].preind = 1;
6488 inst.reloc.pc_rel = 1;
6489 inst.reloc.type = (thumb_p
6490 ? BFD_RELOC_ARM_THUMB_OFFSET
6491 : (mode_3
6492 ? BFD_RELOC_ARM_HWLITERAL
6493 : BFD_RELOC_ARM_LITERAL));
6494 return 0;
6495 }
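/* Examples of the three outcomes in ARM state, for illustration:
     ldr r0, =0xFF000000  ->  mov r0, #0xFF000000   (immediate encodable)
     ldr r0, =0xFFFFFF00  ->  mvn r0, #0x000000FF   (inverted form encodable)
     ldr r0, =0x12345678  ->  ldr r0, [pc, #offset] (literal pool entry)
   In the last case the constant is added to the current literal pool and
   the pc-relative offset is resolved later through the reloc set up
   above.  */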
6496
6497 /* Functions for instruction encoding, sorted by subarchitecture.
6498 First some generics; their names are taken from the conventional
6499 bit positions for register arguments in ARM format instructions. */
6500
6501 static void
6502 do_noargs (void)
6503 {
6504 }
6505
6506 static void
6507 do_rd (void)
6508 {
6509 inst.instruction |= inst.operands[0].reg << 12;
6510 }
6511
6512 static void
6513 do_rd_rm (void)
6514 {
6515 inst.instruction |= inst.operands[0].reg << 12;
6516 inst.instruction |= inst.operands[1].reg;
6517 }
6518
6519 static void
6520 do_rd_rn (void)
6521 {
6522 inst.instruction |= inst.operands[0].reg << 12;
6523 inst.instruction |= inst.operands[1].reg << 16;
6524 }
6525
6526 static void
6527 do_rn_rd (void)
6528 {
6529 inst.instruction |= inst.operands[0].reg << 16;
6530 inst.instruction |= inst.operands[1].reg << 12;
6531 }
6532
6533 static void
6534 do_rd_rm_rn (void)
6535 {
6536 unsigned Rn = inst.operands[2].reg;
6537 /* Enforce restrictions on SWP instruction. */
6538 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6539 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6540 _("Rn must not overlap other operands"));
6541 inst.instruction |= inst.operands[0].reg << 12;
6542 inst.instruction |= inst.operands[1].reg;
6543 inst.instruction |= Rn << 16;
6544 }
6545
6546 static void
6547 do_rd_rn_rm (void)
6548 {
6549 inst.instruction |= inst.operands[0].reg << 12;
6550 inst.instruction |= inst.operands[1].reg << 16;
6551 inst.instruction |= inst.operands[2].reg;
6552 }
6553
6554 static void
6555 do_rm_rd_rn (void)
6556 {
6557 inst.instruction |= inst.operands[0].reg;
6558 inst.instruction |= inst.operands[1].reg << 12;
6559 inst.instruction |= inst.operands[2].reg << 16;
6560 }
6561
6562 static void
6563 do_imm0 (void)
6564 {
6565 inst.instruction |= inst.operands[0].imm;
6566 }
6567
6568 static void
6569 do_rd_cpaddr (void)
6570 {
6571 inst.instruction |= inst.operands[0].reg << 12;
6572 encode_arm_cp_address (1, TRUE, TRUE, 0);
6573 }
6574
6575 /* ARM instructions, in alphabetical order by function name (except
6576 that wrapper functions appear immediately after the function they
6577 wrap). */
6578
6579 /* This is a pseudo-op of the form "adr rd, label" to be converted
6580 into a relative address of the form "add rd, pc, #label-.-8". */
6581
6582 static void
6583 do_adr (void)
6584 {
6585 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6586
6587 /* Frag hacking will turn this into a sub instruction if the offset turns
6588 out to be negative. */
6589 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6590 inst.reloc.pc_rel = 1;
6591 inst.reloc.exp.X_add_number -= 8;
6592 }
6593
6594 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6595 into a relative address of the form:
6596 add rd, pc, #low(label-.-8)
6597 add rd, rd, #high(label-.-8) */
6598
6599 static void
6600 do_adrl (void)
6601 {
6602 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6603
6604 /* Frag hacking will turn this into a sub instruction if the offset turns
6605 out to be negative. */
6606 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6607 inst.reloc.pc_rel = 1;
6608 inst.size = INSN_SIZE * 2;
6609 inst.reloc.exp.X_add_number -= 8;
6610 }
6611
6612 static void
6613 do_arit (void)
6614 {
6615 if (!inst.operands[1].present)
6616 inst.operands[1].reg = inst.operands[0].reg;
6617 inst.instruction |= inst.operands[0].reg << 12;
6618 inst.instruction |= inst.operands[1].reg << 16;
6619 encode_arm_shifter_operand (2);
6620 }
6621
6622 static void
6623 do_barrier (void)
6624 {
6625 if (inst.operands[0].present)
6626 {
6627 constraint ((inst.instruction & 0xf0) != 0x40
6628 && inst.operands[0].imm != 0xf,
6629 _("bad barrier type"));
6630 inst.instruction |= inst.operands[0].imm;
6631 }
6632 else
6633 inst.instruction |= 0xf;
6634 }
6635
6636 static void
6637 do_bfc (void)
6638 {
6639 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6640 constraint (msb > 32, _("bit-field extends past end of register"));
6641 /* The instruction encoding stores the LSB and MSB,
6642 not the LSB and width. */
6643 inst.instruction |= inst.operands[0].reg << 12;
6644 inst.instruction |= inst.operands[1].imm << 7;
6645 inst.instruction |= (msb - 1) << 16;
6646 }
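/* Example, for illustration: "bfc r0, #8, #4" has lsb = 8 and width = 4,
   so msb = 12; the encoding carries lsb (8) in bits 11:7 and msb - 1
   (11) in bits 20:16.  */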
6647
6648 static void
6649 do_bfi (void)
6650 {
6651 unsigned int msb;
6652
6653 /* #0 in second position is alternative syntax for bfc, which is
6654 the same instruction but with REG_PC in the Rm field. */
6655 if (!inst.operands[1].isreg)
6656 inst.operands[1].reg = REG_PC;
6657
6658 msb = inst.operands[2].imm + inst.operands[3].imm;
6659 constraint (msb > 32, _("bit-field extends past end of register"));
6660 /* The instruction encoding stores the LSB and MSB,
6661 not the LSB and width. */
6662 inst.instruction |= inst.operands[0].reg << 12;
6663 inst.instruction |= inst.operands[1].reg;
6664 inst.instruction |= inst.operands[2].imm << 7;
6665 inst.instruction |= (msb - 1) << 16;
6666 }
6667
6668 static void
6669 do_bfx (void)
6670 {
6671 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6672 _("bit-field extends past end of register"));
6673 inst.instruction |= inst.operands[0].reg << 12;
6674 inst.instruction |= inst.operands[1].reg;
6675 inst.instruction |= inst.operands[2].imm << 7;
6676 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6677 }
6678
6679 /* ARM V5 breakpoint instruction (argument parse)
6680 BKPT <16 bit unsigned immediate>
6681 Instruction is not conditional.
6682 The bit pattern given in insns[] has the COND_ALWAYS condition,
6683 and it is an error if the caller tried to override that. */
6684
6685 static void
6686 do_bkpt (void)
6687 {
6688 /* Top 12 of 16 bits to bits 19:8. */
6689 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6690
6691 /* Bottom 4 of 16 bits to bits 3:0. */
6692 inst.instruction |= inst.operands[0].imm & 0xf;
6693 }
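/* Example, for illustration: "bkpt 0xABCD" places 0xABC in bits 19:8 and
   0xD in bits 3:0; assuming the architectural BKPT base pattern
   0xE1200070 (always condition) from the opcode table, the assembled
   word is 0xE12ABC7D.  */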
6694
6695 static void
6696 encode_branch (int default_reloc)
6697 {
6698 if (inst.operands[0].hasreloc)
6699 {
6700 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6701 _("the only suffix valid here is '(plt)'"));
6702 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6703 }
6704 else
6705 {
6706 inst.reloc.type = default_reloc;
6707 }
6708 inst.reloc.pc_rel = 1;
6709 }
6710
6711 static void
6712 do_branch (void)
6713 {
6714 #ifdef OBJ_ELF
6715 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6716 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6717 else
6718 #endif
6719 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6720 }
6721
6722 static void
6723 do_bl (void)
6724 {
6725 #ifdef OBJ_ELF
6726 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6727 {
6728 if (inst.cond == COND_ALWAYS)
6729 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6730 else
6731 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6732 }
6733 else
6734 #endif
6735 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6736 }
6737
6738 /* ARM V5 branch-link-exchange instruction (argument parse)
6739 BLX <target_addr> i.e. BLX(1)
6740 BLX{<condition>} <Rm> i.e. BLX(2)
6741 Unfortunately, there are two different opcodes for this mnemonic.
6742 So, the insns[].value is not used, and the code here zaps values
6743 into inst.instruction.
6744 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6745
6746 static void
6747 do_blx (void)
6748 {
6749 if (inst.operands[0].isreg)
6750 {
6751 /* Arg is a register; the opcode provided by insns[] is correct.
6752 It is not illegal to do "blx pc", just useless. */
6753 if (inst.operands[0].reg == REG_PC)
6754 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6755
6756 inst.instruction |= inst.operands[0].reg;
6757 }
6758 else
6759 {
6760 /* Arg is an address; this instruction cannot be executed
6761 conditionally, and the opcode must be adjusted. */
6762 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6763 inst.instruction = 0xfa000000;
6764 #ifdef OBJ_ELF
6765 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6766 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6767 else
6768 #endif
6769 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6770 }
6771 }
6772
6773 static void
6774 do_bx (void)
6775 {
6776 if (inst.operands[0].reg == REG_PC)
6777 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6778
6779 inst.instruction |= inst.operands[0].reg;
6780 }
6781
6782
6783 /* ARM v5TEJ. Jump to Jazelle code. */
6784
6785 static void
6786 do_bxj (void)
6787 {
6788 if (inst.operands[0].reg == REG_PC)
6789 as_tsktsk (_("use of r15 in bxj is not really useful"));
6790
6791 inst.instruction |= inst.operands[0].reg;
6792 }
6793
6794 /* Co-processor data operation:
6795 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6796 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6797 static void
6798 do_cdp (void)
6799 {
6800 inst.instruction |= inst.operands[0].reg << 8;
6801 inst.instruction |= inst.operands[1].imm << 20;
6802 inst.instruction |= inst.operands[2].reg << 12;
6803 inst.instruction |= inst.operands[3].reg << 16;
6804 inst.instruction |= inst.operands[4].reg;
6805 inst.instruction |= inst.operands[5].imm << 5;
6806 }
6807
6808 static void
6809 do_cmp (void)
6810 {
6811 inst.instruction |= inst.operands[0].reg << 16;
6812 encode_arm_shifter_operand (1);
6813 }
6814
6815 /* Transfer between coprocessor and ARM registers.
6816 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6817 MRC2
6818 MCR{cond}
6819 MCR2
6820
6821 No special properties. */
6822
6823 static void
6824 do_co_reg (void)
6825 {
6826 inst.instruction |= inst.operands[0].reg << 8;
6827 inst.instruction |= inst.operands[1].imm << 21;
6828 inst.instruction |= inst.operands[2].reg << 12;
6829 inst.instruction |= inst.operands[3].reg << 16;
6830 inst.instruction |= inst.operands[4].reg;
6831 inst.instruction |= inst.operands[5].imm << 5;
6832 }
6833
6834 /* Transfer between coprocessor register and pair of ARM registers.
6835 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6836 MCRR2
6837 MRRC{cond}
6838 MRRC2
6839
6840 Two XScale instructions are special cases of these:
6841
6842 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6843 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6844
6845 Result unpredictable if Rd or Rn is R15. */
6846
6847 static void
6848 do_co_reg2c (void)
6849 {
6850 inst.instruction |= inst.operands[0].reg << 8;
6851 inst.instruction |= inst.operands[1].imm << 4;
6852 inst.instruction |= inst.operands[2].reg << 12;
6853 inst.instruction |= inst.operands[3].reg << 16;
6854 inst.instruction |= inst.operands[4].reg;
6855 }
6856
6857 static void
6858 do_cpsi (void)
6859 {
6860 inst.instruction |= inst.operands[0].imm << 6;
6861 if (inst.operands[1].present)
6862 {
6863 inst.instruction |= CPSI_MMOD;
6864 inst.instruction |= inst.operands[1].imm;
6865 }
6866 }
6867
6868 static void
6869 do_dbg (void)
6870 {
6871 inst.instruction |= inst.operands[0].imm;
6872 }
6873
6874 static void
6875 do_it (void)
6876 {
6877 /* There is no IT instruction in ARM mode. We
6878 process it but do not generate code for it. */
6879 inst.size = 0;
6880 }
6881
6882 static void
6883 do_ldmstm (void)
6884 {
6885 int base_reg = inst.operands[0].reg;
6886 int range = inst.operands[1].imm;
6887
6888 inst.instruction |= base_reg << 16;
6889 inst.instruction |= range;
6890
6891 if (inst.operands[1].writeback)
6892 inst.instruction |= LDM_TYPE_2_OR_3;
6893
6894 if (inst.operands[0].writeback)
6895 {
6896 inst.instruction |= WRITE_BACK;
6897 /* Check for unpredictable uses of writeback. */
6898 if (inst.instruction & LOAD_BIT)
6899 {
6900 /* Not allowed in LDM type 2. */
6901 if ((inst.instruction & LDM_TYPE_2_OR_3)
6902 && ((range & (1 << REG_PC)) == 0))
6903 as_warn (_("writeback of base register is UNPREDICTABLE"));
6904 /* Only allowed if base reg not in list for other types. */
6905 else if (range & (1 << base_reg))
6906 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6907 }
6908 else /* STM. */
6909 {
6910 /* Not allowed for type 2. */
6911 if (inst.instruction & LDM_TYPE_2_OR_3)
6912 as_warn (_("writeback of base register is UNPREDICTABLE"));
6913 /* Only allowed if base reg not in list, or first in list. */
6914 else if ((range & (1 << base_reg))
6915 && (range & ((1 << base_reg) - 1)))
6916 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6917 }
6918 }
6919 }
6920
6921 /* ARMv5TE load-consecutive (argument parse)
6922 Mode is like LDRH.
6923
6924 LDRccD R, mode
6925 STRccD R, mode. */
6926
6927 static void
6928 do_ldrd (void)
6929 {
6930 constraint (inst.operands[0].reg % 2 != 0,
6931 _("first destination register must be even"));
6932 constraint (inst.operands[1].present
6933 && inst.operands[1].reg != inst.operands[0].reg + 1,
6934 _("can only load two consecutive registers"));
6935 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6936 constraint (!inst.operands[2].isreg, _("'[' expected"));
6937
6938 if (!inst.operands[1].present)
6939 inst.operands[1].reg = inst.operands[0].reg + 1;
6940
6941 if (inst.instruction & LOAD_BIT)
6942 {
6943 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6944 register and the first register written; we have to diagnose
6945 overlap between the base and the second register written here. */
6946
6947 if (inst.operands[2].reg == inst.operands[1].reg
6948 && (inst.operands[2].writeback || inst.operands[2].postind))
6949 as_warn (_("base register written back, and overlaps "
6950 "second destination register"));
6951
6952 /* For an index-register load, the index register must not overlap the
6953 destination (even if not write-back). */
6954 else if (inst.operands[2].immisreg
6955 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6956 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6957 as_warn (_("index register overlaps destination register"));
6958 }
6959
6960 inst.instruction |= inst.operands[0].reg << 12;
6961 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6962 }
6963
6964 static void
6965 do_ldrex (void)
6966 {
6967 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6968 || inst.operands[1].postind || inst.operands[1].writeback
6969 || inst.operands[1].immisreg || inst.operands[1].shifted
6970 || inst.operands[1].negative
6971 /* This can arise if the programmer has written
6972 strex rN, rM, foo
6973 or if they have mistakenly used a register name as the last
6974 operand, e.g.:
6975 strex rN, rM, rX
6976 It is very difficult to distinguish between these two cases
6977 because "rX" might actually be a label, i.e. the register
6978 name has been occluded by a symbol of the same name. So we
6979 just generate a general 'bad addressing mode' type error
6980 message and leave it up to the programmer to discover the
6981 true cause and fix their mistake. */
6982 || (inst.operands[1].reg == REG_PC),
6983 BAD_ADDR_MODE);
6984
6985 constraint (inst.reloc.exp.X_op != O_constant
6986 || inst.reloc.exp.X_add_number != 0,
6987 _("offset must be zero in ARM encoding"));
6988
6989 inst.instruction |= inst.operands[0].reg << 12;
6990 inst.instruction |= inst.operands[1].reg << 16;
6991 inst.reloc.type = BFD_RELOC_UNUSED;
6992 }
6993
6994 static void
6995 do_ldrexd (void)
6996 {
6997 constraint (inst.operands[0].reg % 2 != 0,
6998 _("even register required"));
6999 constraint (inst.operands[1].present
7000 && inst.operands[1].reg != inst.operands[0].reg + 1,
7001 _("can only load two consecutive registers"));
7002 /* If op 1 were present and equal to PC, this function wouldn't
7003 have been called in the first place. */
7004 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7005
7006 inst.instruction |= inst.operands[0].reg << 12;
7007 inst.instruction |= inst.operands[2].reg << 16;
7008 }
7009
7010 static void
7011 do_ldst (void)
7012 {
7013 inst.instruction |= inst.operands[0].reg << 12;
7014 if (!inst.operands[1].isreg)
7015 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7016 return;
7017 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7018 }
7019
7020 static void
7021 do_ldstt (void)
7022 {
7023 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7024 reject [Rn,...]. */
7025 if (inst.operands[1].preind)
7026 {
7027 constraint (inst.reloc.exp.X_op != O_constant ||
7028 inst.reloc.exp.X_add_number != 0,
7029 _("this instruction requires a post-indexed address"));
7030
7031 inst.operands[1].preind = 0;
7032 inst.operands[1].postind = 1;
7033 inst.operands[1].writeback = 1;
7034 }
7035 inst.instruction |= inst.operands[0].reg << 12;
7036 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7037 }
7038
7039 /* Halfword and signed-byte load/store operations. */
7040
7041 static void
7042 do_ldstv4 (void)
7043 {
7044 inst.instruction |= inst.operands[0].reg << 12;
7045 if (!inst.operands[1].isreg)
7046 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7047 return;
7048 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7049 }
7050
7051 static void
7052 do_ldsttv4 (void)
7053 {
7054 /* ldrht/strht (and the signed byte/halfword variants) always use
7055 post-indexed addressing. Turn [Rn] into [Rn]! and reject [Rn,...]. */
7056 if (inst.operands[1].preind)
7057 {
7058 constraint (inst.reloc.exp.X_op != O_constant ||
7059 inst.reloc.exp.X_add_number != 0,
7060 _("this instruction requires a post-indexed address"));
7061
7062 inst.operands[1].preind = 0;
7063 inst.operands[1].postind = 1;
7064 inst.operands[1].writeback = 1;
7065 }
7066 inst.instruction |= inst.operands[0].reg << 12;
7067 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7068 }
7069
7070 /* Co-processor register load/store.
7071 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7072 static void
7073 do_lstc (void)
7074 {
7075 inst.instruction |= inst.operands[0].reg << 8;
7076 inst.instruction |= inst.operands[1].reg << 12;
7077 encode_arm_cp_address (2, TRUE, TRUE, 0);
7078 }
7079
7080 static void
7081 do_mlas (void)
7082 {
7083 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7084 if (inst.operands[0].reg == inst.operands[1].reg
7085 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
7086 && !(inst.instruction & 0x00400000))
7087 as_tsktsk (_("Rd and Rm should be different in mla"));
7088
7089 inst.instruction |= inst.operands[0].reg << 16;
7090 inst.instruction |= inst.operands[1].reg;
7091 inst.instruction |= inst.operands[2].reg << 8;
7092 inst.instruction |= inst.operands[3].reg << 12;
7093 }
7094
7095 static void
7096 do_mov (void)
7097 {
7098 inst.instruction |= inst.operands[0].reg << 12;
7099 encode_arm_shifter_operand (1);
7100 }
7101
7102 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7103 static void
7104 do_mov16 (void)
7105 {
7106 bfd_vma imm;
7107 bfd_boolean top;
7108
7109 top = (inst.instruction & 0x00400000) != 0;
7110 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7111 _(":lower16: not allowed in this instruction"));
7112 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7113 _(":upper16: not allowed in this instruction"));
7114 inst.instruction |= inst.operands[0].reg << 12;
7115 if (inst.reloc.type == BFD_RELOC_UNUSED)
7116 {
7117 imm = inst.reloc.exp.X_add_number;
7118 /* The value is in two pieces: 0:11, 16:19. */
7119 inst.instruction |= (imm & 0x00000fff);
7120 inst.instruction |= (imm & 0x0000f000) << 4;
7121 }
7122 }
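/* Example, for illustration: "movw r0, #0xABCD" with no :lower16:/:upper16:
   relocation splits the constant as above, 0xBCD into bits 11:0 and 0xA
   (from 0xA000 << 4) into bits 19:16; assuming the usual ARM MOVW base
   pattern 0xE3000000 and Rd = r0, the result is 0xE30A0BCD.  */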
7123
7124 static void do_vfp_nsyn_opcode (const char *);
7125
7126 static int
7127 do_vfp_nsyn_mrs (void)
7128 {
7129 if (inst.operands[0].isvec)
7130 {
7131 if (inst.operands[1].reg != 1)
7132 first_error (_("operand 1 must be FPSCR"));
7133 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
7134 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
7135 do_vfp_nsyn_opcode ("fmstat");
7136 }
7137 else if (inst.operands[1].isvec)
7138 do_vfp_nsyn_opcode ("fmrx");
7139 else
7140 return FAIL;
7141
7142 return SUCCESS;
7143 }
7144
7145 static int
7146 do_vfp_nsyn_msr (void)
7147 {
7148 if (inst.operands[0].isvec)
7149 do_vfp_nsyn_opcode ("fmxr");
7150 else
7151 return FAIL;
7152
7153 return SUCCESS;
7154 }
7155
7156 static void
7157 do_mrs (void)
7158 {
7159 if (do_vfp_nsyn_mrs () == SUCCESS)
7160 return;
7161
7162 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7163 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
7164 != (PSR_c|PSR_f),
7165 _("'CPSR' or 'SPSR' expected"));
7166 inst.instruction |= inst.operands[0].reg << 12;
7167 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
7168 }
7169
7170 /* Two possible forms:
7171 "{C|S}PSR_<field>, Rm",
7172 "{C|S}PSR_f, #expression". */
7173
7174 static void
7175 do_msr (void)
7176 {
7177 if (do_vfp_nsyn_msr () == SUCCESS)
7178 return;
7179
7180 inst.instruction |= inst.operands[0].imm;
7181 if (inst.operands[1].isreg)
7182 inst.instruction |= inst.operands[1].reg;
7183 else
7184 {
7185 inst.instruction |= INST_IMMEDIATE;
7186 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7187 inst.reloc.pc_rel = 0;
7188 }
7189 }
7190
7191 static void
7192 do_mul (void)
7193 {
7194 if (!inst.operands[2].present)
7195 inst.operands[2].reg = inst.operands[0].reg;
7196 inst.instruction |= inst.operands[0].reg << 16;
7197 inst.instruction |= inst.operands[1].reg;
7198 inst.instruction |= inst.operands[2].reg << 8;
7199
7200 if (inst.operands[0].reg == inst.operands[1].reg
7201 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7202 as_tsktsk (_("Rd and Rm should be different in mul"));
7203 }
7204
7205 /* Long Multiply Parser
7206 UMULL RdLo, RdHi, Rm, Rs
7207 SMULL RdLo, RdHi, Rm, Rs
7208 UMLAL RdLo, RdHi, Rm, Rs
7209 SMLAL RdLo, RdHi, Rm, Rs. */
7210
7211 static void
7212 do_mull (void)
7213 {
7214 inst.instruction |= inst.operands[0].reg << 12;
7215 inst.instruction |= inst.operands[1].reg << 16;
7216 inst.instruction |= inst.operands[2].reg;
7217 inst.instruction |= inst.operands[3].reg << 8;
7218
7219 /* rdhi, rdlo and rm must all be different. */
7220 if (inst.operands[0].reg == inst.operands[1].reg
7221 || inst.operands[0].reg == inst.operands[2].reg
7222 || inst.operands[1].reg == inst.operands[2].reg)
7223 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7224 }
7225
7226 static void
7227 do_nop (void)
7228 {
7229 if (inst.operands[0].present)
7230 {
7231 /* Architectural NOP hints are CPSR sets with no bits selected. */
7232 inst.instruction &= 0xf0000000;
7233 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
7234 }
7235 }
7236
7237 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7238 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7239 Condition defaults to COND_ALWAYS.
7240 Error if Rd, Rn or Rm are R15. */
7241
7242 static void
7243 do_pkhbt (void)
7244 {
7245 inst.instruction |= inst.operands[0].reg << 12;
7246 inst.instruction |= inst.operands[1].reg << 16;
7247 inst.instruction |= inst.operands[2].reg;
7248 if (inst.operands[3].present)
7249 encode_arm_shift (3);
7250 }
7251
7252 /* ARM V6 PKHTB (Argument Parse). */
7253
7254 static void
7255 do_pkhtb (void)
7256 {
7257 if (!inst.operands[3].present)
7258 {
7259 /* If the shift specifier is omitted, turn the instruction
7260 into pkhbt rd, rm, rn. */
7261 inst.instruction &= 0xfff00010;
7262 inst.instruction |= inst.operands[0].reg << 12;
7263 inst.instruction |= inst.operands[1].reg;
7264 inst.instruction |= inst.operands[2].reg << 16;
7265 }
7266 else
7267 {
7268 inst.instruction |= inst.operands[0].reg << 12;
7269 inst.instruction |= inst.operands[1].reg << 16;
7270 inst.instruction |= inst.operands[2].reg;
7271 encode_arm_shift (3);
7272 }
7273 }
7274
7275 /* ARMv5TE: Preload-Cache
7276
7277 PLD <addr_mode>
7278
7279 Syntactically, like LDR with B=1, W=0, L=1. */
7280
7281 static void
7282 do_pld (void)
7283 {
7284 constraint (!inst.operands[0].isreg,
7285 _("'[' expected after PLD mnemonic"));
7286 constraint (inst.operands[0].postind,
7287 _("post-indexed expression used in preload instruction"));
7288 constraint (inst.operands[0].writeback,
7289 _("writeback used in preload instruction"));
7290 constraint (!inst.operands[0].preind,
7291 _("unindexed addressing used in preload instruction"));
7292 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7293 }
7294
7295 /* ARMv7: PLI <addr_mode> */
7296 static void
7297 do_pli (void)
7298 {
7299 constraint (!inst.operands[0].isreg,
7300 _("'[' expected after PLI mnemonic"));
7301 constraint (inst.operands[0].postind,
7302 _("post-indexed expression used in preload instruction"));
7303 constraint (inst.operands[0].writeback,
7304 _("writeback used in preload instruction"));
7305 constraint (!inst.operands[0].preind,
7306 _("unindexed addressing used in preload instruction"));
7307 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7308 inst.instruction &= ~PRE_INDEX;
7309 }
7310
7311 static void
7312 do_push_pop (void)
7313 {
7314 inst.operands[1] = inst.operands[0];
7315 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7316 inst.operands[0].isreg = 1;
7317 inst.operands[0].writeback = 1;
7318 inst.operands[0].reg = REG_SP;
7319 do_ldmstm ();
7320 }
7321
7322 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7323 word at the specified address and the following word
7324 respectively.
7325 Unconditionally executed.
7326 Error if Rn is R15. */
7327
7328 static void
7329 do_rfe (void)
7330 {
7331 inst.instruction |= inst.operands[0].reg << 16;
7332 if (inst.operands[0].writeback)
7333 inst.instruction |= WRITE_BACK;
7334 }
7335
7336 /* ARM V6 ssat (argument parse). */
7337
7338 static void
7339 do_ssat (void)
7340 {
7341 inst.instruction |= inst.operands[0].reg << 12;
7342 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7343 inst.instruction |= inst.operands[2].reg;
7344
7345 if (inst.operands[3].present)
7346 encode_arm_shift (3);
7347 }
7348
7349 /* ARM V6 usat (argument parse). */
7350
7351 static void
7352 do_usat (void)
7353 {
7354 inst.instruction |= inst.operands[0].reg << 12;
7355 inst.instruction |= inst.operands[1].imm << 16;
7356 inst.instruction |= inst.operands[2].reg;
7357
7358 if (inst.operands[3].present)
7359 encode_arm_shift (3);
7360 }
7361
7362 /* ARM V6 ssat16 (argument parse). */
7363
7364 static void
7365 do_ssat16 (void)
7366 {
7367 inst.instruction |= inst.operands[0].reg << 12;
7368 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7369 inst.instruction |= inst.operands[2].reg;
7370 }
7371
7372 static void
7373 do_usat16 (void)
7374 {
7375 inst.instruction |= inst.operands[0].reg << 12;
7376 inst.instruction |= inst.operands[1].imm << 16;
7377 inst.instruction |= inst.operands[2].reg;
7378 }
7379
7380 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7381 preserving the other bits.
7382
7383 setend <endian_specifier>, where <endian_specifier> is either
7384 BE or LE. */
7385
7386 static void
7387 do_setend (void)
7388 {
7389 if (inst.operands[0].imm)
7390 inst.instruction |= 0x200;
7391 }
7392
7393 static void
7394 do_shift (void)
7395 {
7396 unsigned int Rm = (inst.operands[1].present
7397 ? inst.operands[1].reg
7398 : inst.operands[0].reg);
7399
7400 inst.instruction |= inst.operands[0].reg << 12;
7401 inst.instruction |= Rm;
7402 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7403 {
7404 inst.instruction |= inst.operands[2].reg << 8;
7405 inst.instruction |= SHIFT_BY_REG;
7406 }
7407 else
7408 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7409 }
7410
7411 static void
7412 do_smc (void)
7413 {
7414 inst.reloc.type = BFD_RELOC_ARM_SMC;
7415 inst.reloc.pc_rel = 0;
7416 }
7417
7418 static void
7419 do_swi (void)
7420 {
7421 inst.reloc.type = BFD_RELOC_ARM_SWI;
7422 inst.reloc.pc_rel = 0;
7423 }
7424
7425 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7426 SMLAxy{cond} Rd,Rm,Rs,Rn
7427 SMLAWy{cond} Rd,Rm,Rs,Rn
7428 Error if any register is R15. */
7429
7430 static void
7431 do_smla (void)
7432 {
7433 inst.instruction |= inst.operands[0].reg << 16;
7434 inst.instruction |= inst.operands[1].reg;
7435 inst.instruction |= inst.operands[2].reg << 8;
7436 inst.instruction |= inst.operands[3].reg << 12;
7437 }
7438
7439 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7440 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7441 Error if any register is R15.
7442 Warning if Rdlo == Rdhi. */
7443
7444 static void
7445 do_smlal (void)
7446 {
7447 inst.instruction |= inst.operands[0].reg << 12;
7448 inst.instruction |= inst.operands[1].reg << 16;
7449 inst.instruction |= inst.operands[2].reg;
7450 inst.instruction |= inst.operands[3].reg << 8;
7451
7452 if (inst.operands[0].reg == inst.operands[1].reg)
7453 as_tsktsk (_("rdhi and rdlo must be different"));
7454 }
7455
7456 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7457 SMULxy{cond} Rd,Rm,Rs
7458 Error if any register is R15. */
7459
7460 static void
7461 do_smul (void)
7462 {
7463 inst.instruction |= inst.operands[0].reg << 16;
7464 inst.instruction |= inst.operands[1].reg;
7465 inst.instruction |= inst.operands[2].reg << 8;
7466 }
7467
7468 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7469 the same for both ARM and Thumb-2. */
7470
7471 static void
7472 do_srs (void)
7473 {
7474 int reg;
7475
7476 if (inst.operands[0].present)
7477 {
7478 reg = inst.operands[0].reg;
7479 constraint (reg != 13, _("SRS base register must be r13"));
7480 }
7481 else
7482 reg = 13;
7483
7484 inst.instruction |= reg << 16;
7485 inst.instruction |= inst.operands[1].imm;
7486 if (inst.operands[0].writeback || inst.operands[1].writeback)
7487 inst.instruction |= WRITE_BACK;
7488 }
7489
7490 /* ARM V6 strex (argument parse). */
7491
7492 static void
7493 do_strex (void)
7494 {
7495 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
7496 || inst.operands[2].postind || inst.operands[2].writeback
7497 || inst.operands[2].immisreg || inst.operands[2].shifted
7498 || inst.operands[2].negative
7499 /* See comment in do_ldrex(). */
7500 || (inst.operands[2].reg == REG_PC),
7501 BAD_ADDR_MODE);
7502
7503 constraint (inst.operands[0].reg == inst.operands[1].reg
7504 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
7505
7506 constraint (inst.reloc.exp.X_op != O_constant
7507 || inst.reloc.exp.X_add_number != 0,
7508 _("offset must be zero in ARM encoding"));
7509
7510 inst.instruction |= inst.operands[0].reg << 12;
7511 inst.instruction |= inst.operands[1].reg;
7512 inst.instruction |= inst.operands[2].reg << 16;
7513 inst.reloc.type = BFD_RELOC_UNUSED;
7514 }
7515
7516 static void
7517 do_strexd (void)
7518 {
7519 constraint (inst.operands[1].reg % 2 != 0,
7520 _("even register required"));
7521 constraint (inst.operands[2].present
7522 && inst.operands[2].reg != inst.operands[1].reg + 1,
7523 _("can only store two consecutive registers"));
7524 /* If op 2 were present and equal to PC, this function wouldn't
7525 have been called in the first place. */
7526 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
7527
7528 constraint (inst.operands[0].reg == inst.operands[1].reg
7529 || inst.operands[0].reg == inst.operands[1].reg + 1
7530 || inst.operands[0].reg == inst.operands[3].reg,
7531 BAD_OVERLAP);
7532
7533 inst.instruction |= inst.operands[0].reg << 12;
7534 inst.instruction |= inst.operands[1].reg;
7535 inst.instruction |= inst.operands[3].reg << 16;
7536 }
7537
7538 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7539 extends it to 32-bits, and adds the result to a value in another
7540 register. You can specify a rotation by 0, 8, 16, or 24 bits
7541 before extracting the 16-bit value.
7542 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7543 Condition defaults to COND_ALWAYS.
7544 Error if any register uses R15. */
7545
7546 static void
7547 do_sxtah (void)
7548 {
7549 inst.instruction |= inst.operands[0].reg << 12;
7550 inst.instruction |= inst.operands[1].reg << 16;
7551 inst.instruction |= inst.operands[2].reg;
7552 inst.instruction |= inst.operands[3].imm << 10;
7553 }
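/* Example, a sketch assuming the operand parser has already reduced the
   rotation to the range 0-3 (rotation / 8): "sxtah r0, r1, r2, ror #16"
   arrives here with operands[3].imm == 2, which lands in bits 11:10.  */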
7554
7555 /* ARM V6 SXTH.
7556
7557 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7558 Condition defaults to COND_ALWAYS.
7559 Error if any register uses R15. */
7560
7561 static void
7562 do_sxth (void)
7563 {
7564 inst.instruction |= inst.operands[0].reg << 12;
7565 inst.instruction |= inst.operands[1].reg;
7566 inst.instruction |= inst.operands[2].imm << 10;
7567 }
7568 \f
7569 /* VFP instructions. In a logical order: SP variant first, monad
7570 before dyad, arithmetic then move then load/store. */
7571
7572 static void
7573 do_vfp_sp_monadic (void)
7574 {
7575 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7576 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7577 }
7578
7579 static void
7580 do_vfp_sp_dyadic (void)
7581 {
7582 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7583 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7584 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7585 }
7586
7587 static void
7588 do_vfp_sp_compare_z (void)
7589 {
7590 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7591 }
7592
7593 static void
7594 do_vfp_dp_sp_cvt (void)
7595 {
7596 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7597 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7598 }
7599
7600 static void
7601 do_vfp_sp_dp_cvt (void)
7602 {
7603 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7604 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7605 }
7606
7607 static void
7608 do_vfp_reg_from_sp (void)
7609 {
7610 inst.instruction |= inst.operands[0].reg << 12;
7611 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7612 }
7613
7614 static void
7615 do_vfp_reg2_from_sp2 (void)
7616 {
7617 constraint (inst.operands[2].imm != 2,
7618 _("only two consecutive VFP SP registers allowed here"));
7619 inst.instruction |= inst.operands[0].reg << 12;
7620 inst.instruction |= inst.operands[1].reg << 16;
7621 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7622 }
7623
7624 static void
7625 do_vfp_sp_from_reg (void)
7626 {
7627 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
7628 inst.instruction |= inst.operands[1].reg << 12;
7629 }
7630
7631 static void
7632 do_vfp_sp2_from_reg2 (void)
7633 {
7634 constraint (inst.operands[0].imm != 2,
7635 _("only two consecutive VFP SP registers allowed here"));
7636 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
7637 inst.instruction |= inst.operands[1].reg << 12;
7638 inst.instruction |= inst.operands[2].reg << 16;
7639 }
7640
7641 static void
7642 do_vfp_sp_ldst (void)
7643 {
7644 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7645 encode_arm_cp_address (1, FALSE, TRUE, 0);
7646 }
7647
7648 static void
7649 do_vfp_dp_ldst (void)
7650 {
7651 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7652 encode_arm_cp_address (1, FALSE, TRUE, 0);
7653 }
7654
7655
7656 static void
7657 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7658 {
7659 if (inst.operands[0].writeback)
7660 inst.instruction |= WRITE_BACK;
7661 else
7662 constraint (ldstm_type != VFP_LDSTMIA,
7663 _("this addressing mode requires base-register writeback"));
7664 inst.instruction |= inst.operands[0].reg << 16;
7665 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7666 inst.instruction |= inst.operands[1].imm;
7667 }
7668
7669 static void
7670 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7671 {
7672 int count;
7673
7674 if (inst.operands[0].writeback)
7675 inst.instruction |= WRITE_BACK;
7676 else
7677 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7678 _("this addressing mode requires base-register writeback"));
7679
7680 inst.instruction |= inst.operands[0].reg << 16;
7681 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7682
7683 count = inst.operands[1].imm << 1;
7684 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7685 count += 1;
7686
7687 inst.instruction |= count;
7688 }
7689
7690 static void
7691 do_vfp_sp_ldstmia (void)
7692 {
7693 vfp_sp_ldstm (VFP_LDSTMIA);
7694 }
7695
7696 static void
7697 do_vfp_sp_ldstmdb (void)
7698 {
7699 vfp_sp_ldstm (VFP_LDSTMDB);
7700 }
7701
7702 static void
7703 do_vfp_dp_ldstmia (void)
7704 {
7705 vfp_dp_ldstm (VFP_LDSTMIA);
7706 }
7707
7708 static void
7709 do_vfp_dp_ldstmdb (void)
7710 {
7711 vfp_dp_ldstm (VFP_LDSTMDB);
7712 }
7713
7714 static void
7715 do_vfp_xp_ldstmia (void)
7716 {
7717 vfp_dp_ldstm (VFP_LDSTMIAX);
7718 }
7719
7720 static void
7721 do_vfp_xp_ldstmdb (void)
7722 {
7723 vfp_dp_ldstm (VFP_LDSTMDBX);
7724 }
7725
7726 static void
7727 do_vfp_dp_rd_rm (void)
7728 {
7729 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7730 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7731 }
7732
7733 static void
7734 do_vfp_dp_rn_rd (void)
7735 {
7736 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7737 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7738 }
7739
7740 static void
7741 do_vfp_dp_rd_rn (void)
7742 {
7743 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7744 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7745 }
7746
7747 static void
7748 do_vfp_dp_rd_rn_rm (void)
7749 {
7750 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7751 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7752 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7753 }
7754
7755 static void
7756 do_vfp_dp_rd (void)
7757 {
7758 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7759 }
7760
7761 static void
7762 do_vfp_dp_rm_rd_rn (void)
7763 {
7764 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7765 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7766 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7767 }
7768
7769 /* VFPv3 instructions. */
7770 static void
7771 do_vfp_sp_const (void)
7772 {
7773 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7774 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7775 inst.instruction |= (inst.operands[1].imm & 0x0f);
7776 }
7777
7778 static void
7779 do_vfp_dp_const (void)
7780 {
7781 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7782 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
7783 inst.instruction |= (inst.operands[1].imm & 0x0f);
7784 }
7785
7786 static void
7787 vfp_conv (int srcsize)
7788 {
7789 unsigned immbits = srcsize - inst.operands[1].imm;
7790 inst.instruction |= (immbits & 1) << 5;
7791 inst.instruction |= (immbits >> 1);
7792 }
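/* Example, for illustration: a 32-bit fixed-point conversion with 16
   fraction bits calls vfp_conv (32) with operands[1].imm == 16, so
   immbits == 16; bit 0 of immbits (here 0) goes to instruction bit 5 and
   immbits >> 1 (8) fills bits 3:0.  */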
7793
7794 static void
7795 do_vfp_sp_conv_16 (void)
7796 {
7797 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7798 vfp_conv (16);
7799 }
7800
7801 static void
7802 do_vfp_dp_conv_16 (void)
7803 {
7804 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7805 vfp_conv (16);
7806 }
7807
7808 static void
7809 do_vfp_sp_conv_32 (void)
7810 {
7811 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7812 vfp_conv (32);
7813 }
7814
7815 static void
7816 do_vfp_dp_conv_32 (void)
7817 {
7818 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7819 vfp_conv (32);
7820 }
7821
7822 \f
7823 /* FPA instructions. Also in a logical order. */
7824
7825 static void
7826 do_fpa_cmp (void)
7827 {
7828 inst.instruction |= inst.operands[0].reg << 16;
7829 inst.instruction |= inst.operands[1].reg;
7830 }
7831
7832 static void
7833 do_fpa_ldmstm (void)
7834 {
7835 inst.instruction |= inst.operands[0].reg << 12;
7836 switch (inst.operands[1].imm)
7837 {
7838 case 1: inst.instruction |= CP_T_X; break;
7839 case 2: inst.instruction |= CP_T_Y; break;
7840 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7841 case 4: break;
7842 default: abort ();
7843 }
7844
7845 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7846 {
7847 /* The instruction specified "ea" or "fd", so we can only accept
7848 [Rn]{!}. The instruction does not really support stacking or
7849 unstacking, so we have to emulate these by setting appropriate
7850 bits and offsets. */
7851 constraint (inst.reloc.exp.X_op != O_constant
7852 || inst.reloc.exp.X_add_number != 0,
7853 _("this instruction does not support indexing"));
7854
7855 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7856 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7857
7858 if (!(inst.instruction & INDEX_UP))
7859 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7860
7861 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7862 {
7863 inst.operands[2].preind = 0;
7864 inst.operands[2].postind = 1;
7865 }
7866 }
7867
7868 encode_arm_cp_address (2, TRUE, TRUE, 0);
7869 }
7870
7871 \f
7872 /* iWMMXt instructions: strictly in alphabetical order. */
7873
7874 static void
7875 do_iwmmxt_tandorc (void)
7876 {
7877 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7878 }
7879
7880 static void
7881 do_iwmmxt_textrc (void)
7882 {
7883 inst.instruction |= inst.operands[0].reg << 12;
7884 inst.instruction |= inst.operands[1].imm;
7885 }
7886
7887 static void
7888 do_iwmmxt_textrm (void)
7889 {
7890 inst.instruction |= inst.operands[0].reg << 12;
7891 inst.instruction |= inst.operands[1].reg << 16;
7892 inst.instruction |= inst.operands[2].imm;
7893 }
7894
7895 static void
7896 do_iwmmxt_tinsr (void)
7897 {
7898 inst.instruction |= inst.operands[0].reg << 16;
7899 inst.instruction |= inst.operands[1].reg << 12;
7900 inst.instruction |= inst.operands[2].imm;
7901 }
7902
7903 static void
7904 do_iwmmxt_tmia (void)
7905 {
7906 inst.instruction |= inst.operands[0].reg << 5;
7907 inst.instruction |= inst.operands[1].reg;
7908 inst.instruction |= inst.operands[2].reg << 12;
7909 }
7910
7911 static void
7912 do_iwmmxt_waligni (void)
7913 {
7914 inst.instruction |= inst.operands[0].reg << 12;
7915 inst.instruction |= inst.operands[1].reg << 16;
7916 inst.instruction |= inst.operands[2].reg;
7917 inst.instruction |= inst.operands[3].imm << 20;
7918 }
7919
7920 static void
7921 do_iwmmxt_wmerge (void)
7922 {
7923 inst.instruction |= inst.operands[0].reg << 12;
7924 inst.instruction |= inst.operands[1].reg << 16;
7925 inst.instruction |= inst.operands[2].reg;
7926 inst.instruction |= inst.operands[3].imm << 21;
7927 }
7928
7929 static void
7930 do_iwmmxt_wmov (void)
7931 {
7932 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7933 inst.instruction |= inst.operands[0].reg << 12;
7934 inst.instruction |= inst.operands[1].reg << 16;
7935 inst.instruction |= inst.operands[1].reg;
7936 }
7937
7938 static void
7939 do_iwmmxt_wldstbh (void)
7940 {
7941 int reloc;
7942 inst.instruction |= inst.operands[0].reg << 12;
7943 if (thumb_mode)
7944 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7945 else
7946 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7947 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7948 }
7949
7950 static void
7951 do_iwmmxt_wldstw (void)
7952 {
7953 /* RIWR_RIWC clears .isreg for a control register. */
7954 if (!inst.operands[0].isreg)
7955 {
7956 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7957 inst.instruction |= 0xf0000000;
7958 }
7959
7960 inst.instruction |= inst.operands[0].reg << 12;
7961 encode_arm_cp_address (1, TRUE, TRUE, 0);
7962 }
7963
7964 static void
7965 do_iwmmxt_wldstd (void)
7966 {
7967 inst.instruction |= inst.operands[0].reg << 12;
7968 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
7969 && inst.operands[1].immisreg)
7970 {
7971 inst.instruction &= ~0x1a000ff;
7972 inst.instruction |= (0xf << 28);
7973 if (inst.operands[1].preind)
7974 inst.instruction |= PRE_INDEX;
7975 if (!inst.operands[1].negative)
7976 inst.instruction |= INDEX_UP;
7977 if (inst.operands[1].writeback)
7978 inst.instruction |= WRITE_BACK;
7979 inst.instruction |= inst.operands[1].reg << 16;
7980 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7981 inst.instruction |= inst.operands[1].imm;
7982 }
7983 else
7984 encode_arm_cp_address (1, TRUE, FALSE, 0);
7985 }
7986
7987 static void
7988 do_iwmmxt_wshufh (void)
7989 {
7990 inst.instruction |= inst.operands[0].reg << 12;
7991 inst.instruction |= inst.operands[1].reg << 16;
7992 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7993 inst.instruction |= (inst.operands[2].imm & 0x0f);
7994 }
7995
7996 static void
7997 do_iwmmxt_wzero (void)
7998 {
7999 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8000 inst.instruction |= inst.operands[0].reg;
8001 inst.instruction |= inst.operands[0].reg << 12;
8002 inst.instruction |= inst.operands[0].reg << 16;
8003 }
8004
8005 static void
8006 do_iwmmxt_wrwrwr_or_imm5 (void)
8007 {
8008 if (inst.operands[2].isreg)
8009 do_rd_rn_rm ();
8010 else {
8011 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
8012 _("immediate operand requires iWMMXt2"));
8013 do_rd_rn ();
8014 if (inst.operands[2].imm == 0)
8015 {
8016 switch ((inst.instruction >> 20) & 0xf)
8017 {
8018 case 4:
8019 case 5:
8020 case 6:
8021 case 7:
8022 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8023 inst.operands[2].imm = 16;
8024 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
8025 break;
8026 case 8:
8027 case 9:
8028 case 10:
8029 case 11:
8030 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8031 inst.operands[2].imm = 32;
8032 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
8033 break;
8034 case 12:
8035 case 13:
8036 case 14:
8037 case 15:
8038 {
8039 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8040 unsigned long wrn;
8041 wrn = (inst.instruction >> 16) & 0xf;
8042 inst.instruction &= 0xff0fff0f;
8043 inst.instruction |= wrn;
8044 /* Bail out here; the instruction is now assembled. */
8045 return;
8046 }
8047 }
8048 }
8049 /* Map 32 -> 0, etc. */
8050 inst.operands[2].imm &= 0x1f;
8051 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
8052 }
8053 }
8054 \f
8055 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8056 operations first, then control, shift, and load/store. */
8057
8058 /* Insns like "foo X,Y,Z". */
8059
8060 static void
8061 do_mav_triple (void)
8062 {
8063 inst.instruction |= inst.operands[0].reg << 16;
8064 inst.instruction |= inst.operands[1].reg;
8065 inst.instruction |= inst.operands[2].reg << 12;
8066 }
8067
8068 /* Insns like "foo W,X,Y,Z".
8069 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8070
8071 static void
8072 do_mav_quad (void)
8073 {
8074 inst.instruction |= inst.operands[0].reg << 5;
8075 inst.instruction |= inst.operands[1].reg << 12;
8076 inst.instruction |= inst.operands[2].reg << 16;
8077 inst.instruction |= inst.operands[3].reg;
8078 }
8079
8080 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8081 static void
8082 do_mav_dspsc (void)
8083 {
8084 inst.instruction |= inst.operands[1].reg << 12;
8085 }
8086
8087 /* Maverick shift immediate instructions.
8088 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8089 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8090
8091 static void
8092 do_mav_shift (void)
8093 {
8094 int imm = inst.operands[2].imm;
8095
8096 inst.instruction |= inst.operands[0].reg << 12;
8097 inst.instruction |= inst.operands[1].reg << 16;
8098
8099 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8100 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8101 Bit 4 should be 0. */
8102 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8103
8104 inst.instruction |= imm;
8105 }
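/* Example, for illustration: a shift operand of 0x3C becomes
   (0x3C & 0xf) | ((0x3C & 0x70) << 1) == 0x6C, i.e. 0xC in bits 3:0,
   0x3 in bits 7:5 and a zero in bit 4, exactly as described above.  */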
8106 \f
8107 /* XScale instructions. Also sorted arithmetic before move. */
8108
8109 /* Xscale multiply-accumulate (argument parse)
8110 MIAcc acc0,Rm,Rs
8111 MIAPHcc acc0,Rm,Rs
8112 MIAxycc acc0,Rm,Rs. */
8113
8114 static void
8115 do_xsc_mia (void)
8116 {
8117 inst.instruction |= inst.operands[1].reg;
8118 inst.instruction |= inst.operands[2].reg << 12;
8119 }
8120
8121 /* Xscale move-accumulator-register (argument parse)
8122
8123 MARcc acc0,RdLo,RdHi. */
8124
8125 static void
8126 do_xsc_mar (void)
8127 {
8128 inst.instruction |= inst.operands[1].reg << 12;
8129 inst.instruction |= inst.operands[2].reg << 16;
8130 }
8131
8132 /* Xscale move-register-accumulator (argument parse)
8133
8134 MRAcc RdLo,RdHi,acc0. */
8135
8136 static void
8137 do_xsc_mra (void)
8138 {
8139 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8140 inst.instruction |= inst.operands[0].reg << 12;
8141 inst.instruction |= inst.operands[1].reg << 16;
8142 }
8143 \f
8144 /* Encoding functions relevant only to Thumb. */
8145
8146 /* inst.operands[i] is a shifted-register operand; encode
8147 it into inst.instruction in the format used by Thumb32. */
8148
8149 static void
8150 encode_thumb32_shifted_operand (int i)
8151 {
8152 unsigned int value = inst.reloc.exp.X_add_number;
8153 unsigned int shift = inst.operands[i].shift_kind;
8154
8155 constraint (inst.operands[i].immisreg,
8156 _("shift by register not allowed in thumb mode"));
8157 inst.instruction |= inst.operands[i].reg;
8158 if (shift == SHIFT_RRX)
8159 inst.instruction |= SHIFT_ROR << 4;
8160 else
8161 {
8162 constraint (inst.reloc.exp.X_op != O_constant,
8163 _("expression too complex"));
8164
8165 constraint (value > 32
8166 || (value == 32 && (shift == SHIFT_LSL
8167 || shift == SHIFT_ROR)),
8168 _("shift expression is too large"));
8169
8170 if (value == 0)
8171 shift = SHIFT_LSL;
8172 else if (value == 32)
8173 value = 0;
8174
8175 inst.instruction |= shift << 4;
8176 inst.instruction |= (value & 0x1c) << 10;
8177 inst.instruction |= (value & 0x03) << 6;
8178 }
8179 }
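/* Example, for illustration: an "lsl #5" shifted register reaches here
   with value == 5; (5 & 0x1c) << 10 puts 1 into the imm3 field (bits
   14:12) and (5 & 0x03) << 6 puts 1 into the imm2 field (bits 7:6),
   reassembling the 5-bit shift amount as imm3:imm2 == 00101.  */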
8180
8181
8182 /* inst.operands[i] was set up by parse_address. Encode it into a
8183 Thumb32 format load or store instruction. Reject forms that cannot
8184 be used with such instructions. If is_t is true, reject forms that
8185 cannot be used with a T instruction; if is_d is true, reject forms
8186 that cannot be used with a D instruction. */
8187
8188 static void
8189 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
8190 {
8191 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
8192
8193 constraint (!inst.operands[i].isreg,
8194 _("Instruction does not support =N addresses"));
8195
8196 inst.instruction |= inst.operands[i].reg << 16;
8197 if (inst.operands[i].immisreg)
8198 {
8199 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
8200 constraint (is_t || is_d, _("cannot use register index with this instruction"));
8201 constraint (inst.operands[i].negative,
8202 _("Thumb does not support negative register indexing"));
8203 constraint (inst.operands[i].postind,
8204 _("Thumb does not support register post-indexing"));
8205 constraint (inst.operands[i].writeback,
8206 _("Thumb does not support register indexing with writeback"));
8207 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
8208 _("Thumb supports only LSL in shifted register indexing"));
8209
8210 inst.instruction |= inst.operands[i].imm;
8211 if (inst.operands[i].shifted)
8212 {
8213 constraint (inst.reloc.exp.X_op != O_constant,
8214 _("expression too complex"));
8215 constraint (inst.reloc.exp.X_add_number < 0
8216 || inst.reloc.exp.X_add_number > 3,
8217 _("shift out of range"));
8218 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8219 }
8220 inst.reloc.type = BFD_RELOC_UNUSED;
8221 }
8222 else if (inst.operands[i].preind)
8223 {
8224 constraint (is_pc && inst.operands[i].writeback,
8225 _("cannot use writeback with PC-relative addressing"));
8226 constraint (is_t && inst.operands[i].writeback,
8227 _("cannot use writeback with this instruction"));
8228
8229 if (is_d)
8230 {
8231 inst.instruction |= 0x01000000;
8232 if (inst.operands[i].writeback)
8233 inst.instruction |= 0x00200000;
8234 }
8235 else
8236 {
8237 inst.instruction |= 0x00000c00;
8238 if (inst.operands[i].writeback)
8239 inst.instruction |= 0x00000100;
8240 }
8241 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8242 }
8243 else if (inst.operands[i].postind)
8244 {
8245 assert (inst.operands[i].writeback);
8246 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
8247 constraint (is_t, _("cannot use post-indexing with this instruction"));
8248
8249 if (is_d)
8250 inst.instruction |= 0x00200000;
8251 else
8252 inst.instruction |= 0x00000900;
8253 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8254 }
8255 else /* unindexed - only for coprocessor */
8256 inst.error = _("instruction does not accept unindexed addressing");
8257 }
8258
8259 /* Table of Thumb instructions which exist in both 16- and 32-bit
8260 encodings (the latter only in post-V6T2 cores). The index is the
8261 value used in the insns table below. When there is more than one
8262 possible 16-bit encoding for the instruction, this table always
8263 holds variant (1).
8264 Also contains several pseudo-instructions used during relaxation. */
8265 #define T16_32_TAB \
8266 X(adc, 4140, eb400000), \
8267 X(adcs, 4140, eb500000), \
8268 X(add, 1c00, eb000000), \
8269 X(adds, 1c00, eb100000), \
8270 X(addi, 0000, f1000000), \
8271 X(addis, 0000, f1100000), \
8272 X(add_pc,000f, f20f0000), \
8273 X(add_sp,000d, f10d0000), \
8274 X(adr, 000f, f20f0000), \
8275 X(and, 4000, ea000000), \
8276 X(ands, 4000, ea100000), \
8277 X(asr, 1000, fa40f000), \
8278 X(asrs, 1000, fa50f000), \
8279 X(b, e000, f000b000), \
8280 X(bcond, d000, f0008000), \
8281 X(bic, 4380, ea200000), \
8282 X(bics, 4380, ea300000), \
8283 X(cmn, 42c0, eb100f00), \
8284 X(cmp, 2800, ebb00f00), \
8285 X(cpsie, b660, f3af8400), \
8286 X(cpsid, b670, f3af8600), \
8287 X(cpy, 4600, ea4f0000), \
8288 X(dec_sp,80dd, f1ad0d00), \
8289 X(eor, 4040, ea800000), \
8290 X(eors, 4040, ea900000), \
8291 X(inc_sp,00dd, f10d0d00), \
8292 X(ldmia, c800, e8900000), \
8293 X(ldr, 6800, f8500000), \
8294 X(ldrb, 7800, f8100000), \
8295 X(ldrh, 8800, f8300000), \
8296 X(ldrsb, 5600, f9100000), \
8297 X(ldrsh, 5e00, f9300000), \
8298 X(ldr_pc,4800, f85f0000), \
8299 X(ldr_pc2,4800, f85f0000), \
8300 X(ldr_sp,9800, f85d0000), \
8301 X(lsl, 0000, fa00f000), \
8302 X(lsls, 0000, fa10f000), \
8303 X(lsr, 0800, fa20f000), \
8304 X(lsrs, 0800, fa30f000), \
8305 X(mov, 2000, ea4f0000), \
8306 X(movs, 2000, ea5f0000), \
8307 X(mul, 4340, fb00f000), \
8308 X(muls, 4340, ffffffff), /* no 32b muls */ \
8309 X(mvn, 43c0, ea6f0000), \
8310 X(mvns, 43c0, ea7f0000), \
8311 X(neg, 4240, f1c00000), /* rsb #0 */ \
8312 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8313 X(orr, 4300, ea400000), \
8314 X(orrs, 4300, ea500000), \
8315 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8316 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8317 X(rev, ba00, fa90f080), \
8318 X(rev16, ba40, fa90f090), \
8319 X(revsh, bac0, fa90f0b0), \
8320 X(ror, 41c0, fa60f000), \
8321 X(rors, 41c0, fa70f000), \
8322 X(sbc, 4180, eb600000), \
8323 X(sbcs, 4180, eb700000), \
8324 X(stmia, c000, e8800000), \
8325 X(str, 6000, f8400000), \
8326 X(strb, 7000, f8000000), \
8327 X(strh, 8000, f8200000), \
8328 X(str_sp,9000, f84d0000), \
8329 X(sub, 1e00, eba00000), \
8330 X(subs, 1e00, ebb00000), \
8331 X(subi, 8000, f1a00000), \
8332 X(subis, 8000, f1b00000), \
8333 X(sxtb, b240, fa4ff080), \
8334 X(sxth, b200, fa0ff080), \
8335 X(tst, 4200, ea100f00), \
8336 X(uxtb, b2c0, fa5ff080), \
8337 X(uxth, b280, fa1ff080), \
8338 X(nop, bf00, f3af8000), \
8339 X(yield, bf10, f3af8001), \
8340 X(wfe, bf20, f3af8002), \
8341 X(wfi, bf30, f3af8003), \
8342   X(sev,   bf40, f3af8004),
8343
8344 /* To catch errors in encoding functions, the codes are all offset by
8345 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8346 as 16-bit instructions. */
8347 #define X(a,b,c) T_MNEM_##a
8348 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8349 #undef X
8350
8351 #define X(a,b,c) 0x##b
8352 static const unsigned short thumb_op16[] = { T16_32_TAB };
8353 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8354 #undef X
8355
8356 #define X(a,b,c) 0x##c
8357 static const unsigned int thumb_op32[] = { T16_32_TAB };
8358 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8359 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8360 #undef X
8361 #undef T16_32_TAB
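
/* For example, THUMB_OP16 (T_MNEM_add) is 0x1c00 and THUMB_OP32 (T_MNEM_add)
   is 0xeb000000; THUMB_SETS_FLAGS is true for T_MNEM_adds (0xeb100000) but
   not for T_MNEM_add, since bit 20 is the S bit of the 32-bit encodings.  */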
8362
8363 /* Thumb instruction encoders, in alphabetical order. */
8364
8365 /* ADDW or SUBW. */
8366 static void
8367 do_t_add_sub_w (void)
8368 {
8369 int Rd, Rn;
8370
8371 Rd = inst.operands[0].reg;
8372 Rn = inst.operands[1].reg;
8373
8374 constraint (Rd == 15, _("PC not allowed as destination"));
8375 inst.instruction |= (Rn << 16) | (Rd << 8);
8376 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8377 }
8378
8379 /* Parse an add or subtract instruction. We get here with inst.instruction
8380    equalling any of T_MNEM_add, adds, sub, or subs.  */
8381
8382 static void
8383 do_t_add_sub (void)
8384 {
8385 int Rd, Rs, Rn;
8386
8387 Rd = inst.operands[0].reg;
8388 Rs = (inst.operands[1].present
8389 ? inst.operands[1].reg /* Rd, Rs, foo */
8390 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8391
8392 if (unified_syntax)
8393 {
8394 bfd_boolean flags;
8395 bfd_boolean narrow;
8396 int opcode;
8397
8398 flags = (inst.instruction == T_MNEM_adds
8399 || inst.instruction == T_MNEM_subs);
8400 if (flags)
8401 narrow = (current_it_mask == 0);
8402 else
8403 narrow = (current_it_mask != 0);
8404 if (!inst.operands[2].isreg)
8405 {
8406 int add;
8407
8408 add = (inst.instruction == T_MNEM_add
8409 || inst.instruction == T_MNEM_adds);
8410 opcode = 0;
8411 if (inst.size_req != 4)
8412 {
8413 /* Attempt to use a narrow opcode, with relaxation if
8414 appropriate. */
8415 if (Rd == REG_SP && Rs == REG_SP && !flags)
8416 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
8417 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
8418 opcode = T_MNEM_add_sp;
8419 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
8420 opcode = T_MNEM_add_pc;
8421 else if (Rd <= 7 && Rs <= 7 && narrow)
8422 {
8423 if (flags)
8424 opcode = add ? T_MNEM_addis : T_MNEM_subis;
8425 else
8426 opcode = add ? T_MNEM_addi : T_MNEM_subi;
8427 }
8428 if (opcode)
8429 {
8430 		  inst.instruction = THUMB_OP16 (opcode);
8431 inst.instruction |= (Rd << 4) | Rs;
8432 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8433 if (inst.size_req != 2)
8434 inst.relax = opcode;
8435 }
8436 else
8437 constraint (inst.size_req == 2, BAD_HIREG);
8438 }
8439 if (inst.size_req == 4
8440 || (inst.size_req != 2 && !opcode))
8441 {
8442 if (Rs == REG_PC)
8443 {
8444 /* Always use addw/subw. */
8445 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
8446 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8447 }
8448 else
8449 {
8450 inst.instruction = THUMB_OP32 (inst.instruction);
8451 inst.instruction = (inst.instruction & 0xe1ffffff)
8452 | 0x10000000;
8453 if (flags)
8454 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8455 else
8456 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
8457 }
8458 inst.instruction |= Rd << 8;
8459 inst.instruction |= Rs << 16;
8460 }
8461 }
8462 else
8463 {
8464 Rn = inst.operands[2].reg;
8465 /* See if we can do this with a 16-bit instruction. */
8466 if (!inst.operands[2].shifted && inst.size_req != 4)
8467 {
8468 if (Rd > 7 || Rs > 7 || Rn > 7)
8469 narrow = FALSE;
8470
8471 if (narrow)
8472 {
8473 inst.instruction = ((inst.instruction == T_MNEM_adds
8474 || inst.instruction == T_MNEM_add)
8475 ? T_OPCODE_ADD_R3
8476 : T_OPCODE_SUB_R3);
8477 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8478 return;
8479 }
8480
8481 if (inst.instruction == T_MNEM_add)
8482 {
8483 if (Rd == Rs)
8484 {
8485 inst.instruction = T_OPCODE_ADD_HI;
8486 inst.instruction |= (Rd & 8) << 4;
8487 inst.instruction |= (Rd & 7);
8488 inst.instruction |= Rn << 3;
8489 return;
8490 }
8491 /* ... because addition is commutative! */
8492 else if (Rd == Rn)
8493 {
8494 inst.instruction = T_OPCODE_ADD_HI;
8495 inst.instruction |= (Rd & 8) << 4;
8496 inst.instruction |= (Rd & 7);
8497 inst.instruction |= Rs << 3;
8498 return;
8499 }
8500 }
8501 }
8502 /* If we get here, it can't be done in 16 bits. */
8503 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
8504 _("shift must be constant"));
8505 inst.instruction = THUMB_OP32 (inst.instruction);
8506 inst.instruction |= Rd << 8;
8507 inst.instruction |= Rs << 16;
8508 encode_thumb32_shifted_operand (2);
8509 }
8510 }
8511 else
8512 {
8513 constraint (inst.instruction == T_MNEM_adds
8514 || inst.instruction == T_MNEM_subs,
8515 BAD_THUMB32);
8516
8517 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
8518 {
8519 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
8520 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
8521 BAD_HIREG);
8522
8523 inst.instruction = (inst.instruction == T_MNEM_add
8524 ? 0x0000 : 0x8000);
8525 inst.instruction |= (Rd << 4) | Rs;
8526 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8527 return;
8528 }
8529
8530 Rn = inst.operands[2].reg;
8531 constraint (inst.operands[2].shifted, _("unshifted register required"));
8532
8533 /* We now have Rd, Rs, and Rn set to registers. */
8534 if (Rd > 7 || Rs > 7 || Rn > 7)
8535 {
8536 /* Can't do this for SUB. */
8537 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
8538 inst.instruction = T_OPCODE_ADD_HI;
8539 inst.instruction |= (Rd & 8) << 4;
8540 inst.instruction |= (Rd & 7);
8541 if (Rs == Rd)
8542 inst.instruction |= Rn << 3;
8543 else if (Rn == Rd)
8544 inst.instruction |= Rs << 3;
8545 else
8546 constraint (1, _("dest must overlap one source register"));
8547 }
8548 else
8549 {
8550 inst.instruction = (inst.instruction == T_MNEM_add
8551 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
8552 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8553 }
8554 }
8555 }
8556
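/* ADR (argument parse).  Use the relaxable 16-bit form when a low register
   is the destination and no size was requested; otherwise pick the 32-bit
   PC-relative form or the plain 16-bit form as appropriate.  */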
8557 static void
8558 do_t_adr (void)
8559 {
8560 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
8561 {
8562 /* Defer to section relaxation. */
8563 inst.relax = inst.instruction;
8564 inst.instruction = THUMB_OP16 (inst.instruction);
8565 inst.instruction |= inst.operands[0].reg << 4;
8566 }
8567 else if (unified_syntax && inst.size_req != 2)
8568 {
8569 /* Generate a 32-bit opcode. */
8570 inst.instruction = THUMB_OP32 (inst.instruction);
8571 inst.instruction |= inst.operands[0].reg << 8;
8572 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
8573 inst.reloc.pc_rel = 1;
8574 }
8575 else
8576 {
8577 /* Generate a 16-bit opcode. */
8578 inst.instruction = THUMB_OP16 (inst.instruction);
8579 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8580 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
8581 inst.reloc.pc_rel = 1;
8582
8583 inst.instruction |= inst.operands[0].reg << 4;
8584 }
8585 }
8586
8587 /* Arithmetic instructions for which there is just one 16-bit
8588 instruction encoding, and it allows only two low registers.
8589 For maximal compatibility with ARM syntax, we allow three register
8590 operands even when Thumb-32 instructions are not available, as long
8591 as the first two are identical. For instance, both "sbc r0,r1" and
8592 "sbc r0,r0,r1" are allowed. */
8593 static void
8594 do_t_arit3 (void)
8595 {
8596 int Rd, Rs, Rn;
8597
8598 Rd = inst.operands[0].reg;
8599 Rs = (inst.operands[1].present
8600 ? inst.operands[1].reg /* Rd, Rs, foo */
8601 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8602 Rn = inst.operands[2].reg;
8603
8604 if (unified_syntax)
8605 {
8606 if (!inst.operands[2].isreg)
8607 {
8608 /* For an immediate, we always generate a 32-bit opcode;
8609 section relaxation will shrink it later if possible. */
8610 inst.instruction = THUMB_OP32 (inst.instruction);
8611 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8612 inst.instruction |= Rd << 8;
8613 inst.instruction |= Rs << 16;
8614 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8615 }
8616 else
8617 {
8618 bfd_boolean narrow;
8619
8620 /* See if we can do this with a 16-bit instruction. */
8621 if (THUMB_SETS_FLAGS (inst.instruction))
8622 narrow = current_it_mask == 0;
8623 else
8624 narrow = current_it_mask != 0;
8625
8626 if (Rd > 7 || Rn > 7 || Rs > 7)
8627 narrow = FALSE;
8628 if (inst.operands[2].shifted)
8629 narrow = FALSE;
8630 if (inst.size_req == 4)
8631 narrow = FALSE;
8632
8633 if (narrow
8634 && Rd == Rs)
8635 {
8636 inst.instruction = THUMB_OP16 (inst.instruction);
8637 inst.instruction |= Rd;
8638 inst.instruction |= Rn << 3;
8639 return;
8640 }
8641
8642 /* If we get here, it can't be done in 16 bits. */
8643 constraint (inst.operands[2].shifted
8644 && inst.operands[2].immisreg,
8645 _("shift must be constant"));
8646 inst.instruction = THUMB_OP32 (inst.instruction);
8647 inst.instruction |= Rd << 8;
8648 inst.instruction |= Rs << 16;
8649 encode_thumb32_shifted_operand (2);
8650 }
8651 }
8652 else
8653 {
8654 /* On its face this is a lie - the instruction does set the
8655 flags. However, the only supported mnemonic in this mode
8656 says it doesn't. */
8657 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8658
8659 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8660 _("unshifted register required"));
8661 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8662 constraint (Rd != Rs,
8663 _("dest and source1 must be the same register"));
8664
8665 inst.instruction = THUMB_OP16 (inst.instruction);
8666 inst.instruction |= Rd;
8667 inst.instruction |= Rn << 3;
8668 }
8669 }
8670
8671 /* Similarly, but for instructions where the arithmetic operation is
8672 commutative, so we can allow either of them to be different from
8673 the destination operand in a 16-bit instruction. For instance, all
8674 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8675 accepted. */
8676 static void
8677 do_t_arit3c (void)
8678 {
8679 int Rd, Rs, Rn;
8680
8681 Rd = inst.operands[0].reg;
8682 Rs = (inst.operands[1].present
8683 ? inst.operands[1].reg /* Rd, Rs, foo */
8684 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8685 Rn = inst.operands[2].reg;
8686
8687 if (unified_syntax)
8688 {
8689 if (!inst.operands[2].isreg)
8690 {
8691 /* For an immediate, we always generate a 32-bit opcode;
8692 section relaxation will shrink it later if possible. */
8693 inst.instruction = THUMB_OP32 (inst.instruction);
8694 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8695 inst.instruction |= Rd << 8;
8696 inst.instruction |= Rs << 16;
8697 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8698 }
8699 else
8700 {
8701 bfd_boolean narrow;
8702
8703 /* See if we can do this with a 16-bit instruction. */
8704 if (THUMB_SETS_FLAGS (inst.instruction))
8705 narrow = current_it_mask == 0;
8706 else
8707 narrow = current_it_mask != 0;
8708
8709 if (Rd > 7 || Rn > 7 || Rs > 7)
8710 narrow = FALSE;
8711 if (inst.operands[2].shifted)
8712 narrow = FALSE;
8713 if (inst.size_req == 4)
8714 narrow = FALSE;
8715
8716 if (narrow)
8717 {
8718 if (Rd == Rs)
8719 {
8720 inst.instruction = THUMB_OP16 (inst.instruction);
8721 inst.instruction |= Rd;
8722 inst.instruction |= Rn << 3;
8723 return;
8724 }
8725 if (Rd == Rn)
8726 {
8727 inst.instruction = THUMB_OP16 (inst.instruction);
8728 inst.instruction |= Rd;
8729 inst.instruction |= Rs << 3;
8730 return;
8731 }
8732 }
8733
8734 /* If we get here, it can't be done in 16 bits. */
8735 constraint (inst.operands[2].shifted
8736 && inst.operands[2].immisreg,
8737 _("shift must be constant"));
8738 inst.instruction = THUMB_OP32 (inst.instruction);
8739 inst.instruction |= Rd << 8;
8740 inst.instruction |= Rs << 16;
8741 encode_thumb32_shifted_operand (2);
8742 }
8743 }
8744 else
8745 {
8746 /* On its face this is a lie - the instruction does set the
8747 flags. However, the only supported mnemonic in this mode
8748 says it doesn't. */
8749 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8750
8751 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8752 _("unshifted register required"));
8753 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8754
8755 inst.instruction = THUMB_OP16 (inst.instruction);
8756 inst.instruction |= Rd;
8757
8758 if (Rd == Rs)
8759 inst.instruction |= Rn << 3;
8760 else if (Rd == Rn)
8761 inst.instruction |= Rs << 3;
8762 else
8763 constraint (1, _("dest must overlap one source register"));
8764 }
8765 }
8766
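/* Barrier instructions: encode the optional barrier-option operand,
   defaulting to SY (#0xf) when it is omitted.  */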
8767 static void
8768 do_t_barrier (void)
8769 {
8770 if (inst.operands[0].present)
8771 {
8772 constraint ((inst.instruction & 0xf0) != 0x40
8773 && inst.operands[0].imm != 0xf,
8774 		      _("bad barrier type"));
8775 inst.instruction |= inst.operands[0].imm;
8776 }
8777 else
8778 inst.instruction |= 0xf;
8779 }
8780
8781 static void
8782 do_t_bfc (void)
8783 {
8784 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8785 constraint (msb > 32, _("bit-field extends past end of register"));
8786 /* The instruction encoding stores the LSB and MSB,
8787 not the LSB and width. */
8788 inst.instruction |= inst.operands[0].reg << 8;
8789 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8790 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8791 inst.instruction |= msb - 1;
8792 }
8793
8794 static void
8795 do_t_bfi (void)
8796 {
8797 unsigned int msb;
8798
8799 /* #0 in second position is alternative syntax for bfc, which is
8800 the same instruction but with REG_PC in the Rm field. */
8801 if (!inst.operands[1].isreg)
8802 inst.operands[1].reg = REG_PC;
8803
8804 msb = inst.operands[2].imm + inst.operands[3].imm;
8805 constraint (msb > 32, _("bit-field extends past end of register"));
8806 /* The instruction encoding stores the LSB and MSB,
8807 not the LSB and width. */
8808 inst.instruction |= inst.operands[0].reg << 8;
8809 inst.instruction |= inst.operands[1].reg << 16;
8810 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8811 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8812 inst.instruction |= msb - 1;
8813 }
8814
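/* SBFX/UBFX Rd, Rn, #lsb, #width.  The encoding stores the LSB and the
   width minus one.  */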
8815 static void
8816 do_t_bfx (void)
8817 {
8818 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8819 _("bit-field extends past end of register"));
8820 inst.instruction |= inst.operands[0].reg << 8;
8821 inst.instruction |= inst.operands[1].reg << 16;
8822 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8823 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8824 inst.instruction |= inst.operands[3].imm - 1;
8825 }
8826
8827 /* ARM V5 Thumb BLX (argument parse)
8828 BLX <target_addr> which is BLX(1)
8829 BLX <Rm> which is BLX(2)
8830 Unfortunately, there are two different opcodes for this mnemonic.
8831 So, the insns[].value is not used, and the code here zaps values
8832 into inst.instruction.
8833
8834 ??? How to take advantage of the additional two bits of displacement
8835 available in Thumb32 mode? Need new relocation? */
8836
8837 static void
8838 do_t_blx (void)
8839 {
8840 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8841 if (inst.operands[0].isreg)
8842 /* We have a register, so this is BLX(2). */
8843 inst.instruction |= inst.operands[0].reg << 3;
8844 else
8845 {
8846 /* No register. This must be BLX(1). */
8847 inst.instruction = 0xf000e800;
8848 #ifdef OBJ_ELF
8849 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8850 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8851 else
8852 #endif
8853 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8854 inst.reloc.pc_rel = 1;
8855 }
8856 }
8857
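/* B and B<cond> (argument parse).  Pick the 16- or 32-bit encoding and
   the matching PC-relative relocation.  */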
8858 static void
8859 do_t_branch (void)
8860 {
8861 int opcode;
8862 int cond;
8863
8864 if (current_it_mask)
8865 {
8866 /* Conditional branches inside IT blocks are encoded as unconditional
8867 branches. */
8868 cond = COND_ALWAYS;
8869 /* A branch must be the last instruction in an IT block. */
8870 constraint (current_it_mask != 0x10, BAD_BRANCH);
8871 }
8872 else
8873 cond = inst.cond;
8874
8875 if (cond != COND_ALWAYS)
8876 opcode = T_MNEM_bcond;
8877 else
8878 opcode = inst.instruction;
8879
8880 if (unified_syntax && inst.size_req == 4)
8881 {
8882       inst.instruction = THUMB_OP32 (opcode);
8883 if (cond == COND_ALWAYS)
8884 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8885 else
8886 {
8887 assert (cond != 0xF);
8888 inst.instruction |= cond << 22;
8889 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8890 }
8891 }
8892 else
8893 {
8894       inst.instruction = THUMB_OP16 (opcode);
8895 if (cond == COND_ALWAYS)
8896 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8897 else
8898 {
8899 inst.instruction |= cond << 8;
8900 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8901 }
8902 /* Allow section relaxation. */
8903 if (unified_syntax && inst.size_req != 2)
8904 inst.relax = opcode;
8905 }
8906
8907 inst.reloc.pc_rel = 1;
8908 }
8909
8910 static void
8911 do_t_bkpt (void)
8912 {
8913 constraint (inst.cond != COND_ALWAYS,
8914 _("instruction is always unconditional"));
8915 if (inst.operands[0].present)
8916 {
8917 constraint (inst.operands[0].imm > 255,
8918 _("immediate value out of range"));
8919 inst.instruction |= inst.operands[0].imm;
8920 }
8921 }
8922
8923 static void
8924 do_t_branch23 (void)
8925 {
8926 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8927 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8928 inst.reloc.pc_rel = 1;
8929
8930 /* If the destination of the branch is a defined symbol which does not have
8931 the THUMB_FUNC attribute, then we must be calling a function which has
8932 the (interfacearm) attribute. We look for the Thumb entry point to that
8933 function and change the branch to refer to that function instead. */
8934 if ( inst.reloc.exp.X_op == O_symbol
8935 && inst.reloc.exp.X_add_symbol != NULL
8936 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8937 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8938 inst.reloc.exp.X_add_symbol =
8939 find_real_start (inst.reloc.exp.X_add_symbol);
8940 }
8941
8942 static void
8943 do_t_bx (void)
8944 {
8945 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8946 inst.instruction |= inst.operands[0].reg << 3;
8947 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8948 should cause the alignment to be checked once it is known. This is
8949 because BX PC only works if the instruction is word aligned. */
8950 }
8951
8952 static void
8953 do_t_bxj (void)
8954 {
8955 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8956 if (inst.operands[0].reg == REG_PC)
8957 as_tsktsk (_("use of r15 in bxj is not really useful"));
8958
8959 inst.instruction |= inst.operands[0].reg << 16;
8960 }
8961
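/* CLZ Rd, Rm.  The source register is encoded twice, in both the Rn and
   Rm fields, as the 32-bit encoding requires.  */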
8962 static void
8963 do_t_clz (void)
8964 {
8965 inst.instruction |= inst.operands[0].reg << 8;
8966 inst.instruction |= inst.operands[1].reg << 16;
8967 inst.instruction |= inst.operands[1].reg;
8968 }
8969
8970 static void
8971 do_t_cps (void)
8972 {
8973 constraint (current_it_mask, BAD_NOT_IT);
8974 inst.instruction |= inst.operands[0].imm;
8975 }
8976
8977 static void
8978 do_t_cpsi (void)
8979 {
8980 constraint (current_it_mask, BAD_NOT_IT);
8981 if (unified_syntax
8982 && (inst.operands[1].present || inst.size_req == 4)
8983 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8984 {
8985 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8986 inst.instruction = 0xf3af8000;
8987 inst.instruction |= imod << 9;
8988 inst.instruction |= inst.operands[0].imm << 5;
8989 if (inst.operands[1].present)
8990 inst.instruction |= 0x100 | inst.operands[1].imm;
8991 }
8992 else
8993 {
8994 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8995 && (inst.operands[0].imm & 4),
8996 _("selected processor does not support 'A' form "
8997 "of this instruction"));
8998 constraint (inst.operands[1].present || inst.size_req == 4,
8999 _("Thumb does not support the 2-argument "
9000 "form of this instruction"));
9001 inst.instruction |= inst.operands[0].imm;
9002 }
9003 }
9004
9005 /* THUMB CPY instruction (argument parse). */
9006
9007 static void
9008 do_t_cpy (void)
9009 {
9010 if (inst.size_req == 4)
9011 {
9012 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9013 inst.instruction |= inst.operands[0].reg << 8;
9014 inst.instruction |= inst.operands[1].reg;
9015 }
9016 else
9017 {
9018 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9019 inst.instruction |= (inst.operands[0].reg & 0x7);
9020 inst.instruction |= inst.operands[1].reg << 3;
9021 }
9022 }
9023
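/* CBZ/CBNZ Rn, label.  Not permitted in an IT block, and only low
   registers are allowed.  */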
9024 static void
9025 do_t_cbz (void)
9026 {
9027 constraint (current_it_mask, BAD_NOT_IT);
9028 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9029 inst.instruction |= inst.operands[0].reg;
9030 inst.reloc.pc_rel = 1;
9031 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9032 }
9033
9034 static void
9035 do_t_dbg (void)
9036 {
9037 inst.instruction |= inst.operands[0].imm;
9038 }
9039
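/* SDIV/UDIV Rd, Rn, Rm.  When Rn is omitted it defaults to Rd.  */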
9040 static void
9041 do_t_div (void)
9042 {
9043 if (!inst.operands[1].present)
9044 inst.operands[1].reg = inst.operands[0].reg;
9045 inst.instruction |= inst.operands[0].reg << 8;
9046 inst.instruction |= inst.operands[1].reg << 16;
9047 inst.instruction |= inst.operands[2].reg;
9048 }
9049
9050 static void
9051 do_t_hint (void)
9052 {
9053 if (unified_syntax && inst.size_req == 4)
9054 inst.instruction = THUMB_OP32 (inst.instruction);
9055 else
9056 inst.instruction = THUMB_OP16 (inst.instruction);
9057 }
9058
9059 static void
9060 do_t_it (void)
9061 {
9062 unsigned int cond = inst.operands[0].imm;
9063
9064 constraint (current_it_mask, BAD_NOT_IT);
9065 current_it_mask = (inst.instruction & 0xf) | 0x10;
9066 current_cc = cond;
9067
9068 /* If the condition is a negative condition, invert the mask. */
9069 if ((cond & 0x1) == 0x0)
9070 {
9071 unsigned int mask = inst.instruction & 0x000f;
9072
9073 if ((mask & 0x7) == 0)
9074 /* no conversion needed */;
9075 else if ((mask & 0x3) == 0)
9076 mask ^= 0x8;
9077 else if ((mask & 0x1) == 0)
9078 mask ^= 0xC;
9079 else
9080 mask ^= 0xE;
9081
9082 inst.instruction &= 0xfff0;
9083 inst.instruction |= mask;
9084 }
9085
9086 inst.instruction |= cond << 4;
9087 }
9088
9089 /* Helper function used for both push/pop and ldm/stm. */
9090 static void
9091 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
9092 {
9093 bfd_boolean load;
9094
9095 load = (inst.instruction & (1 << 20)) != 0;
9096
9097 if (mask & (1 << 13))
9098 inst.error = _("SP not allowed in register list");
9099 if (load)
9100 {
9101 if (mask & (1 << 14)
9102 && mask & (1 << 15))
9103 inst.error = _("LR and PC should not both be in register list");
9104
9105 if ((mask & (1 << base)) != 0
9106 && writeback)
9107 as_warn (_("base register should not be in register list "
9108 "when written back"));
9109 }
9110 else
9111 {
9112 if (mask & (1 << 15))
9113 inst.error = _("PC not allowed in register list");
9114
9115 if (mask & (1 << base))
9116 as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
9117 }
9118
9119 if ((mask & (mask - 1)) == 0)
9120 {
9121 /* Single register transfers implemented as str/ldr. */
9122 if (writeback)
9123 {
9124 if (inst.instruction & (1 << 23))
9125 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
9126 else
9127 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
9128 }
9129 else
9130 {
9131 if (inst.instruction & (1 << 23))
9132 inst.instruction = 0x00800000; /* ia -> [base] */
9133 else
9134 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
9135 }
9136
9137 inst.instruction |= 0xf8400000;
9138 if (load)
9139 inst.instruction |= 0x00100000;
9140
9141       mask = ffs (mask) - 1;
9142 mask <<= 12;
9143 }
9144 else if (writeback)
9145 inst.instruction |= WRITE_BACK;
9146
9147 inst.instruction |= mask;
9148 inst.instruction |= base << 16;
9149 }
9150
9151 static void
9152 do_t_ldmstm (void)
9153 {
9154 /* This really doesn't seem worth it. */
9155 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9156 _("expression too complex"));
9157 constraint (inst.operands[1].writeback,
9158 _("Thumb load/store multiple does not support {reglist}^"));
9159
9160 if (unified_syntax)
9161 {
9162 bfd_boolean narrow;
9163 unsigned mask;
9164
9165 narrow = FALSE;
9166 /* See if we can use a 16-bit instruction. */
9167 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
9168 && inst.size_req != 4
9169 && !(inst.operands[1].imm & ~0xff))
9170 {
9171 mask = 1 << inst.operands[0].reg;
9172
9173 if (inst.operands[0].reg <= 7
9174 && (inst.instruction == T_MNEM_stmia
9175 ? inst.operands[0].writeback
9176 : (inst.operands[0].writeback
9177 == !(inst.operands[1].imm & mask))))
9178 {
9179 if (inst.instruction == T_MNEM_stmia
9180 && (inst.operands[1].imm & mask)
9181 && (inst.operands[1].imm & (mask - 1)))
9182 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9183 inst.operands[0].reg);
9184
9185 inst.instruction = THUMB_OP16 (inst.instruction);
9186 inst.instruction |= inst.operands[0].reg << 8;
9187 inst.instruction |= inst.operands[1].imm;
9188 narrow = TRUE;
9189 }
9190 	  else if (inst.operands[0].reg == REG_SP
9191 && inst.operands[0].writeback)
9192 {
9193 inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
9194 ? T_MNEM_push : T_MNEM_pop);
9195 inst.instruction |= inst.operands[1].imm;
9196 narrow = TRUE;
9197 }
9198 }
9199
9200 if (!narrow)
9201 {
9202 if (inst.instruction < 0xffff)
9203 inst.instruction = THUMB_OP32 (inst.instruction);
9204
9205 encode_thumb2_ldmstm(inst.operands[0].reg, inst.operands[1].imm,
9206 inst.operands[0].writeback);
9207 }
9208 }
9209 else
9210 {
9211 constraint (inst.operands[0].reg > 7
9212 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
9213 constraint (inst.instruction != T_MNEM_ldmia
9214 && inst.instruction != T_MNEM_stmia,
9215 _("Thumb-2 instruction only valid in unified syntax"));
9216 if (inst.instruction == T_MNEM_stmia)
9217 {
9218 if (!inst.operands[0].writeback)
9219 as_warn (_("this instruction will write back the base register"));
9220 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9221 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9222 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9223 inst.operands[0].reg);
9224 }
9225 else
9226 {
9227 if (!inst.operands[0].writeback
9228 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
9229 as_warn (_("this instruction will write back the base register"));
9230 else if (inst.operands[0].writeback
9231 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
9232 as_warn (_("this instruction will not write back the base register"));
9233 }
9234
9235 inst.instruction = THUMB_OP16 (inst.instruction);
9236 inst.instruction |= inst.operands[0].reg << 8;
9237 inst.instruction |= inst.operands[1].imm;
9238 }
9239 }
9240
9241 static void
9242 do_t_ldrex (void)
9243 {
9244 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9245 || inst.operands[1].postind || inst.operands[1].writeback
9246 || inst.operands[1].immisreg || inst.operands[1].shifted
9247 || inst.operands[1].negative,
9248 BAD_ADDR_MODE);
9249
9250 inst.instruction |= inst.operands[0].reg << 12;
9251 inst.instruction |= inst.operands[1].reg << 16;
9252 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9253 }
9254
9255 static void
9256 do_t_ldrexd (void)
9257 {
9258 if (!inst.operands[1].present)
9259 {
9260 constraint (inst.operands[0].reg == REG_LR,
9261 _("r14 not allowed as first register "
9262 "when second register is omitted"));
9263 inst.operands[1].reg = inst.operands[0].reg + 1;
9264 }
9265 constraint (inst.operands[0].reg == inst.operands[1].reg,
9266 BAD_OVERLAP);
9267
9268 inst.instruction |= inst.operands[0].reg << 12;
9269 inst.instruction |= inst.operands[1].reg << 8;
9270 inst.instruction |= inst.operands[2].reg << 16;
9271 }
9272
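/* Load/store single data item (LDR/STR and the byte, halfword and signed
   variants).  Try the various 16-bit forms, including the SP- and
   PC-relative ones, before falling back to a 32-bit encoding.  */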
9273 static void
9274 do_t_ldst (void)
9275 {
9276 unsigned long opcode;
9277 int Rn;
9278
9279 opcode = inst.instruction;
9280 if (unified_syntax)
9281 {
9282 if (!inst.operands[1].isreg)
9283 {
9284 if (opcode <= 0xffff)
9285 inst.instruction = THUMB_OP32 (opcode);
9286 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9287 return;
9288 }
9289 if (inst.operands[1].isreg
9290 && !inst.operands[1].writeback
9291 && !inst.operands[1].shifted && !inst.operands[1].postind
9292 && !inst.operands[1].negative && inst.operands[0].reg <= 7
9293 && opcode <= 0xffff
9294 && inst.size_req != 4)
9295 {
9296 /* Insn may have a 16-bit form. */
9297 Rn = inst.operands[1].reg;
9298 if (inst.operands[1].immisreg)
9299 {
9300 inst.instruction = THUMB_OP16 (opcode);
9301 /* [Rn, Ri] */
9302 if (Rn <= 7 && inst.operands[1].imm <= 7)
9303 goto op16;
9304 }
9305 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
9306 && opcode != T_MNEM_ldrsb)
9307 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
9308 || (Rn == REG_SP && opcode == T_MNEM_str))
9309 {
9310 /* [Rn, #const] */
9311 if (Rn > 7)
9312 {
9313 if (Rn == REG_PC)
9314 {
9315 if (inst.reloc.pc_rel)
9316 opcode = T_MNEM_ldr_pc2;
9317 else
9318 opcode = T_MNEM_ldr_pc;
9319 }
9320 else
9321 {
9322 if (opcode == T_MNEM_ldr)
9323 opcode = T_MNEM_ldr_sp;
9324 else
9325 opcode = T_MNEM_str_sp;
9326 }
9327 inst.instruction = inst.operands[0].reg << 8;
9328 }
9329 else
9330 {
9331 inst.instruction = inst.operands[0].reg;
9332 inst.instruction |= inst.operands[1].reg << 3;
9333 }
9334 inst.instruction |= THUMB_OP16 (opcode);
9335 if (inst.size_req == 2)
9336 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9337 else
9338 inst.relax = opcode;
9339 return;
9340 }
9341 }
9342 /* Definitely a 32-bit variant. */
9343 inst.instruction = THUMB_OP32 (opcode);
9344 inst.instruction |= inst.operands[0].reg << 12;
9345 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
9346 return;
9347 }
9348
9349 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9350
9351 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
9352 {
9353 /* Only [Rn,Rm] is acceptable. */
9354 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
9355 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
9356 || inst.operands[1].postind || inst.operands[1].shifted
9357 || inst.operands[1].negative,
9358 _("Thumb does not support this addressing mode"));
9359 inst.instruction = THUMB_OP16 (inst.instruction);
9360 goto op16;
9361 }
9362
9363 inst.instruction = THUMB_OP16 (inst.instruction);
9364 if (!inst.operands[1].isreg)
9365 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9366 return;
9367
9368 constraint (!inst.operands[1].preind
9369 || inst.operands[1].shifted
9370 || inst.operands[1].writeback,
9371 _("Thumb does not support this addressing mode"));
9372 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
9373 {
9374 constraint (inst.instruction & 0x0600,
9375 _("byte or halfword not valid for base register"));
9376 constraint (inst.operands[1].reg == REG_PC
9377 && !(inst.instruction & THUMB_LOAD_BIT),
9378 _("r15 based store not allowed"));
9379 constraint (inst.operands[1].immisreg,
9380 _("invalid base register for register offset"));
9381
9382 if (inst.operands[1].reg == REG_PC)
9383 inst.instruction = T_OPCODE_LDR_PC;
9384 else if (inst.instruction & THUMB_LOAD_BIT)
9385 inst.instruction = T_OPCODE_LDR_SP;
9386 else
9387 inst.instruction = T_OPCODE_STR_SP;
9388
9389 inst.instruction |= inst.operands[0].reg << 8;
9390 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9391 return;
9392 }
9393
9394 constraint (inst.operands[1].reg > 7, BAD_HIREG);
9395 if (!inst.operands[1].immisreg)
9396 {
9397 /* Immediate offset. */
9398 inst.instruction |= inst.operands[0].reg;
9399 inst.instruction |= inst.operands[1].reg << 3;
9400 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9401 return;
9402 }
9403
9404 /* Register offset. */
9405 constraint (inst.operands[1].imm > 7, BAD_HIREG);
9406 constraint (inst.operands[1].negative,
9407 _("Thumb does not support this addressing mode"));
9408
9409 op16:
9410 switch (inst.instruction)
9411 {
9412 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
9413 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
9414 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
9415 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
9416 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
9417 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
9418 case 0x5600 /* ldrsb */:
9419 case 0x5e00 /* ldrsh */: break;
9420 default: abort ();
9421 }
9422
9423 inst.instruction |= inst.operands[0].reg;
9424 inst.instruction |= inst.operands[1].reg << 3;
9425 inst.instruction |= inst.operands[1].imm << 6;
9426 }
9427
9428 static void
9429 do_t_ldstd (void)
9430 {
9431 if (!inst.operands[1].present)
9432 {
9433 inst.operands[1].reg = inst.operands[0].reg + 1;
9434 constraint (inst.operands[0].reg == REG_LR,
9435 _("r14 not allowed here"));
9436 }
9437 inst.instruction |= inst.operands[0].reg << 12;
9438 inst.instruction |= inst.operands[1].reg << 8;
9439 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9440
9441 }
9442
9443 static void
9444 do_t_ldstt (void)
9445 {
9446 inst.instruction |= inst.operands[0].reg << 12;
9447 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
9448 }
9449
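/* Multiply-accumulate: Rd, Rn, Rm, Ra.  */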
9450 static void
9451 do_t_mla (void)
9452 {
9453 inst.instruction |= inst.operands[0].reg << 8;
9454 inst.instruction |= inst.operands[1].reg << 16;
9455 inst.instruction |= inst.operands[2].reg;
9456 inst.instruction |= inst.operands[3].reg << 12;
9457 }
9458
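/* Long multiply-accumulate: RdLo, RdHi, Rn, Rm.  */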
9459 static void
9460 do_t_mlal (void)
9461 {
9462 inst.instruction |= inst.operands[0].reg << 12;
9463 inst.instruction |= inst.operands[1].reg << 8;
9464 inst.instruction |= inst.operands[2].reg << 16;
9465 inst.instruction |= inst.operands[3].reg;
9466 }
9467
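/* MOV, MOVS and CMP with a register or immediate source operand.  Prefer
   a 16-bit encoding where the registers, IT state and requested size
   allow it.  */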
9468 static void
9469 do_t_mov_cmp (void)
9470 {
9471 if (unified_syntax)
9472 {
9473 int r0off = (inst.instruction == T_MNEM_mov
9474 || inst.instruction == T_MNEM_movs) ? 8 : 16;
9475 unsigned long opcode;
9476 bfd_boolean narrow;
9477 bfd_boolean low_regs;
9478
9479 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
9480 opcode = inst.instruction;
9481 if (current_it_mask)
9482 narrow = opcode != T_MNEM_movs;
9483 else
9484 narrow = opcode != T_MNEM_movs || low_regs;
9485 if (inst.size_req == 4
9486 || inst.operands[1].shifted)
9487 narrow = FALSE;
9488
9489 if (!inst.operands[1].isreg)
9490 {
9491 /* Immediate operand. */
9492 if (current_it_mask == 0 && opcode == T_MNEM_mov)
9493 narrow = 0;
9494 if (low_regs && narrow)
9495 {
9496 inst.instruction = THUMB_OP16 (opcode);
9497 inst.instruction |= inst.operands[0].reg << 8;
9498 if (inst.size_req == 2)
9499 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9500 else
9501 inst.relax = opcode;
9502 }
9503 else
9504 {
9505 inst.instruction = THUMB_OP32 (inst.instruction);
9506 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9507 inst.instruction |= inst.operands[0].reg << r0off;
9508 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9509 }
9510 }
9511 else if (!narrow)
9512 {
9513 inst.instruction = THUMB_OP32 (inst.instruction);
9514 inst.instruction |= inst.operands[0].reg << r0off;
9515 encode_thumb32_shifted_operand (1);
9516 }
9517 else
9518 switch (inst.instruction)
9519 {
9520 case T_MNEM_mov:
9521 inst.instruction = T_OPCODE_MOV_HR;
9522 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9523 inst.instruction |= (inst.operands[0].reg & 0x7);
9524 inst.instruction |= inst.operands[1].reg << 3;
9525 break;
9526
9527 case T_MNEM_movs:
9528 /* We know we have low registers at this point.
9529 Generate ADD Rd, Rs, #0. */
9530 inst.instruction = T_OPCODE_ADD_I3;
9531 inst.instruction |= inst.operands[0].reg;
9532 inst.instruction |= inst.operands[1].reg << 3;
9533 break;
9534
9535 case T_MNEM_cmp:
9536 if (low_regs)
9537 {
9538 inst.instruction = T_OPCODE_CMP_LR;
9539 inst.instruction |= inst.operands[0].reg;
9540 inst.instruction |= inst.operands[1].reg << 3;
9541 }
9542 else
9543 {
9544 inst.instruction = T_OPCODE_CMP_HR;
9545 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9546 inst.instruction |= (inst.operands[0].reg & 0x7);
9547 inst.instruction |= inst.operands[1].reg << 3;
9548 }
9549 break;
9550 }
9551 return;
9552 }
9553
9554 inst.instruction = THUMB_OP16 (inst.instruction);
9555 if (inst.operands[1].isreg)
9556 {
9557 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
9558 {
9559 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
9560 since a MOV instruction produces unpredictable results. */
9561 if (inst.instruction == T_OPCODE_MOV_I8)
9562 inst.instruction = T_OPCODE_ADD_I3;
9563 else
9564 inst.instruction = T_OPCODE_CMP_LR;
9565
9566 inst.instruction |= inst.operands[0].reg;
9567 inst.instruction |= inst.operands[1].reg << 3;
9568 }
9569 else
9570 {
9571 if (inst.instruction == T_OPCODE_MOV_I8)
9572 inst.instruction = T_OPCODE_MOV_HR;
9573 else
9574 inst.instruction = T_OPCODE_CMP_HR;
9575 do_t_cpy ();
9576 }
9577 }
9578 else
9579 {
9580 constraint (inst.operands[0].reg > 7,
9581 _("only lo regs allowed with immediate"));
9582 inst.instruction |= inst.operands[0].reg << 8;
9583 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9584 }
9585 }
9586
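/* MOVW/MOVT with a 16-bit immediate, possibly given as a :lower16: or
   :upper16: relocation.  */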
9587 static void
9588 do_t_mov16 (void)
9589 {
9590 bfd_vma imm;
9591 bfd_boolean top;
9592
9593 top = (inst.instruction & 0x00800000) != 0;
9594 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
9595 {
9596       constraint (top, _(":lower16: not allowed in this instruction"));
9597 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
9598 }
9599 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
9600 {
9601       constraint (!top, _(":upper16: not allowed in this instruction"));
9602 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
9603 }
9604
9605 inst.instruction |= inst.operands[0].reg << 8;
9606 if (inst.reloc.type == BFD_RELOC_UNUSED)
9607 {
9608 imm = inst.reloc.exp.X_add_number;
9609 inst.instruction |= (imm & 0xf000) << 4;
9610 inst.instruction |= (imm & 0x0800) << 15;
9611 inst.instruction |= (imm & 0x0700) << 4;
9612 inst.instruction |= (imm & 0x00ff);
9613 }
9614 }
9615
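/* MVN/MVNS and the test/compare instructions that share this operand
   format: one register operand plus a register or immediate source.  */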
9616 static void
9617 do_t_mvn_tst (void)
9618 {
9619 if (unified_syntax)
9620 {
9621 int r0off = (inst.instruction == T_MNEM_mvn
9622 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
9623 bfd_boolean narrow;
9624
9625 if (inst.size_req == 4
9626 || inst.instruction > 0xffff
9627 || inst.operands[1].shifted
9628 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9629 narrow = FALSE;
9630 else if (inst.instruction == T_MNEM_cmn)
9631 narrow = TRUE;
9632 else if (THUMB_SETS_FLAGS (inst.instruction))
9633 narrow = (current_it_mask == 0);
9634 else
9635 narrow = (current_it_mask != 0);
9636
9637 if (!inst.operands[1].isreg)
9638 {
9639 /* For an immediate, we always generate a 32-bit opcode;
9640 section relaxation will shrink it later if possible. */
9641 if (inst.instruction < 0xffff)
9642 inst.instruction = THUMB_OP32 (inst.instruction);
9643 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9644 inst.instruction |= inst.operands[0].reg << r0off;
9645 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9646 }
9647 else
9648 {
9649 /* See if we can do this with a 16-bit instruction. */
9650 if (narrow)
9651 {
9652 inst.instruction = THUMB_OP16 (inst.instruction);
9653 inst.instruction |= inst.operands[0].reg;
9654 inst.instruction |= inst.operands[1].reg << 3;
9655 }
9656 else
9657 {
9658 constraint (inst.operands[1].shifted
9659 && inst.operands[1].immisreg,
9660 _("shift must be constant"));
9661 if (inst.instruction < 0xffff)
9662 inst.instruction = THUMB_OP32 (inst.instruction);
9663 inst.instruction |= inst.operands[0].reg << r0off;
9664 encode_thumb32_shifted_operand (1);
9665 }
9666 }
9667 }
9668 else
9669 {
9670 constraint (inst.instruction > 0xffff
9671 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
9672 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
9673 _("unshifted register required"));
9674 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9675 BAD_HIREG);
9676
9677 inst.instruction = THUMB_OP16 (inst.instruction);
9678 inst.instruction |= inst.operands[0].reg;
9679 inst.instruction |= inst.operands[1].reg << 3;
9680 }
9681 }
9682
9683 static void
9684 do_t_mrs (void)
9685 {
9686 int flags;
9687
9688 if (do_vfp_nsyn_mrs () == SUCCESS)
9689 return;
9690
9691 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
9692 if (flags == 0)
9693 {
9694 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9695 _("selected processor does not support "
9696 "requested special purpose register"));
9697 }
9698 else
9699 {
9700 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9701 _("selected processor does not support "
9702 			  "requested special purpose register"));
9703 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9704 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
9705 _("'CPSR' or 'SPSR' expected"));
9706 }
9707
9708 inst.instruction |= inst.operands[0].reg << 8;
9709 inst.instruction |= (flags & SPSR_BIT) >> 2;
9710 inst.instruction |= inst.operands[1].imm & 0xff;
9711 }
9712
9713 static void
9714 do_t_msr (void)
9715 {
9716 int flags;
9717
9718 if (do_vfp_nsyn_msr () == SUCCESS)
9719 return;
9720
9721 constraint (!inst.operands[1].isreg,
9722 _("Thumb encoding does not support an immediate here"));
9723 flags = inst.operands[0].imm;
9724 if (flags & ~0xff)
9725 {
9726 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9727 _("selected processor does not support "
9728 "requested special purpose register"));
9729 }
9730 else
9731 {
9732 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9733 _("selected processor does not support "
9734 "requested special purpose register"));
9735 flags |= PSR_f;
9736 }
9737 inst.instruction |= (flags & SPSR_BIT) >> 2;
9738 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
9739 inst.instruction |= (flags & 0xff);
9740 inst.instruction |= inst.operands[1].reg << 16;
9741 }
9742
9743 static void
9744 do_t_mul (void)
9745 {
9746 if (!inst.operands[2].present)
9747 inst.operands[2].reg = inst.operands[0].reg;
9748
9749 /* There is no 32-bit MULS and no 16-bit MUL. */
9750 if (unified_syntax && inst.instruction == T_MNEM_mul)
9751 {
9752 inst.instruction = THUMB_OP32 (inst.instruction);
9753 inst.instruction |= inst.operands[0].reg << 8;
9754 inst.instruction |= inst.operands[1].reg << 16;
9755 inst.instruction |= inst.operands[2].reg << 0;
9756 }
9757 else
9758 {
9759 constraint (!unified_syntax
9760 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
9761 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9762 BAD_HIREG);
9763
9764 inst.instruction = THUMB_OP16 (inst.instruction);
9765 inst.instruction |= inst.operands[0].reg;
9766
9767 if (inst.operands[0].reg == inst.operands[1].reg)
9768 inst.instruction |= inst.operands[2].reg << 3;
9769 else if (inst.operands[0].reg == inst.operands[2].reg)
9770 inst.instruction |= inst.operands[1].reg << 3;
9771 else
9772 constraint (1, _("dest must overlap one source register"));
9773 }
9774 }
9775
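/* Long multiply: RdLo, RdHi, Rn, Rm.  RdHi and RdLo should be distinct.  */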
9776 static void
9777 do_t_mull (void)
9778 {
9779 inst.instruction |= inst.operands[0].reg << 12;
9780 inst.instruction |= inst.operands[1].reg << 8;
9781 inst.instruction |= inst.operands[2].reg << 16;
9782 inst.instruction |= inst.operands[3].reg;
9783
9784 if (inst.operands[0].reg == inst.operands[1].reg)
9785 as_tsktsk (_("rdhi and rdlo must be different"));
9786 }
9787
9788 static void
9789 do_t_nop (void)
9790 {
9791 if (unified_syntax)
9792 {
9793 if (inst.size_req == 4 || inst.operands[0].imm > 15)
9794 {
9795 inst.instruction = THUMB_OP32 (inst.instruction);
9796 inst.instruction |= inst.operands[0].imm;
9797 }
9798 else
9799 {
9800 inst.instruction = THUMB_OP16 (inst.instruction);
9801 inst.instruction |= inst.operands[0].imm << 4;
9802 }
9803 }
9804 else
9805 {
9806 constraint (inst.operands[0].present,
9807 _("Thumb does not support NOP with hints"));
9808 inst.instruction = 0x46c0;
9809 }
9810 }
9811
9812 static void
9813 do_t_neg (void)
9814 {
9815 if (unified_syntax)
9816 {
9817 bfd_boolean narrow;
9818
9819 if (THUMB_SETS_FLAGS (inst.instruction))
9820 narrow = (current_it_mask == 0);
9821 else
9822 narrow = (current_it_mask != 0);
9823 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9824 narrow = FALSE;
9825 if (inst.size_req == 4)
9826 narrow = FALSE;
9827
9828 if (!narrow)
9829 {
9830 inst.instruction = THUMB_OP32 (inst.instruction);
9831 inst.instruction |= inst.operands[0].reg << 8;
9832 inst.instruction |= inst.operands[1].reg << 16;
9833 }
9834 else
9835 {
9836 inst.instruction = THUMB_OP16 (inst.instruction);
9837 inst.instruction |= inst.operands[0].reg;
9838 inst.instruction |= inst.operands[1].reg << 3;
9839 }
9840 }
9841 else
9842 {
9843 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9844 BAD_HIREG);
9845 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9846
9847 inst.instruction = THUMB_OP16 (inst.instruction);
9848 inst.instruction |= inst.operands[0].reg;
9849 inst.instruction |= inst.operands[1].reg << 3;
9850 }
9851 }
9852
9853 static void
9854 do_t_pkhbt (void)
9855 {
9856 inst.instruction |= inst.operands[0].reg << 8;
9857 inst.instruction |= inst.operands[1].reg << 16;
9858 inst.instruction |= inst.operands[2].reg;
9859 if (inst.operands[3].present)
9860 {
9861 unsigned int val = inst.reloc.exp.X_add_number;
9862 constraint (inst.reloc.exp.X_op != O_constant,
9863 _("expression too complex"));
9864 inst.instruction |= (val & 0x1c) << 10;
9865 inst.instruction |= (val & 0x03) << 6;
9866 }
9867 }
9868
9869 static void
9870 do_t_pkhtb (void)
9871 {
9872 if (!inst.operands[3].present)
9873 inst.instruction &= ~0x00000020;
9874 do_t_pkhbt ();
9875 }
9876
9877 static void
9878 do_t_pld (void)
9879 {
9880 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
9881 }
9882
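/* PUSH/POP.  Use a 16-bit encoding when the register list permits
   (optionally including LR for PUSH or PC for POP); otherwise fall back
   to the Thumb-2 LDM/STM form.  */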
9883 static void
9884 do_t_push_pop (void)
9885 {
9886 unsigned mask;
9887
9888 constraint (inst.operands[0].writeback,
9889 _("push/pop do not support {reglist}^"));
9890 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9891 _("expression too complex"));
9892
9893 mask = inst.operands[0].imm;
9894 if ((mask & ~0xff) == 0)
9895 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
9896 else if ((inst.instruction == T_MNEM_push
9897 && (mask & ~0xff) == 1 << REG_LR)
9898 || (inst.instruction == T_MNEM_pop
9899 && (mask & ~0xff) == 1 << REG_PC))
9900 {
9901 inst.instruction = THUMB_OP16 (inst.instruction);
9902 inst.instruction |= THUMB_PP_PC_LR;
9903 inst.instruction |= mask & 0xff;
9904 }
9905 else if (unified_syntax)
9906 {
9907 inst.instruction = THUMB_OP32 (inst.instruction);
9908       encode_thumb2_ldmstm (13, mask, TRUE);
9909 }
9910 else
9911 {
9912 inst.error = _("invalid register list to push/pop instruction");
9913 return;
9914 }
9915 }
9916
9917 static void
9918 do_t_rbit (void)
9919 {
9920 inst.instruction |= inst.operands[0].reg << 8;
9921 inst.instruction |= inst.operands[1].reg << 16;
9922 }
9923
9924 static void
9925 do_t_rev (void)
9926 {
9927 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9928 && inst.size_req != 4)
9929 {
9930 inst.instruction = THUMB_OP16 (inst.instruction);
9931 inst.instruction |= inst.operands[0].reg;
9932 inst.instruction |= inst.operands[1].reg << 3;
9933 }
9934 else if (unified_syntax)
9935 {
9936 inst.instruction = THUMB_OP32 (inst.instruction);
9937 inst.instruction |= inst.operands[0].reg << 8;
9938 inst.instruction |= inst.operands[1].reg << 16;
9939 inst.instruction |= inst.operands[1].reg;
9940 }
9941 else
9942 inst.error = BAD_HIREG;
9943 }
9944
9945 static void
9946 do_t_rsb (void)
9947 {
9948 int Rd, Rs;
9949
9950 Rd = inst.operands[0].reg;
9951 Rs = (inst.operands[1].present
9952 ? inst.operands[1].reg /* Rd, Rs, foo */
9953 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9954
9955 inst.instruction |= Rd << 8;
9956 inst.instruction |= Rs << 16;
9957 if (!inst.operands[2].isreg)
9958 {
9959 bfd_boolean narrow;
9960
9961 if ((inst.instruction & 0x00100000) != 0)
9962 narrow = (current_it_mask == 0);
9963 else
9964 narrow = (current_it_mask != 0);
9965
9966 if (Rd > 7 || Rs > 7)
9967 narrow = FALSE;
9968
9969 if (inst.size_req == 4 || !unified_syntax)
9970 narrow = FALSE;
9971
9972 if (inst.reloc.exp.X_op != O_constant
9973 || inst.reloc.exp.X_add_number != 0)
9974 narrow = FALSE;
9975
9976 /* Turn rsb #0 into 16-bit neg. We should probably do this via
9977 relaxation, but it doesn't seem worth the hassle. */
9978 if (narrow)
9979 {
9980 inst.reloc.type = BFD_RELOC_UNUSED;
9981 inst.instruction = THUMB_OP16 (T_MNEM_negs);
9982 inst.instruction |= Rs << 3;
9983 inst.instruction |= Rd;
9984 }
9985 else
9986 {
9987 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9988 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9989 }
9990 }
9991 else
9992 encode_thumb32_shifted_operand (2);
9993 }
9994
9995 static void
9996 do_t_setend (void)
9997 {
9998 constraint (current_it_mask, BAD_NOT_IT);
9999 if (inst.operands[0].imm)
10000 inst.instruction |= 0x8;
10001 }
10002
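/* Shift instructions (ASR, LSL, LSR, ROR) with an immediate or register
   shift amount.  A narrow encoding is used when the registers, IT state
   and shift kind allow.  */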
10003 static void
10004 do_t_shift (void)
10005 {
10006 if (!inst.operands[1].present)
10007 inst.operands[1].reg = inst.operands[0].reg;
10008
10009 if (unified_syntax)
10010 {
10011 bfd_boolean narrow;
10012 int shift_kind;
10013
10014 switch (inst.instruction)
10015 {
10016 case T_MNEM_asr:
10017 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
10018 case T_MNEM_lsl:
10019 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
10020 case T_MNEM_lsr:
10021 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
10022 case T_MNEM_ror:
10023 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
10024 default: abort ();
10025 }
10026
10027 if (THUMB_SETS_FLAGS (inst.instruction))
10028 narrow = (current_it_mask == 0);
10029 else
10030 narrow = (current_it_mask != 0);
10031 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
10032 narrow = FALSE;
10033 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
10034 narrow = FALSE;
10035 if (inst.operands[2].isreg
10036 && (inst.operands[1].reg != inst.operands[0].reg
10037 || inst.operands[2].reg > 7))
10038 narrow = FALSE;
10039 if (inst.size_req == 4)
10040 narrow = FALSE;
10041
10042 if (!narrow)
10043 {
10044 if (inst.operands[2].isreg)
10045 {
10046 inst.instruction = THUMB_OP32 (inst.instruction);
10047 inst.instruction |= inst.operands[0].reg << 8;
10048 inst.instruction |= inst.operands[1].reg << 16;
10049 inst.instruction |= inst.operands[2].reg;
10050 }
10051 else
10052 {
10053 inst.operands[1].shifted = 1;
10054 inst.operands[1].shift_kind = shift_kind;
10055 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
10056 ? T_MNEM_movs : T_MNEM_mov);
10057 inst.instruction |= inst.operands[0].reg << 8;
10058 encode_thumb32_shifted_operand (1);
10059 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
10060 inst.reloc.type = BFD_RELOC_UNUSED;
10061 }
10062 }
10063 else
10064 {
10065 if (inst.operands[2].isreg)
10066 {
10067 switch (shift_kind)
10068 {
10069 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
10070 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
10071 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
10072 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
10073 default: abort ();
10074 }
10075
10076 inst.instruction |= inst.operands[0].reg;
10077 inst.instruction |= inst.operands[2].reg << 3;
10078 }
10079 else
10080 {
10081 switch (shift_kind)
10082 {
10083 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10084 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10085 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10086 default: abort ();
10087 }
10088 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10089 inst.instruction |= inst.operands[0].reg;
10090 inst.instruction |= inst.operands[1].reg << 3;
10091 }
10092 }
10093 }
10094 else
10095 {
10096 constraint (inst.operands[0].reg > 7
10097 || inst.operands[1].reg > 7, BAD_HIREG);
10098 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10099
10100 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
10101 {
10102 constraint (inst.operands[2].reg > 7, BAD_HIREG);
10103 constraint (inst.operands[0].reg != inst.operands[1].reg,
10104 _("source1 and dest must be same register"));
10105
10106 switch (inst.instruction)
10107 {
10108 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
10109 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
10110 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
10111 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
10112 default: abort ();
10113 }
10114
10115 inst.instruction |= inst.operands[0].reg;
10116 inst.instruction |= inst.operands[2].reg << 3;
10117 }
10118 else
10119 {
10120 switch (inst.instruction)
10121 {
10122 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
10123 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
10124 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
10125 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
10126 default: abort ();
10127 }
10128 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10129 inst.instruction |= inst.operands[0].reg;
10130 inst.instruction |= inst.operands[1].reg << 3;
10131 }
10132 }
10133 }
10134
10135 static void
10136 do_t_simd (void)
10137 {
10138 inst.instruction |= inst.operands[0].reg << 8;
10139 inst.instruction |= inst.operands[1].reg << 16;
10140 inst.instruction |= inst.operands[2].reg;
10141 }
10142
10143 static void
10144 do_t_smc (void)
10145 {
10146 unsigned int value = inst.reloc.exp.X_add_number;
10147 constraint (inst.reloc.exp.X_op != O_constant,
10148 _("expression too complex"));
10149 inst.reloc.type = BFD_RELOC_UNUSED;
10150 inst.instruction |= (value & 0xf000) >> 12;
10151 inst.instruction |= (value & 0x0ff0);
10152 inst.instruction |= (value & 0x000f) << 16;
10153 }
10154
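/* Thumb-2 SSAT (and do_t_usat below): the optional shift amount is split so
   that its bits [4:2] land in instruction bits 14-12 and bits [1:0] in bits
   7-6; an ASR shift kind additionally sets the sh bit (bit 21).  */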
10155 static void
10156 do_t_ssat (void)
10157 {
10158 inst.instruction |= inst.operands[0].reg << 8;
10159 inst.instruction |= inst.operands[1].imm - 1;
10160 inst.instruction |= inst.operands[2].reg << 16;
10161
10162 if (inst.operands[3].present)
10163 {
10164 constraint (inst.reloc.exp.X_op != O_constant,
10165 _("expression too complex"));
10166
10167 if (inst.reloc.exp.X_add_number != 0)
10168 {
10169 if (inst.operands[3].shift_kind == SHIFT_ASR)
10170 inst.instruction |= 0x00200000; /* sh bit */
10171 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10172 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10173 }
10174 inst.reloc.type = BFD_RELOC_UNUSED;
10175 }
10176 }
10177
10178 static void
10179 do_t_ssat16 (void)
10180 {
10181 inst.instruction |= inst.operands[0].reg << 8;
10182 inst.instruction |= inst.operands[1].imm - 1;
10183 inst.instruction |= inst.operands[2].reg << 16;
10184 }
10185
10186 static void
10187 do_t_strex (void)
10188 {
10189 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10190 || inst.operands[2].postind || inst.operands[2].writeback
10191 || inst.operands[2].immisreg || inst.operands[2].shifted
10192 || inst.operands[2].negative,
10193 BAD_ADDR_MODE);
10194
10195 inst.instruction |= inst.operands[0].reg << 8;
10196 inst.instruction |= inst.operands[1].reg << 12;
10197 inst.instruction |= inst.operands[2].reg << 16;
10198 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10199 }
10200
10201 static void
10202 do_t_strexd (void)
10203 {
10204 if (!inst.operands[2].present)
10205 inst.operands[2].reg = inst.operands[1].reg + 1;
10206
10207 constraint (inst.operands[0].reg == inst.operands[1].reg
10208 || inst.operands[0].reg == inst.operands[2].reg
10209 || inst.operands[0].reg == inst.operands[3].reg
10210 || inst.operands[1].reg == inst.operands[2].reg,
10211 BAD_OVERLAP);
10212
10213 inst.instruction |= inst.operands[0].reg;
10214 inst.instruction |= inst.operands[1].reg << 12;
10215 inst.instruction |= inst.operands[2].reg << 8;
10216 inst.instruction |= inst.operands[3].reg << 16;
10217 }
10218
10219 static void
10220 do_t_sxtah (void)
10221 {
10222 inst.instruction |= inst.operands[0].reg << 8;
10223 inst.instruction |= inst.operands[1].reg << 16;
10224 inst.instruction |= inst.operands[2].reg;
10225 inst.instruction |= inst.operands[3].imm << 4;
10226 }
10227
10228 static void
10229 do_t_sxth (void)
10230 {
10231 if (inst.instruction <= 0xffff && inst.size_req != 4
10232 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
10233 && (!inst.operands[2].present || inst.operands[2].imm == 0))
10234 {
10235 inst.instruction = THUMB_OP16 (inst.instruction);
10236 inst.instruction |= inst.operands[0].reg;
10237 inst.instruction |= inst.operands[1].reg << 3;
10238 }
10239 else if (unified_syntax)
10240 {
10241 if (inst.instruction <= 0xffff)
10242 inst.instruction = THUMB_OP32 (inst.instruction);
10243 inst.instruction |= inst.operands[0].reg << 8;
10244 inst.instruction |= inst.operands[1].reg;
10245 inst.instruction |= inst.operands[2].imm << 4;
10246 }
10247 else
10248 {
10249 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
10250 _("Thumb encoding does not support rotation"));
10251 constraint (1, BAD_HIREG);
10252 }
10253 }
10254
10255 static void
10256 do_t_swi (void)
10257 {
10258 inst.reloc.type = BFD_RELOC_ARM_SWI;
10259 }
10260
10261 static void
10262 do_t_tb (void)
10263 {
10264 int half;
10265
10266 half = (inst.instruction & 0x10) != 0;
10267 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
10268 constraint (inst.operands[0].immisreg,
10269 _("instruction requires register index"));
10270 constraint (inst.operands[0].imm == 15,
10271 _("PC is not a valid index register"));
10272 constraint (!half && inst.operands[0].shifted,
10273 _("instruction does not allow shifted index"));
10274 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
10275 }
10276
10277 static void
10278 do_t_usat (void)
10279 {
10280 inst.instruction |= inst.operands[0].reg << 8;
10281 inst.instruction |= inst.operands[1].imm;
10282 inst.instruction |= inst.operands[2].reg << 16;
10283
10284 if (inst.operands[3].present)
10285 {
10286 constraint (inst.reloc.exp.X_op != O_constant,
10287 _("expression too complex"));
10288 if (inst.reloc.exp.X_add_number != 0)
10289 {
10290 if (inst.operands[3].shift_kind == SHIFT_ASR)
10291 inst.instruction |= 0x00200000; /* sh bit */
10292
10293 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10294 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10295 }
10296 inst.reloc.type = BFD_RELOC_UNUSED;
10297 }
10298 }
10299
10300 static void
10301 do_t_usat16 (void)
10302 {
10303 inst.instruction |= inst.operands[0].reg << 8;
10304 inst.instruction |= inst.operands[1].imm;
10305 inst.instruction |= inst.operands[2].reg << 16;
10306 }
10307
10308 /* Neon instruction encoder helpers. */
10309
10310 /* Encodings for the different types for various Neon opcodes. */
10311
10312 /* An "invalid" code for the following tables. */
10313 #define N_INV -1u
10314
10315 struct neon_tab_entry
10316 {
10317 unsigned integer;
10318 unsigned float_or_poly;
10319 unsigned scalar_or_imm;
10320 };
10321
10322 /* Map overloaded Neon opcodes to their respective encodings. */
10323 #define NEON_ENC_TAB \
10324 X(vabd, 0x0000700, 0x1200d00, N_INV), \
10325 X(vmax, 0x0000600, 0x0000f00, N_INV), \
10326 X(vmin, 0x0000610, 0x0200f00, N_INV), \
10327 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
10328 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
10329 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
10330 X(vadd, 0x0000800, 0x0000d00, N_INV), \
10331 X(vsub, 0x1000800, 0x0200d00, N_INV), \
10332 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
10333 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
10334 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
10335 /* Register variants of the following two instructions are encoded as
10336 vcge / vcgt with the operands reversed. */ \
10337 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
10338 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
10339 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
10340 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
10341 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
10342 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
10343 X(vmlal, 0x0800800, N_INV, 0x0800240), \
10344 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
10345 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
10346 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
10347 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
10348 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
10349 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
10350 X(vshl, 0x0000400, N_INV, 0x0800510), \
10351 X(vqshl, 0x0000410, N_INV, 0x0800710), \
10352 X(vand, 0x0000110, N_INV, 0x0800030), \
10353 X(vbic, 0x0100110, N_INV, 0x0800030), \
10354 X(veor, 0x1000110, N_INV, N_INV), \
10355 X(vorn, 0x0300110, N_INV, 0x0800010), \
10356 X(vorr, 0x0200110, N_INV, 0x0800010), \
10357 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
10358 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
10359 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
10360 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
10361 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
10362 X(vst1, 0x0000000, 0x0800000, N_INV), \
10363 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
10364 X(vst2, 0x0000100, 0x0800100, N_INV), \
10365 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
10366 X(vst3, 0x0000200, 0x0800200, N_INV), \
10367 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
10368 X(vst4, 0x0000300, 0x0800300, N_INV), \
10369 X(vmovn, 0x1b20200, N_INV, N_INV), \
10370 X(vtrn, 0x1b20080, N_INV, N_INV), \
10371 X(vqmovn, 0x1b20200, N_INV, N_INV), \
10372 X(vqmovun, 0x1b20240, N_INV, N_INV), \
10373 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
10374 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
10375 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
10376 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
10377 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
10378 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
10379 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
10380
10381 enum neon_opc
10382 {
10383 #define X(OPC,I,F,S) N_MNEM_##OPC
10384 NEON_ENC_TAB
10385 #undef X
10386 };
10387
10388 static const struct neon_tab_entry neon_enc_tab[] =
10389 {
10390 #define X(OPC,I,F,S) { (I), (F), (S) }
10391 NEON_ENC_TAB
10392 #undef X
10393 };
10394
10395 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10396 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10397 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10398 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10399 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10400 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10401 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10402 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10403 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10404 #define NEON_ENC_SINGLE(X) \
10405 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
10406 #define NEON_ENC_DOUBLE(X) \
10407 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
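/* As an example of how the above fits together: the table row
   X(vadd, 0x0000800, 0x0000d00, N_INV) produces the enumerator N_MNEM_vadd
   and the neon_enc_tab entry { 0x0000800, 0x0000d00, N_INV }, so
   NEON_ENC_INTEGER (N_MNEM_vadd) is 0x0000800 and NEON_ENC_FLOAT
   (N_MNEM_vadd) is 0x0000d00.  The 0x0fffffff mask discards any condition
   value held in the top four bits of inst.instruction, which NEON_ENC_SINGLE
   and NEON_ENC_DOUBLE preserve.  */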
10408
10409 /* Define shapes for instruction operands. The following mnemonic characters
10410 are used in this table:
10411
10412 F - VFP S<n> register
10413 D - Neon D<n> register
10414 Q - Neon Q<n> register
10415 I - Immediate
10416 S - Scalar
10417 R - ARM register
10418 L - D<n> register list
10419
10420 This table is used to generate various data:
10421 - enumerations of the form NS_DDR to be used as arguments to
10422 neon_select_shape.
10423 - a table classifying shapes into single, double, quad, mixed.
10424 - a table used to drive neon_select_shape.
10425 */
10426
10427 #define NEON_SHAPE_DEF \
10428 X(3, (D, D, D), DOUBLE), \
10429 X(3, (Q, Q, Q), QUAD), \
10430 X(3, (D, D, I), DOUBLE), \
10431 X(3, (Q, Q, I), QUAD), \
10432 X(3, (D, D, S), DOUBLE), \
10433 X(3, (Q, Q, S), QUAD), \
10434 X(2, (D, D), DOUBLE), \
10435 X(2, (Q, Q), QUAD), \
10436 X(2, (D, S), DOUBLE), \
10437 X(2, (Q, S), QUAD), \
10438 X(2, (D, R), DOUBLE), \
10439 X(2, (Q, R), QUAD), \
10440 X(2, (D, I), DOUBLE), \
10441 X(2, (Q, I), QUAD), \
10442 X(3, (D, L, D), DOUBLE), \
10443 X(2, (D, Q), MIXED), \
10444 X(2, (Q, D), MIXED), \
10445 X(3, (D, Q, I), MIXED), \
10446 X(3, (Q, D, I), MIXED), \
10447 X(3, (Q, D, D), MIXED), \
10448 X(3, (D, Q, Q), MIXED), \
10449 X(3, (Q, Q, D), MIXED), \
10450 X(3, (Q, D, S), MIXED), \
10451 X(3, (D, Q, S), MIXED), \
10452 X(4, (D, D, D, I), DOUBLE), \
10453 X(4, (Q, Q, Q, I), QUAD), \
10454 X(2, (F, F), SINGLE), \
10455 X(3, (F, F, F), SINGLE), \
10456 X(2, (F, I), SINGLE), \
10457 X(2, (F, D), MIXED), \
10458 X(2, (D, F), MIXED), \
10459 X(3, (F, F, I), MIXED), \
10460 X(4, (R, R, F, F), SINGLE), \
10461 X(4, (F, F, R, R), SINGLE), \
10462 X(3, (D, R, R), DOUBLE), \
10463 X(3, (R, R, D), DOUBLE), \
10464 X(2, (S, R), SINGLE), \
10465 X(2, (R, S), SINGLE), \
10466 X(2, (F, R), SINGLE), \
10467 X(2, (R, F), SINGLE)
10468
10469 #define S2(A,B) NS_##A##B
10470 #define S3(A,B,C) NS_##A##B##C
10471 #define S4(A,B,C,D) NS_##A##B##C##D
10472
10473 #define X(N, L, C) S##N L
10474
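/* For instance, the first NEON_SHAPE_DEF entry, X(3, (D, D, D), DOUBLE),
   expands here to S3 (D, D, D) and hence to the enumerator NS_DDD.  The
   class and element tables below redefine X over the same NEON_SHAPE_DEF
   list so that they stay in step with this enumeration.  */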
10475 enum neon_shape
10476 {
10477 NEON_SHAPE_DEF,
10478 NS_NULL
10479 };
10480
10481 #undef X
10482 #undef S2
10483 #undef S3
10484 #undef S4
10485
10486 enum neon_shape_class
10487 {
10488 SC_SINGLE,
10489 SC_DOUBLE,
10490 SC_QUAD,
10491 SC_MIXED
10492 };
10493
10494 #define X(N, L, C) SC_##C
10495
10496 static enum neon_shape_class neon_shape_class[] =
10497 {
10498 NEON_SHAPE_DEF
10499 };
10500
10501 #undef X
10502
10503 enum neon_shape_el
10504 {
10505 SE_F,
10506 SE_D,
10507 SE_Q,
10508 SE_I,
10509 SE_S,
10510 SE_R,
10511 SE_L
10512 };
10513
10514 /* Register widths of above. */
10515 static unsigned neon_shape_el_size[] =
10516 {
10517 32,
10518 64,
10519 128,
10520 0,
10521 32,
10522 32,
10523 0
10524 };
10525
10526 struct neon_shape_info
10527 {
10528 unsigned els;
10529 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
10530 };
10531
10532 #define S2(A,B) { SE_##A, SE_##B }
10533 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
10534 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
10535
10536 #define X(N, L, C) { N, S##N L }
10537
10538 static struct neon_shape_info neon_shape_tab[] =
10539 {
10540 NEON_SHAPE_DEF
10541 };
10542
10543 #undef X
10544 #undef S2
10545 #undef S3
10546 #undef S4
10547
10548 /* Bit masks used in type checking given instructions.
10549 'N_EQK' means the type must be the same as (or based on in some way) the key
10550 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
10551 set, various other bits can be set as well in order to modify the meaning of
10552 the type constraint. */
10553
10554 enum neon_type_mask
10555 {
10556 N_S8 = 0x000001,
10557 N_S16 = 0x000002,
10558 N_S32 = 0x000004,
10559 N_S64 = 0x000008,
10560 N_U8 = 0x000010,
10561 N_U16 = 0x000020,
10562 N_U32 = 0x000040,
10563 N_U64 = 0x000080,
10564 N_I8 = 0x000100,
10565 N_I16 = 0x000200,
10566 N_I32 = 0x000400,
10567 N_I64 = 0x000800,
10568 N_8 = 0x001000,
10569 N_16 = 0x002000,
10570 N_32 = 0x004000,
10571 N_64 = 0x008000,
10572 N_P8 = 0x010000,
10573 N_P16 = 0x020000,
10574 N_F32 = 0x040000,
10575 N_F64 = 0x080000,
10576 N_KEY = 0x100000, /* key element (main type specifier). */
10577 N_EQK = 0x200000, /* given operand has the same type & size as the key. */
10578 N_VFP = 0x400000, /* VFP mode: operand size must match register width. */
10579 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
10580 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
10581 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
10582 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
10583 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
10584 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
10585 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
10586 N_UTYP = 0,
10587 N_MAX_NONSPECIAL = N_F64
10588 };
10589
10590 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
10591
10592 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
10593 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
10594 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
10595 #define N_SUF_32 (N_SU_32 | N_F32)
10596 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
10597 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
10598
10599 /* Pass this as the first type argument to neon_check_type to ignore types
10600 altogether. */
10601 #define N_IGNORE_TYPE (N_KEY | N_EQK)
10602
10603 /* Select a "shape" for the current instruction (describing register types or
10604 sizes) from a list of alternatives. Return NS_NULL if the current instruction
10605 doesn't fit. For non-polymorphic shapes, checking is usually done as a
10606 function of operand parsing, so this function doesn't need to be called.
10607 Shapes should be listed in order of decreasing length. */
10608
10609 static enum neon_shape
10610 neon_select_shape (enum neon_shape shape, ...)
10611 {
10612 va_list ap;
10613 enum neon_shape first_shape = shape;
10614
10615 /* Fix missing optional operands. FIXME: we don't know at this point how
10616 many arguments we should have, so this makes the assumption that we have
10617 > 1. This is true of all current Neon opcodes, I think, but may not be
10618 true in the future. */
10619 if (!inst.operands[1].present)
10620 inst.operands[1] = inst.operands[0];
10621
10622 va_start (ap, shape);
10623
10624 for (; shape != NS_NULL; shape = va_arg (ap, int))
10625 {
10626 unsigned j;
10627 int matches = 1;
10628
10629 for (j = 0; j < neon_shape_tab[shape].els; j++)
10630 {
10631 if (!inst.operands[j].present)
10632 {
10633 matches = 0;
10634 break;
10635 }
10636
10637 switch (neon_shape_tab[shape].el[j])
10638 {
10639 case SE_F:
10640 if (!(inst.operands[j].isreg
10641 && inst.operands[j].isvec
10642 && inst.operands[j].issingle
10643 && !inst.operands[j].isquad))
10644 matches = 0;
10645 break;
10646
10647 case SE_D:
10648 if (!(inst.operands[j].isreg
10649 && inst.operands[j].isvec
10650 && !inst.operands[j].isquad
10651 && !inst.operands[j].issingle))
10652 matches = 0;
10653 break;
10654
10655 case SE_R:
10656 if (!(inst.operands[j].isreg
10657 && !inst.operands[j].isvec))
10658 matches = 0;
10659 break;
10660
10661 case SE_Q:
10662 if (!(inst.operands[j].isreg
10663 && inst.operands[j].isvec
10664 && inst.operands[j].isquad
10665 && !inst.operands[j].issingle))
10666 matches = 0;
10667 break;
10668
10669 case SE_I:
10670 if (!(!inst.operands[j].isreg
10671 && !inst.operands[j].isscalar))
10672 matches = 0;
10673 break;
10674
10675 case SE_S:
10676 if (!(!inst.operands[j].isreg
10677 && inst.operands[j].isscalar))
10678 matches = 0;
10679 break;
10680
10681 case SE_L:
10682 break;
10683 }
10684 }
10685 if (matches)
10686 break;
10687 }
10688
10689 va_end (ap);
10690
10691 if (shape == NS_NULL && first_shape != NS_NULL)
10692 first_error (_("invalid instruction shape"));
10693
10694 return shape;
10695 }
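
/* A typical call, as used by the VFP encoders further down:
     rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
   returns NS_FF when both operands are single-precision S registers, NS_DD
   when both are D registers, and NS_NULL (after reporting "invalid
   instruction shape") when neither alternative matches.  */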
10696
10697 /* True if SHAPE is predominantly a quadword operation (most of the time, this
10698 means the Q bit should be set). */
10699
10700 static int
10701 neon_quad (enum neon_shape shape)
10702 {
10703 return neon_shape_class[shape] == SC_QUAD;
10704 }
10705
10706 static void
10707 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
10708 unsigned *g_size)
10709 {
10710 /* Allow modification to be made to types which are constrained to be
10711 based on the key element, based on bits set alongside N_EQK. */
10712 if ((typebits & N_EQK) != 0)
10713 {
10714 if ((typebits & N_HLF) != 0)
10715 *g_size /= 2;
10716 else if ((typebits & N_DBL) != 0)
10717 *g_size *= 2;
10718 if ((typebits & N_SGN) != 0)
10719 *g_type = NT_signed;
10720 else if ((typebits & N_UNS) != 0)
10721 *g_type = NT_unsigned;
10722 else if ((typebits & N_INT) != 0)
10723 *g_type = NT_integer;
10724 else if ((typebits & N_FLT) != 0)
10725 *g_type = NT_float;
10726 else if ((typebits & N_SIZ) != 0)
10727 *g_type = NT_untyped;
10728 }
10729 }
10730
10731 /* Return a type derived from *KEY by applying the modifier bits set in
10732    THISARG.  KEY should be the "key" operand type, i.e. the single type
10733    specified in a Neon instruction when it is the only one given. */
10734
10735 static struct neon_type_el
10736 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
10737 {
10738 struct neon_type_el dest = *key;
10739
10740 assert ((thisarg & N_EQK) != 0);
10741
10742 neon_modify_type_size (thisarg, &dest.type, &dest.size);
10743
10744 return dest;
10745 }
10746
10747 /* Convert Neon type and size into compact bitmask representation. */
10748
10749 static enum neon_type_mask
10750 type_chk_of_el_type (enum neon_el_type type, unsigned size)
10751 {
10752 switch (type)
10753 {
10754 case NT_untyped:
10755 switch (size)
10756 {
10757 case 8: return N_8;
10758 case 16: return N_16;
10759 case 32: return N_32;
10760 case 64: return N_64;
10761 default: ;
10762 }
10763 break;
10764
10765 case NT_integer:
10766 switch (size)
10767 {
10768 case 8: return N_I8;
10769 case 16: return N_I16;
10770 case 32: return N_I32;
10771 case 64: return N_I64;
10772 default: ;
10773 }
10774 break;
10775
10776 case NT_float:
10777 switch (size)
10778 {
10779 case 32: return N_F32;
10780 case 64: return N_F64;
10781 default: ;
10782 }
10783 break;
10784
10785 case NT_poly:
10786 switch (size)
10787 {
10788 case 8: return N_P8;
10789 case 16: return N_P16;
10790 default: ;
10791 }
10792 break;
10793
10794 case NT_signed:
10795 switch (size)
10796 {
10797 case 8: return N_S8;
10798 case 16: return N_S16;
10799 case 32: return N_S32;
10800 case 64: return N_S64;
10801 default: ;
10802 }
10803 break;
10804
10805 case NT_unsigned:
10806 switch (size)
10807 {
10808 case 8: return N_U8;
10809 case 16: return N_U16;
10810 case 32: return N_U32;
10811 case 64: return N_U64;
10812 default: ;
10813 }
10814 break;
10815
10816 default: ;
10817 }
10818
10819 return N_UTYP;
10820 }
10821
10822 /* Convert compact Neon bitmask type representation to a type and size. Only
10823 handles the case where a single bit is set in the mask. */
10824
10825 static int
10826 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
10827 enum neon_type_mask mask)
10828 {
10829 if ((mask & N_EQK) != 0)
10830 return FAIL;
10831
10832 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
10833 *size = 8;
10834 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
10835 *size = 16;
10836 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
10837 *size = 32;
10838 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
10839 *size = 64;
10840 else
10841 return FAIL;
10842
10843 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
10844 *type = NT_signed;
10845 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
10846 *type = NT_unsigned;
10847 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
10848 *type = NT_integer;
10849 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
10850 *type = NT_untyped;
10851 else if ((mask & (N_P8 | N_P16)) != 0)
10852 *type = NT_poly;
10853 else if ((mask & (N_F32 | N_F64)) != 0)
10854 *type = NT_float;
10855 else
10856 return FAIL;
10857
10858 return SUCCESS;
10859 }
10860
10861 /* Modify a bitmask of allowed types. This is only needed for type
10862 relaxation. */
10863
10864 static unsigned
10865 modify_types_allowed (unsigned allowed, unsigned mods)
10866 {
10867 unsigned size;
10868 enum neon_el_type type;
10869 unsigned destmask;
10870 int i;
10871
10872 destmask = 0;
10873
10874 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
10875 {
10876 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
10877 {
10878 neon_modify_type_size (mods, &type, &size);
10879 destmask |= type_chk_of_el_type (type, size);
10880 }
10881 }
10882
10883 return destmask;
10884 }
10885
10886 /* Check type and return type classification.
10887 The manual states (paraphrase): If one datatype is given, it indicates the
10888 type given in:
10889 - the second operand, if there is one
10890 - the operand, if there is no second operand
10891 - the result, if there are no operands.
10892 This isn't quite good enough though, so we use a concept of a "key" datatype
10893 which is set on a per-instruction basis, which is the one which matters when
10894 only one data type is written.
10895 Note: this function has side-effects (e.g. filling in missing operands). All
10896 Neon instructions should call it before performing bit encoding. */
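/* For example, do_neon_dyadic_i_su below calls
     neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);
   here operand 2 carries the key type (one of the signed or unsigned
   8/16/32-bit types) and operands 0 and 1 are required to match it.  */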
10897
10898 static struct neon_type_el
10899 neon_check_type (unsigned els, enum neon_shape ns, ...)
10900 {
10901 va_list ap;
10902 unsigned i, pass, key_el = 0;
10903 unsigned types[NEON_MAX_TYPE_ELS];
10904 enum neon_el_type k_type = NT_invtype;
10905 unsigned k_size = -1u;
10906 struct neon_type_el badtype = {NT_invtype, -1};
10907 unsigned key_allowed = 0;
10908
10909   /* The optional register in a Neon instruction is always operand 1, so that
10910      is the operand which may be absent.  Fill it in here if it was omitted. */
10911 if (els > 1 && !inst.operands[1].present)
10912 inst.operands[1] = inst.operands[0];
10913
10914 /* Suck up all the varargs. */
10915 va_start (ap, ns);
10916 for (i = 0; i < els; i++)
10917 {
10918 unsigned thisarg = va_arg (ap, unsigned);
10919 if (thisarg == N_IGNORE_TYPE)
10920 {
10921 va_end (ap);
10922 return badtype;
10923 }
10924 types[i] = thisarg;
10925 if ((thisarg & N_KEY) != 0)
10926 key_el = i;
10927 }
10928 va_end (ap);
10929
10930 if (inst.vectype.elems > 0)
10931 for (i = 0; i < els; i++)
10932 if (inst.operands[i].vectype.type != NT_invtype)
10933 {
10934 first_error (_("types specified in both the mnemonic and operands"));
10935 return badtype;
10936 }
10937
10938 /* Duplicate inst.vectype elements here as necessary.
10939 FIXME: No idea if this is exactly the same as the ARM assembler,
10940 particularly when an insn takes one register and one non-register
10941 operand. */
10942 if (inst.vectype.elems == 1 && els > 1)
10943 {
10944 unsigned j;
10945 inst.vectype.elems = els;
10946 inst.vectype.el[key_el] = inst.vectype.el[0];
10947 for (j = 0; j < els; j++)
10948 if (j != key_el)
10949 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10950 types[j]);
10951 }
10952 else if (inst.vectype.elems == 0 && els > 0)
10953 {
10954 unsigned j;
10955 /* No types were given after the mnemonic, so look for types specified
10956 after each operand. We allow some flexibility here; as long as the
10957 "key" operand has a type, we can infer the others. */
10958 for (j = 0; j < els; j++)
10959 if (inst.operands[j].vectype.type != NT_invtype)
10960 inst.vectype.el[j] = inst.operands[j].vectype;
10961
10962 if (inst.operands[key_el].vectype.type != NT_invtype)
10963 {
10964 for (j = 0; j < els; j++)
10965 if (inst.operands[j].vectype.type == NT_invtype)
10966 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10967 types[j]);
10968 }
10969 else
10970 {
10971 first_error (_("operand types can't be inferred"));
10972 return badtype;
10973 }
10974 }
10975 else if (inst.vectype.elems != els)
10976 {
10977 first_error (_("type specifier has the wrong number of parts"));
10978 return badtype;
10979 }
10980
10981 for (pass = 0; pass < 2; pass++)
10982 {
10983 for (i = 0; i < els; i++)
10984 {
10985 unsigned thisarg = types[i];
10986 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
10987 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
10988 enum neon_el_type g_type = inst.vectype.el[i].type;
10989 unsigned g_size = inst.vectype.el[i].size;
10990
10991 /* Decay more-specific signed & unsigned types to sign-insensitive
10992 integer types if sign-specific variants are unavailable. */
10993 if ((g_type == NT_signed || g_type == NT_unsigned)
10994 && (types_allowed & N_SU_ALL) == 0)
10995 g_type = NT_integer;
10996
10997 /* If only untyped args are allowed, decay any more specific types to
10998 them. Some instructions only care about signs for some element
10999 sizes, so handle that properly. */
11000 if ((g_size == 8 && (types_allowed & N_8) != 0)
11001 || (g_size == 16 && (types_allowed & N_16) != 0)
11002 || (g_size == 32 && (types_allowed & N_32) != 0)
11003 || (g_size == 64 && (types_allowed & N_64) != 0))
11004 g_type = NT_untyped;
11005
11006 if (pass == 0)
11007 {
11008 if ((thisarg & N_KEY) != 0)
11009 {
11010 k_type = g_type;
11011 k_size = g_size;
11012 key_allowed = thisarg & ~N_KEY;
11013 }
11014 }
11015 else
11016 {
11017 if ((thisarg & N_VFP) != 0)
11018 {
11019 enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
11020 unsigned regwidth = neon_shape_el_size[regshape], match;
11021
11022 /* In VFP mode, operands must match register widths. If we
11023 have a key operand, use its width, else use the width of
11024 the current operand. */
11025 if (k_size != -1u)
11026 match = k_size;
11027 else
11028 match = g_size;
11029
11030 if (regwidth != match)
11031 {
11032 first_error (_("operand size must match register width"));
11033 return badtype;
11034 }
11035 }
11036
11037 if ((thisarg & N_EQK) == 0)
11038 {
11039 unsigned given_type = type_chk_of_el_type (g_type, g_size);
11040
11041 if ((given_type & types_allowed) == 0)
11042 {
11043 first_error (_("bad type in Neon instruction"));
11044 return badtype;
11045 }
11046 }
11047 else
11048 {
11049 enum neon_el_type mod_k_type = k_type;
11050 unsigned mod_k_size = k_size;
11051 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
11052 if (g_type != mod_k_type || g_size != mod_k_size)
11053 {
11054 first_error (_("inconsistent types in Neon instruction"));
11055 return badtype;
11056 }
11057 }
11058 }
11059 }
11060 }
11061
11062 return inst.vectype.el[key_el];
11063 }
11064
11065 /* Neon-style VFP instruction forwarding. */
11066
11067 /* Thumb VFP instructions have 0xE in the condition field. */
11068
11069 static void
11070 do_vfp_cond_or_thumb (void)
11071 {
11072 if (thumb_mode)
11073 inst.instruction |= 0xe0000000;
11074 else
11075 inst.instruction |= inst.cond << 28;
11076 }
11077
11078 /* Look up and encode a simple mnemonic, for use as a helper function for the
11079 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
11080 etc. It is assumed that operand parsing has already been done, and that the
11081 operands are in the form expected by the given opcode (this isn't necessarily
11082 the same as the form in which they were parsed, hence some massaging must
11083 take place before this function is called).
11084 Checks current arch version against that in the looked-up opcode. */
11085
11086 static void
11087 do_vfp_nsyn_opcode (const char *opname)
11088 {
11089 const struct asm_opcode *opcode;
11090
11091 opcode = hash_find (arm_ops_hsh, opname);
11092
11093 if (!opcode)
11094 abort ();
11095
11096 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
11097 thumb_mode ? *opcode->tvariant : *opcode->avariant),
11098 _(BAD_FPU));
11099
11100 if (thumb_mode)
11101 {
11102 inst.instruction = opcode->tvalue;
11103 opcode->tencode ();
11104 }
11105 else
11106 {
11107 inst.instruction = (inst.cond << 28) | opcode->avalue;
11108 opcode->aencode ();
11109 }
11110 }
11111
11112 static void
11113 do_vfp_nsyn_add_sub (enum neon_shape rs)
11114 {
11115 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
11116
11117 if (rs == NS_FFF)
11118 {
11119 if (is_add)
11120 do_vfp_nsyn_opcode ("fadds");
11121 else
11122 do_vfp_nsyn_opcode ("fsubs");
11123 }
11124 else
11125 {
11126 if (is_add)
11127 do_vfp_nsyn_opcode ("faddd");
11128 else
11129 do_vfp_nsyn_opcode ("fsubd");
11130 }
11131 }
11132
11133 /* Check operand types to see if this is a VFP instruction, and if so call
11134 PFN (). */
11135
11136 static int
11137 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
11138 {
11139 enum neon_shape rs;
11140 struct neon_type_el et;
11141
11142 switch (args)
11143 {
11144 case 2:
11145 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11146 et = neon_check_type (2, rs,
11147 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11148 break;
11149
11150 case 3:
11151 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11152 et = neon_check_type (3, rs,
11153 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11154 break;
11155
11156 default:
11157 abort ();
11158 }
11159
11160 if (et.type != NT_invtype)
11161 {
11162 pfn (rs);
11163 return SUCCESS;
11164 }
11165 else
11166 inst.error = NULL;
11167
11168 return FAIL;
11169 }
11170
11171 static void
11172 do_vfp_nsyn_mla_mls (enum neon_shape rs)
11173 {
11174 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
11175
11176 if (rs == NS_FFF)
11177 {
11178 if (is_mla)
11179 do_vfp_nsyn_opcode ("fmacs");
11180 else
11181 do_vfp_nsyn_opcode ("fmscs");
11182 }
11183 else
11184 {
11185 if (is_mla)
11186 do_vfp_nsyn_opcode ("fmacd");
11187 else
11188 do_vfp_nsyn_opcode ("fmscd");
11189 }
11190 }
11191
11192 static void
11193 do_vfp_nsyn_mul (enum neon_shape rs)
11194 {
11195 if (rs == NS_FFF)
11196 do_vfp_nsyn_opcode ("fmuls");
11197 else
11198 do_vfp_nsyn_opcode ("fmuld");
11199 }
11200
11201 static void
11202 do_vfp_nsyn_abs_neg (enum neon_shape rs)
11203 {
11204 int is_neg = (inst.instruction & 0x80) != 0;
11205 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
11206
11207 if (rs == NS_FF)
11208 {
11209 if (is_neg)
11210 do_vfp_nsyn_opcode ("fnegs");
11211 else
11212 do_vfp_nsyn_opcode ("fabss");
11213 }
11214 else
11215 {
11216 if (is_neg)
11217 do_vfp_nsyn_opcode ("fnegd");
11218 else
11219 do_vfp_nsyn_opcode ("fabsd");
11220 }
11221 }
11222
11223 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11224 insns belong to Neon, and are handled elsewhere. */
11225
11226 static void
11227 do_vfp_nsyn_ldm_stm (int is_dbmode)
11228 {
11229 int is_ldm = (inst.instruction & (1 << 20)) != 0;
11230 if (is_ldm)
11231 {
11232 if (is_dbmode)
11233 do_vfp_nsyn_opcode ("fldmdbs");
11234 else
11235 do_vfp_nsyn_opcode ("fldmias");
11236 }
11237 else
11238 {
11239 if (is_dbmode)
11240 do_vfp_nsyn_opcode ("fstmdbs");
11241 else
11242 do_vfp_nsyn_opcode ("fstmias");
11243 }
11244 }
11245
11246 static void
11247 do_vfp_nsyn_sqrt (void)
11248 {
11249 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11250 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11251
11252 if (rs == NS_FF)
11253 do_vfp_nsyn_opcode ("fsqrts");
11254 else
11255 do_vfp_nsyn_opcode ("fsqrtd");
11256 }
11257
11258 static void
11259 do_vfp_nsyn_div (void)
11260 {
11261 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11262 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11263 N_F32 | N_F64 | N_KEY | N_VFP);
11264
11265 if (rs == NS_FFF)
11266 do_vfp_nsyn_opcode ("fdivs");
11267 else
11268 do_vfp_nsyn_opcode ("fdivd");
11269 }
11270
11271 static void
11272 do_vfp_nsyn_nmul (void)
11273 {
11274 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11275 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11276 N_F32 | N_F64 | N_KEY | N_VFP);
11277
11278 if (rs == NS_FFF)
11279 {
11280 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11281 do_vfp_sp_dyadic ();
11282 }
11283 else
11284 {
11285 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11286 do_vfp_dp_rd_rn_rm ();
11287 }
11288 do_vfp_cond_or_thumb ();
11289 }
11290
11291 static void
11292 do_vfp_nsyn_cmp (void)
11293 {
11294 if (inst.operands[1].isreg)
11295 {
11296 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11297 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11298
11299 if (rs == NS_FF)
11300 {
11301 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11302 do_vfp_sp_monadic ();
11303 }
11304 else
11305 {
11306 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11307 do_vfp_dp_rd_rm ();
11308 }
11309 }
11310 else
11311 {
11312 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
11313 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
11314
11315 switch (inst.instruction & 0x0fffffff)
11316 {
11317 case N_MNEM_vcmp:
11318 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
11319 break;
11320 case N_MNEM_vcmpe:
11321 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
11322 break;
11323 default:
11324 abort ();
11325 }
11326
11327 if (rs == NS_FI)
11328 {
11329 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11330 do_vfp_sp_compare_z ();
11331 }
11332 else
11333 {
11334 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11335 do_vfp_dp_rd ();
11336 }
11337 }
11338 do_vfp_cond_or_thumb ();
11339 }
11340
11341 static void
11342 nsyn_insert_sp (void)
11343 {
11344 inst.operands[1] = inst.operands[0];
11345 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
11346 inst.operands[0].reg = 13;
11347 inst.operands[0].isreg = 1;
11348 inst.operands[0].writeback = 1;
11349 inst.operands[0].present = 1;
11350 }
11351
11352 static void
11353 do_vfp_nsyn_push (void)
11354 {
11355 nsyn_insert_sp ();
11356 if (inst.operands[1].issingle)
11357 do_vfp_nsyn_opcode ("fstmdbs");
11358 else
11359 do_vfp_nsyn_opcode ("fstmdbd");
11360 }
11361
11362 static void
11363 do_vfp_nsyn_pop (void)
11364 {
11365 nsyn_insert_sp ();
11366 if (inst.operands[1].issingle)
11367 do_vfp_nsyn_opcode ("fldmias");
11368 else
11369 do_vfp_nsyn_opcode ("fldmiad");
11370 }
11371
11372 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11373 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11374
11375 static unsigned
11376 neon_dp_fixup (unsigned i)
11377 {
11378 if (thumb_mode)
11379 {
11380 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
11381 if (i & (1 << 24))
11382 i |= 1 << 28;
11383
11384 i &= ~(1 << 24);
11385
11386 i |= 0xef000000;
11387 }
11388 else
11389 i |= 0xf2000000;
11390
11391 return i;
11392 }
11393
11394 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
11395 (0, 1, 2, 3). */
11396
11397 static unsigned
11398 neon_logbits (unsigned x)
11399 {
11400 return ffs (x) - 4;
11401 }
11402
11403 #define LOW4(R) ((R) & 0xf)
11404 #define HI1(R) (((R) >> 4) & 1)
11405
11406 /* Encode insns with bit pattern:
11407
11408 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11409 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11410
11411 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11412 different meaning for some instruction. */
11413
11414 static void
11415 neon_three_same (int isquad, int ubit, int size)
11416 {
11417 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11418 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11419 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11420 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11421 inst.instruction |= LOW4 (inst.operands[2].reg);
11422 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11423 inst.instruction |= (isquad != 0) << 6;
11424 inst.instruction |= (ubit != 0) << 24;
11425 if (size != -1)
11426 inst.instruction |= neon_logbits (size) << 20;
11427
11428 inst.instruction = neon_dp_fixup (inst.instruction);
11429 }
11430
11431 /* Encode instructions of the form:
11432
11433 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11434 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11435
11436 Don't write size if SIZE == -1. */
11437
11438 static void
11439 neon_two_same (int qbit, int ubit, int size)
11440 {
11441 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11442 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11443 inst.instruction |= LOW4 (inst.operands[1].reg);
11444 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11445 inst.instruction |= (qbit != 0) << 6;
11446 inst.instruction |= (ubit != 0) << 24;
11447
11448 if (size != -1)
11449 inst.instruction |= neon_logbits (size) << 18;
11450
11451 inst.instruction = neon_dp_fixup (inst.instruction);
11452 }
11453
11454 /* Neon instruction encoders, in approximate order of appearance. */
11455
11456 static void
11457 do_neon_dyadic_i_su (void)
11458 {
11459 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11460 struct neon_type_el et = neon_check_type (3, rs,
11461 N_EQK, N_EQK, N_SU_32 | N_KEY);
11462 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11463 }
11464
11465 static void
11466 do_neon_dyadic_i64_su (void)
11467 {
11468 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11469 struct neon_type_el et = neon_check_type (3, rs,
11470 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11471 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11472 }
11473
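/* Encode a two-register Neon instruction with a shift immediate: the usual
   D/Q register fields and Q bit, IMMBITS into bits 16 and up, the element
   size (in bytes) folded into bits 21-19 and bit 7, and, when WRITE_UBIT is
   set, the U bit taken from UVAL.  Used, for instance, by the immediate
   forms of VSHL and VQSHL below.  */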
11474 static void
11475 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
11476 unsigned immbits)
11477 {
11478 unsigned size = et.size >> 3;
11479 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11480 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11481 inst.instruction |= LOW4 (inst.operands[1].reg);
11482 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11483 inst.instruction |= (isquad != 0) << 6;
11484 inst.instruction |= immbits << 16;
11485 inst.instruction |= (size >> 3) << 7;
11486 inst.instruction |= (size & 0x7) << 19;
11487 if (write_ubit)
11488 inst.instruction |= (uval != 0) << 24;
11489
11490 inst.instruction = neon_dp_fixup (inst.instruction);
11491 }
11492
11493 static void
11494 do_neon_shl_imm (void)
11495 {
11496 if (!inst.operands[2].isreg)
11497 {
11498 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11499 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
11500 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11501 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
11502 }
11503 else
11504 {
11505 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11506 struct neon_type_el et = neon_check_type (3, rs,
11507 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11508 unsigned int tmp;
11509
11510 /* VSHL/VQSHL 3-register variants have syntax such as:
11511 vshl.xx Dd, Dm, Dn
11512 whereas other 3-register operations encoded by neon_three_same have
11513 syntax like:
11514 vadd.xx Dd, Dn, Dm
11515 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
11516 here. */
11517 tmp = inst.operands[2].reg;
11518 inst.operands[2].reg = inst.operands[1].reg;
11519 inst.operands[1].reg = tmp;
11520 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11521 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11522 }
11523 }
11524
11525 static void
11526 do_neon_qshl_imm (void)
11527 {
11528 if (!inst.operands[2].isreg)
11529 {
11530 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11531 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11532
11533 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11534 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
11535 inst.operands[2].imm);
11536 }
11537 else
11538 {
11539 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11540 struct neon_type_el et = neon_check_type (3, rs,
11541 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11542 unsigned int tmp;
11543
11544 /* See note in do_neon_shl_imm. */
11545 tmp = inst.operands[2].reg;
11546 inst.operands[2].reg = inst.operands[1].reg;
11547 inst.operands[1].reg = tmp;
11548 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11549 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11550 }
11551 }
11552
11553 static void
11554 do_neon_rshl (void)
11555 {
11556 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11557 struct neon_type_el et = neon_check_type (3, rs,
11558 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11559 unsigned int tmp;
11560
11561 tmp = inst.operands[2].reg;
11562 inst.operands[2].reg = inst.operands[1].reg;
11563 inst.operands[1].reg = tmp;
11564 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11565 }
11566
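/* Choose a "cmode" encoding for the immediate form of the Neon logic
   instructions (VBIC/VORR and the pseudo-ops built on them).  On success the
   8-bit payload is written to *IMMBITS and the cmode value is returned; for
   example, with SIZE == 32 an immediate of 0x0000ab00 yields *IMMBITS = 0xab
   and cmode 0x3.  Unrepresentable immediates report an error and return
   FAIL.  */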
11567 static int
11568 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
11569 {
11570 /* Handle .I8 pseudo-instructions. */
11571 if (size == 8)
11572 {
11573 /* Unfortunately, this will make everything apart from zero out-of-range.
11574      FIXME: is this the intended semantics?  There doesn't seem much point in
11575 accepting .I8 if so. */
11576 immediate |= immediate << 8;
11577 size = 16;
11578 }
11579
11580 if (size >= 32)
11581 {
11582 if (immediate == (immediate & 0x000000ff))
11583 {
11584 *immbits = immediate;
11585 return 0x1;
11586 }
11587 else if (immediate == (immediate & 0x0000ff00))
11588 {
11589 *immbits = immediate >> 8;
11590 return 0x3;
11591 }
11592 else if (immediate == (immediate & 0x00ff0000))
11593 {
11594 *immbits = immediate >> 16;
11595 return 0x5;
11596 }
11597 else if (immediate == (immediate & 0xff000000))
11598 {
11599 *immbits = immediate >> 24;
11600 return 0x7;
11601 }
11602 if ((immediate & 0xffff) != (immediate >> 16))
11603 goto bad_immediate;
11604 immediate &= 0xffff;
11605 }
11606
11607 if (immediate == (immediate & 0x000000ff))
11608 {
11609 *immbits = immediate;
11610 return 0x9;
11611 }
11612 else if (immediate == (immediate & 0x0000ff00))
11613 {
11614 *immbits = immediate >> 8;
11615 return 0xb;
11616 }
11617
11618 bad_immediate:
11619 first_error (_("immediate value out of range"));
11620 return FAIL;
11621 }
11622
11623 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
11624 A, B, C, D. */
11625
11626 static int
11627 neon_bits_same_in_bytes (unsigned imm)
11628 {
11629 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
11630 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
11631 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
11632 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
11633 }
11634
11635 /* For immediate of above form, return 0bABCD. */
11636
11637 static unsigned
11638 neon_squash_bits (unsigned imm)
11639 {
11640 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
11641 | ((imm & 0x01000000) >> 21);
11642 }
11643
11644 /* Compress quarter-float representation to 0b...000 abcdefgh. */
11645
11646 static unsigned
11647 neon_qfloat_bits (unsigned imm)
11648 {
11649 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
11650 }
11651
11652 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11653 the instruction. *OP is passed as the initial value of the op field, and
11654    may be set to a different value depending on the constant (e.g.
11655    "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
11656    MVN).  If the immediate looks like a repeated pattern then smaller
11657    element sizes are also tried. */
11658
11659 static int
11660 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
11661 unsigned *immbits, int *op, int size,
11662 enum neon_el_type type)
11663 {
11664 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
11665 float. */
11666 if (type == NT_float && !float_p)
11667 return FAIL;
11668
11669 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
11670 {
11671 if (size != 32 || *op == 1)
11672 return FAIL;
11673 *immbits = neon_qfloat_bits (immlo);
11674 return 0xf;
11675 }
11676
11677 if (size == 64)
11678 {
11679 if (neon_bits_same_in_bytes (immhi)
11680 && neon_bits_same_in_bytes (immlo))
11681 {
11682 if (*op == 1)
11683 return FAIL;
11684 *immbits = (neon_squash_bits (immhi) << 4)
11685 | neon_squash_bits (immlo);
11686 *op = 1;
11687 return 0xe;
11688 }
11689
11690 if (immhi != immlo)
11691 return FAIL;
11692 }
11693
11694 if (size >= 32)
11695 {
11696 if (immlo == (immlo & 0x000000ff))
11697 {
11698 *immbits = immlo;
11699 return 0x0;
11700 }
11701 else if (immlo == (immlo & 0x0000ff00))
11702 {
11703 *immbits = immlo >> 8;
11704 return 0x2;
11705 }
11706 else if (immlo == (immlo & 0x00ff0000))
11707 {
11708 *immbits = immlo >> 16;
11709 return 0x4;
11710 }
11711 else if (immlo == (immlo & 0xff000000))
11712 {
11713 *immbits = immlo >> 24;
11714 return 0x6;
11715 }
11716 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
11717 {
11718 *immbits = (immlo >> 8) & 0xff;
11719 return 0xc;
11720 }
11721 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
11722 {
11723 *immbits = (immlo >> 16) & 0xff;
11724 return 0xd;
11725 }
11726
11727 if ((immlo & 0xffff) != (immlo >> 16))
11728 return FAIL;
11729 immlo &= 0xffff;
11730 }
11731
11732 if (size >= 16)
11733 {
11734 if (immlo == (immlo & 0x000000ff))
11735 {
11736 *immbits = immlo;
11737 return 0x8;
11738 }
11739 else if (immlo == (immlo & 0x0000ff00))
11740 {
11741 *immbits = immlo >> 8;
11742 return 0xa;
11743 }
11744
11745 if ((immlo & 0xff) != (immlo >> 8))
11746 return FAIL;
11747 immlo &= 0xff;
11748 }
11749
11750 if (immlo == (immlo & 0x000000ff))
11751 {
11752 /* Don't allow MVN with 8-bit immediate. */
11753 if (*op == 1)
11754 return FAIL;
11755 *immbits = immlo;
11756 return 0xe;
11757 }
11758
11759 return FAIL;
11760 }
11761
11762 /* Write immediate bits [7:0] to the following locations:
11763
11764 |28/24|23 19|18 16|15 4|3 0|
11765 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11766
11767 This function is used by VMOV/VMVN/VORR/VBIC. */
11768
11769 static void
11770 neon_write_immbits (unsigned immbits)
11771 {
11772 inst.instruction |= immbits & 0xf;
11773 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
11774 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
11775 }
11776
11777 /* Invert low-order SIZE bits of XHI:XLO. */
11778
11779 static void
11780 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
11781 {
11782 unsigned immlo = xlo ? *xlo : 0;
11783 unsigned immhi = xhi ? *xhi : 0;
11784
11785 switch (size)
11786 {
11787 case 8:
11788 immlo = (~immlo) & 0xff;
11789 break;
11790
11791 case 16:
11792 immlo = (~immlo) & 0xffff;
11793 break;
11794
11795 case 64:
11796 immhi = (~immhi) & 0xffffffff;
11797 /* fall through. */
11798
11799 case 32:
11800 immlo = (~immlo) & 0xffffffff;
11801 break;
11802
11803 default:
11804 abort ();
11805 }
11806
11807 if (xlo)
11808 *xlo = immlo;
11809
11810 if (xhi)
11811 *xhi = immhi;
11812 }
11813
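/* Encode the Neon bitwise logic operations.  With a register third operand
   this is an ordinary three-same encoding; with an immediate operand, VAND
   and VORN are implemented as VBIC and VORR of the bitwise-inverted
   immediate, and the value is packed into a cmode/imm8 pair by
   neon_cmode_for_logic_imm above.  */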
11814 static void
11815 do_neon_logic (void)
11816 {
11817 if (inst.operands[2].present && inst.operands[2].isreg)
11818 {
11819 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11820 neon_check_type (3, rs, N_IGNORE_TYPE);
11821 /* U bit and size field were set as part of the bitmask. */
11822 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11823 neon_three_same (neon_quad (rs), 0, -1);
11824 }
11825 else
11826 {
11827 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
11828 struct neon_type_el et = neon_check_type (2, rs,
11829 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
11830 enum neon_opc opcode = inst.instruction & 0x0fffffff;
11831 unsigned immbits;
11832 int cmode;
11833
11834 if (et.type == NT_invtype)
11835 return;
11836
11837 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11838
11839 immbits = inst.operands[1].imm;
11840 if (et.size == 64)
11841 {
11842 /* .i64 is a pseudo-op, so the immediate must be a repeating
11843 pattern. */
11844 if (immbits != (inst.operands[1].regisimm ?
11845 inst.operands[1].reg : 0))
11846 {
11847 /* Set immbits to an invalid constant. */
11848 immbits = 0xdeadbeef;
11849 }
11850 }
11851
11852 switch (opcode)
11853 {
11854 case N_MNEM_vbic:
11855 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11856 break;
11857
11858 case N_MNEM_vorr:
11859 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11860 break;
11861
11862 case N_MNEM_vand:
11863 /* Pseudo-instruction for VBIC. */
11864 neon_invert_size (&immbits, 0, et.size);
11865 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11866 break;
11867
11868 case N_MNEM_vorn:
11869 /* Pseudo-instruction for VORR. */
11870 neon_invert_size (&immbits, 0, et.size);
11871 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11872 break;
11873
11874 default:
11875 abort ();
11876 }
11877
11878 if (cmode == FAIL)
11879 return;
11880
11881 inst.instruction |= neon_quad (rs) << 6;
11882 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11883 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11884 inst.instruction |= cmode << 8;
11885 neon_write_immbits (immbits);
11886
11887 inst.instruction = neon_dp_fixup (inst.instruction);
11888 }
11889 }
11890
11891 static void
11892 do_neon_bitfield (void)
11893 {
11894 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11895 neon_check_type (3, rs, N_IGNORE_TYPE);
11896 neon_three_same (neon_quad (rs), 0, -1);
11897 }
11898
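/* Shared worker for three-register operations that may be either integer or
   floating-point.  TYPES is the set of key types accepted, DESTBITS is ORed
   into the destination's N_EQK constraint (e.g. N_SIZ in neon_compare
   below), and UBIT_MEANING names the element type that should set the U bit:
   NT_unsigned for signed/unsigned pairs, NT_untyped when the bit must stay
   clear.  */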
11899 static void
11900 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
11901 unsigned destbits)
11902 {
11903 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11904 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
11905 types | N_KEY);
11906 if (et.type == NT_float)
11907 {
11908 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
11909 neon_three_same (neon_quad (rs), 0, -1);
11910 }
11911 else
11912 {
11913 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11914 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
11915 }
11916 }
11917
11918 static void
11919 do_neon_dyadic_if_su (void)
11920 {
11921 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11922 }
11923
11924 static void
11925 do_neon_dyadic_if_su_d (void)
11926 {
11927   /* This version only allows D registers, but that constraint is enforced during
11928      operand parsing, so we don't need to do anything extra here. */
11929 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11930 }
11931
11932 static void
11933 do_neon_dyadic_if_i_d (void)
11934 {
11935 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11936 affected if we specify unsigned args. */
11937 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
11938 }
11939
11940 enum vfp_or_neon_is_neon_bits
11941 {
11942 NEON_CHECK_CC = 1,
11943 NEON_CHECK_ARCH = 2
11944 };
11945
11946 /* Call this function if an instruction which may have belonged to the VFP or
11947 Neon instruction sets, but turned out to be a Neon instruction (due to the
11948 operand types involved, etc.). We have to check and/or fix-up a couple of
11949 things:
11950
11951 - Make sure the user hasn't attempted to make a Neon instruction
11952 conditional.
11953 - Alter the value in the condition code field if necessary.
11954 - Make sure that the arch supports Neon instructions.
11955
11956 Which of these operations take place depends on bits from enum
11957 vfp_or_neon_is_neon_bits.
11958
11959 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11960 current instruction's condition is COND_ALWAYS, the condition field is
11961 changed to inst.uncond_value. This is necessary because instructions shared
11962 between VFP and Neon may be conditional for the VFP variants only, and the
11963 unconditional Neon version must have, e.g., 0xF in the condition field. */
11964
11965 static int
11966 vfp_or_neon_is_neon (unsigned check)
11967 {
11968 /* Conditions are always legal in Thumb mode (IT blocks). */
11969 if (!thumb_mode && (check & NEON_CHECK_CC))
11970 {
11971 if (inst.cond != COND_ALWAYS)
11972 {
11973 first_error (_(BAD_COND));
11974 return FAIL;
11975 }
11976 if (inst.uncond_value != -1)
11977 inst.instruction |= inst.uncond_value << 28;
11978 }
11979
11980 if ((check & NEON_CHECK_ARCH)
11981 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
11982 {
11983 first_error (_(BAD_FPU));
11984 return FAIL;
11985 }
11986
11987 return SUCCESS;
11988 }
11989
11990 static void
11991 do_neon_addsub_if_i (void)
11992 {
11993 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
11994 return;
11995
11996 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11997 return;
11998
11999 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12000 affected if we specify unsigned args. */
12001 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
12002 }
12003
12004 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
12005 result to be:
12006 V<op> A,B (A is operand 0, B is operand 2)
12007 to mean:
12008 V<op> A,B,A
12009 not:
12010 V<op> A,B,B
12011 so handle that case specially. */
12012
12013 static void
12014 neon_exchange_operands (void)
12015 {
12016 void *scratch = alloca (sizeof (inst.operands[0]));
12017 if (inst.operands[1].present)
12018 {
12019 /* Swap operands[1] and operands[2]. */
12020 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
12021 inst.operands[1] = inst.operands[2];
12022 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
12023 }
12024 else
12025 {
12026 inst.operands[1] = inst.operands[2];
12027 inst.operands[2] = inst.operands[0];
12028 }
12029 }
12030
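/* Encode Neon comparison instructions. Register-register forms use the
   three-same encoding; an immediate third operand selects the compare-
   against-#0 encoding. INVERT swaps the two register operands so that
   reversed comparisons can reuse the encoding of their counterparts. */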
12031 static void
12032 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
12033 {
12034 if (inst.operands[2].isreg)
12035 {
12036 if (invert)
12037 neon_exchange_operands ();
12038 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
12039 }
12040 else
12041 {
12042 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12043 struct neon_type_el et = neon_check_type (2, rs,
12044 N_EQK | N_SIZ, immtypes | N_KEY);
12045
12046 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12047 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12048 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12049 inst.instruction |= LOW4 (inst.operands[1].reg);
12050 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12051 inst.instruction |= neon_quad (rs) << 6;
12052 inst.instruction |= (et.type == NT_float) << 10;
12053 inst.instruction |= neon_logbits (et.size) << 18;
12054
12055 inst.instruction = neon_dp_fixup (inst.instruction);
12056 }
12057 }
12058
12059 static void
12060 do_neon_cmp (void)
12061 {
12062 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
12063 }
12064
12065 static void
12066 do_neon_cmp_inv (void)
12067 {
12068 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
12069 }
12070
12071 static void
12072 do_neon_ceq (void)
12073 {
12074 neon_compare (N_IF_32, N_IF_32, FALSE);
12075 }
12076
12077 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
12078 scalars, which are encoded in 5 bits, M : Rm.
12079 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
12080 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
12081 index in M. */
12082
12083 static unsigned
12084 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
12085 {
12086 unsigned regno = NEON_SCALAR_REG (scalar);
12087 unsigned elno = NEON_SCALAR_INDEX (scalar);
12088
12089 switch (elsize)
12090 {
12091 case 16:
12092 if (regno > 7 || elno > 3)
12093 goto bad_scalar;
12094 return regno | (elno << 3);
12095
12096 case 32:
12097 if (regno > 15 || elno > 1)
12098 goto bad_scalar;
12099 return regno | (elno << 4);
12100
12101 default:
12102 bad_scalar:
12103 first_error (_("scalar out of range for multiply instruction"));
12104 }
12105
12106 return 0;
12107 }
12108
12109 /* Encode multiply / multiply-accumulate scalar instructions. */
12110
12111 static void
12112 neon_mul_mac (struct neon_type_el et, int ubit)
12113 {
12114 unsigned scalar;
12115
12116 /* Give a more helpful error message if we have an invalid type. */
12117 if (et.type == NT_invtype)
12118 return;
12119
12120 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
12121 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12122 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12123 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12124 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12125 inst.instruction |= LOW4 (scalar);
12126 inst.instruction |= HI1 (scalar) << 5;
12127 inst.instruction |= (et.type == NT_float) << 8;
12128 inst.instruction |= neon_logbits (et.size) << 20;
12129 inst.instruction |= (ubit != 0) << 24;
12130
12131 inst.instruction = neon_dp_fixup (inst.instruction);
12132 }
12133
12134 static void
12135 do_neon_mac_maybe_scalar (void)
12136 {
12137 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
12138 return;
12139
12140 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12141 return;
12142
12143 if (inst.operands[2].isscalar)
12144 {
12145 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12146 struct neon_type_el et = neon_check_type (3, rs,
12147 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
12148 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12149 neon_mul_mac (et, neon_quad (rs));
12150 }
12151 else
12152 {
12153 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12154 affected if we specify unsigned args. */
12155 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12156 }
12157 }
12158
12159 static void
12160 do_neon_tst (void)
12161 {
12162 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12163 struct neon_type_el et = neon_check_type (3, rs,
12164 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
12165 neon_three_same (neon_quad (rs), 0, et.size);
12166 }
12167
12168 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12169 same types as the MAC equivalents. The polynomial type for this instruction
12170 is encoded the same as the integer type. */
12171
12172 static void
12173 do_neon_mul (void)
12174 {
12175 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
12176 return;
12177
12178 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12179 return;
12180
12181 if (inst.operands[2].isscalar)
12182 do_neon_mac_maybe_scalar ();
12183 else
12184 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
12185 }
12186
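/* Encode VQDMULH/VQRDMULH (saturating doubling multiply returning the high
   half), with either a register or a scalar as the second multiplicand. */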
12187 static void
12188 do_neon_qdmulh (void)
12189 {
12190 if (inst.operands[2].isscalar)
12191 {
12192 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12193 struct neon_type_el et = neon_check_type (3, rs,
12194 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12195 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12196 neon_mul_mac (et, neon_quad (rs));
12197 }
12198 else
12199 {
12200 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12201 struct neon_type_el et = neon_check_type (3, rs,
12202 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12203 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12204 /* The U bit (rounding) comes from bit mask. */
12205 neon_three_same (neon_quad (rs), 0, et.size);
12206 }
12207 }
12208
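/* Absolute floating-point compare (VACGE/VACGT). The _inv variant below
   swaps the operands first, so the reversed comparisons share this
   encoding. */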
12209 static void
12210 do_neon_fcmp_absolute (void)
12211 {
12212 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12213 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12214 /* Size field comes from bit mask. */
12215 neon_three_same (neon_quad (rs), 1, -1);
12216 }
12217
12218 static void
12219 do_neon_fcmp_absolute_inv (void)
12220 {
12221 neon_exchange_operands ();
12222 do_neon_fcmp_absolute ();
12223 }
12224
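/* Newton-Raphson step instructions (VRECPS/VRSQRTS). The size field is
   supplied by the opcode mask, hence the -1 passed below. */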
12225 static void
12226 do_neon_step (void)
12227 {
12228 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12229 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12230 neon_three_same (neon_quad (rs), 0, -1);
12231 }
12232
12233 static void
12234 do_neon_abs_neg (void)
12235 {
12236 enum neon_shape rs;
12237 struct neon_type_el et;
12238
12239 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
12240 return;
12241
12242 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12243 return;
12244
12245 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12246 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
12247
12248 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12249 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12250 inst.instruction |= LOW4 (inst.operands[1].reg);
12251 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12252 inst.instruction |= neon_quad (rs) << 6;
12253 inst.instruction |= (et.type == NT_float) << 10;
12254 inst.instruction |= neon_logbits (et.size) << 18;
12255
12256 inst.instruction = neon_dp_fixup (inst.instruction);
12257 }
12258
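/* Encode VSLI (shift left and insert). The shift amount must lie in the
   range 0 to the element size minus one. */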
12259 static void
12260 do_neon_sli (void)
12261 {
12262 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12263 struct neon_type_el et = neon_check_type (2, rs,
12264 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12265 int imm = inst.operands[2].imm;
12266 constraint (imm < 0 || (unsigned)imm >= et.size,
12267 _("immediate out of range for insert"));
12268 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12269 }
12270
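/* Encode VSRI (shift right and insert). The shift amount must lie in the
   range 1 to the element size; it is encoded as the size minus the
   immediate. */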
12271 static void
12272 do_neon_sri (void)
12273 {
12274 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12275 struct neon_type_el et = neon_check_type (2, rs,
12276 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12277 int imm = inst.operands[2].imm;
12278 constraint (imm < 1 || (unsigned)imm > et.size,
12279 _("immediate out of range for insert"));
12280 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
12281 }
12282
12283 static void
12284 do_neon_qshlu_imm (void)
12285 {
12286 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12287 struct neon_type_el et = neon_check_type (2, rs,
12288 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
12289 int imm = inst.operands[2].imm;
12290 constraint (imm < 0 || (unsigned)imm >= et.size,
12291 _("immediate out of range for shift"));
12292 /* Only encodes the 'U present' variant of the instruction.
12293 In this case, signed types have OP (bit 8) set to 0.
12294 Unsigned types have OP set to 1. */
12295 inst.instruction |= (et.type == NT_unsigned) << 8;
12296 /* The rest of the bits are the same as other immediate shifts. */
12297 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12298 }
12299
12300 static void
12301 do_neon_qmovn (void)
12302 {
12303 struct neon_type_el et = neon_check_type (2, NS_DQ,
12304 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12305 /* Saturating move where operands can be signed or unsigned, and the
12306 destination has the same signedness. */
12307 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12308 if (et.type == NT_unsigned)
12309 inst.instruction |= 0xc0;
12310 else
12311 inst.instruction |= 0x80;
12312 neon_two_same (0, 1, et.size / 2);
12313 }
12314
12315 static void
12316 do_neon_qmovun (void)
12317 {
12318 struct neon_type_el et = neon_check_type (2, NS_DQ,
12319 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12320 /* Saturating move with unsigned results. Operands must be signed. */
12321 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12322 neon_two_same (0, 1, et.size / 2);
12323 }
12324
12325 static void
12326 do_neon_rshift_sat_narrow (void)
12327 {
12328 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12329 or unsigned. If operands are unsigned, results must also be unsigned. */
12330 struct neon_type_el et = neon_check_type (2, NS_DQI,
12331 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12332 int imm = inst.operands[2].imm;
12333 /* This gets the bounds check, size encoding and immediate bits calculation
12334 right. */
12335 et.size /= 2;
12336
12337 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12338 VQMOVN.I<size> <Dd>, <Qm>. */
12339 if (imm == 0)
12340 {
12341 inst.operands[2].present = 0;
12342 inst.instruction = N_MNEM_vqmovn;
12343 do_neon_qmovn ();
12344 return;
12345 }
12346
12347 constraint (imm < 1 || (unsigned)imm > et.size,
12348 _("immediate out of range"));
12349 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
12350 }
12351
12352 static void
12353 do_neon_rshift_sat_narrow_u (void)
12354 {
12355 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12356 or unsigned. If operands are unsigned, results must also be unsigned. */
12357 struct neon_type_el et = neon_check_type (2, NS_DQI,
12358 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12359 int imm = inst.operands[2].imm;
12360 /* This gets the bounds check, size encoding and immediate bits calculation
12361 right. */
12362 et.size /= 2;
12363
12364 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12365 VQMOVUN.I<size> <Dd>, <Qm>. */
12366 if (imm == 0)
12367 {
12368 inst.operands[2].present = 0;
12369 inst.instruction = N_MNEM_vqmovun;
12370 do_neon_qmovun ();
12371 return;
12372 }
12373
12374 constraint (imm < 1 || (unsigned)imm > et.size,
12375 _("immediate out of range"));
12376 /* FIXME: The manual is kind of unclear about what value U should have in
12377 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12378 must be 1. */
12379 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
12380 }
12381
12382 static void
12383 do_neon_movn (void)
12384 {
12385 struct neon_type_el et = neon_check_type (2, NS_DQ,
12386 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12387 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12388 neon_two_same (0, 1, et.size / 2);
12389 }
12390
12391 static void
12392 do_neon_rshift_narrow (void)
12393 {
12394 struct neon_type_el et = neon_check_type (2, NS_DQI,
12395 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12396 int imm = inst.operands[2].imm;
12397 /* This gets the bounds check, size encoding and immediate bits calculation
12398 right. */
12399 et.size /= 2;
12400
12401 /* If the immediate is zero then this is a pseudo-instruction for
12402 VMOVN.I<size> <Dd>, <Qm>. */
12403 if (imm == 0)
12404 {
12405 inst.operands[2].present = 0;
12406 inst.instruction = N_MNEM_vmovn;
12407 do_neon_movn ();
12408 return;
12409 }
12410
12411 constraint (imm < 1 || (unsigned)imm > et.size,
12412 _("immediate out of range for narrowing operation"));
12413 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
12414 }
12415
12416 static void
12417 do_neon_shll (void)
12418 {
12419 /* FIXME: Type checking when lengthening. */
12420 struct neon_type_el et = neon_check_type (2, NS_QDI,
12421 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
12422 unsigned imm = inst.operands[2].imm;
12423
12424 if (imm == et.size)
12425 {
12426 /* Maximum shift variant. */
12427 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12428 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12429 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12430 inst.instruction |= LOW4 (inst.operands[1].reg);
12431 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12432 inst.instruction |= neon_logbits (et.size) << 18;
12433
12434 inst.instruction = neon_dp_fixup (inst.instruction);
12435 }
12436 else
12437 {
12438 /* A more-specific type check for non-max versions. */
12439 et = neon_check_type (2, NS_QDI,
12440 N_EQK | N_DBL, N_SU_32 | N_KEY);
12441 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12442 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
12443 }
12444 }
12445
12446 /* Check the various types for the VCVT instruction, and return which version
12447 the current instruction is. */
12448
12449 static int
12450 neon_cvt_flavour (enum neon_shape rs)
12451 {
12452 #define CVT_VAR(C,X,Y) \
12453 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12454 if (et.type != NT_invtype) \
12455 { \
12456 inst.error = NULL; \
12457 return (C); \
12458 }
12459 struct neon_type_el et;
12460 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
12461 || rs == NS_FF) ? N_VFP : 0;
12462 /* The instruction versions which take an immediate take one register
12463 argument, which is extended to the width of the full register. Thus the
12464 "source" and "destination" registers must have the same width. Hack that
12465 here by making the size equal to the key (wider, in this case) operand. */
12466 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
12467
12468 CVT_VAR (0, N_S32, N_F32);
12469 CVT_VAR (1, N_U32, N_F32);
12470 CVT_VAR (2, N_F32, N_S32);
12471 CVT_VAR (3, N_F32, N_U32);
12472
12473 whole_reg = N_VFP;
12474
12475 /* VFP instructions. */
12476 CVT_VAR (4, N_F32, N_F64);
12477 CVT_VAR (5, N_F64, N_F32);
12478 CVT_VAR (6, N_S32, N_F64 | key);
12479 CVT_VAR (7, N_U32, N_F64 | key);
12480 CVT_VAR (8, N_F64 | key, N_S32);
12481 CVT_VAR (9, N_F64 | key, N_U32);
12482 /* VFP instructions with bitshift. */
12483 CVT_VAR (10, N_F32 | key, N_S16);
12484 CVT_VAR (11, N_F32 | key, N_U16);
12485 CVT_VAR (12, N_F64 | key, N_S16);
12486 CVT_VAR (13, N_F64 | key, N_U16);
12487 CVT_VAR (14, N_S16, N_F32 | key);
12488 CVT_VAR (15, N_U16, N_F32 | key);
12489 CVT_VAR (16, N_S16, N_F64 | key);
12490 CVT_VAR (17, N_U16, N_F64 | key);
12491
12492 return -1;
12493 #undef CVT_VAR
12494 }
12495
12496 /* Neon-syntax VFP conversions. */
12497
12498 static void
12499 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
12500 {
12501 const char *opname = 0;
12502
12503 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
12504 {
12505 /* Conversions with immediate bitshift. */
12506 const char *enc[] =
12507 {
12508 "ftosls",
12509 "ftouls",
12510 "fsltos",
12511 "fultos",
12512 NULL,
12513 NULL,
12514 "ftosld",
12515 "ftould",
12516 "fsltod",
12517 "fultod",
12518 "fshtos",
12519 "fuhtos",
12520 "fshtod",
12521 "fuhtod",
12522 "ftoshs",
12523 "ftouhs",
12524 "ftoshd",
12525 "ftouhd"
12526 };
12527
12528 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12529 {
12530 opname = enc[flavour];
12531 constraint (inst.operands[0].reg != inst.operands[1].reg,
12532 _("operands 0 and 1 must be the same register"));
12533 inst.operands[1] = inst.operands[2];
12534 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
12535 }
12536 }
12537 else
12538 {
12539 /* Conversions without bitshift. */
12540 const char *enc[] =
12541 {
12542 "ftosis",
12543 "ftouis",
12544 "fsitos",
12545 "fuitos",
12546 "fcvtsd",
12547 "fcvtds",
12548 "ftosid",
12549 "ftouid",
12550 "fsitod",
12551 "fuitod"
12552 };
12553
12554 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12555 opname = enc[flavour];
12556 }
12557
12558 if (opname)
12559 do_vfp_nsyn_opcode (opname);
12560 }
12561
12562 static void
12563 do_vfp_nsyn_cvtz (void)
12564 {
12565 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
12566 int flavour = neon_cvt_flavour (rs);
12567 const char *enc[] =
12568 {
12569 "ftosizs",
12570 "ftouizs",
12571 NULL,
12572 NULL,
12573 NULL,
12574 NULL,
12575 "ftosizd",
12576 "ftouizd"
12577 };
12578
12579 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
12580 do_vfp_nsyn_opcode (enc[flavour]);
12581 }
12582
12583 static void
12584 do_neon_cvt (void)
12585 {
12586 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
12587 NS_FD, NS_DF, NS_FF, NS_NULL);
12588 int flavour = neon_cvt_flavour (rs);
12589
12590 /* VFP rather than Neon conversions. */
12591 if (flavour >= 4)
12592 {
12593 do_vfp_nsyn_cvt (rs, flavour);
12594 return;
12595 }
12596
12597 switch (rs)
12598 {
12599 case NS_DDI:
12600 case NS_QQI:
12601 {
12602 unsigned immbits = 32 - inst.operands[2].imm;
12603 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12604 
12605 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12606 return;
12607 /* Fixed-point conversion with #0 immediate is encoded as an
12608 integer conversion. */
12609 if (inst.operands[2].present && inst.operands[2].imm == 0)
12610 goto int_encode;
12611 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12612 if (flavour != -1)
12613 inst.instruction |= enctab[flavour];
12614 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12615 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12616 inst.instruction |= LOW4 (inst.operands[1].reg);
12617 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12618 inst.instruction |= neon_quad (rs) << 6;
12619 inst.instruction |= 1 << 21;
12620 inst.instruction |= immbits << 16;
12621
12622 inst.instruction = neon_dp_fixup (inst.instruction);
12623 }
12624 break;
12625
12626 case NS_DD:
12627 case NS_QQ:
12628 int_encode:
12629 {
12630 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
12631
12632 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12633
12634 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12635 return;
12636
12637 if (flavour != -1)
12638 inst.instruction |= enctab[flavour];
12639
12640 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12641 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12642 inst.instruction |= LOW4 (inst.operands[1].reg);
12643 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12644 inst.instruction |= neon_quad (rs) << 6;
12645 inst.instruction |= 2 << 18;
12646
12647 inst.instruction = neon_dp_fixup (inst.instruction);
12648 }
12649 break;
12650
12651 default:
12652 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12653 do_vfp_nsyn_cvt (rs, flavour);
12654 }
12655 }
12656
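/* Encode the immediate operand of VMOV/VMVN. If the immediate cannot be
   encoded directly, try the complementary instruction (VMVN/VMOV) with the
   bitwise-inverted immediate instead. */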
12657 static void
12658 neon_move_immediate (void)
12659 {
12660 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
12661 struct neon_type_el et = neon_check_type (2, rs,
12662 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12663 unsigned immlo, immhi = 0, immbits;
12664 int op, cmode, float_p;
12665
12666 constraint (et.type == NT_invtype,
12667 _("operand size must be specified for immediate VMOV"));
12668
12669 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
12670 op = (inst.instruction & (1 << 5)) != 0;
12671
12672 immlo = inst.operands[1].imm;
12673 if (inst.operands[1].regisimm)
12674 immhi = inst.operands[1].reg;
12675
12676 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
12677 _("immediate has bits set outside the operand size"));
12678
12679 float_p = inst.operands[1].immisfloat;
12680
12681 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
12682 et.size, et.type)) == FAIL)
12683 {
12684 /* Invert relevant bits only. */
12685 neon_invert_size (&immlo, &immhi, et.size);
12686 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
12687 with one or the other; those cases are caught by
12688 neon_cmode_for_move_imm. */
12689 op = !op;
12690 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
12691 &op, et.size, et.type)) == FAIL)
12692 {
12693 first_error (_("immediate out of range"));
12694 return;
12695 }
12696 }
12697
12698 inst.instruction &= ~(1 << 5);
12699 inst.instruction |= op << 5;
12700
12701 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12702 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12703 inst.instruction |= neon_quad (rs) << 6;
12704 inst.instruction |= cmode << 8;
12705
12706 neon_write_immbits (immbits);
12707 }
12708
12709 static void
12710 do_neon_mvn (void)
12711 {
12712 if (inst.operands[1].isreg)
12713 {
12714 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12715
12716 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12717 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12718 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12719 inst.instruction |= LOW4 (inst.operands[1].reg);
12720 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12721 inst.instruction |= neon_quad (rs) << 6;
12722 }
12723 else
12724 {
12725 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12726 neon_move_immediate ();
12727 }
12728
12729 inst.instruction = neon_dp_fixup (inst.instruction);
12730 }
12731
12732 /* Encode instructions of form:
12733
12734 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12735 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
12736
12737 */
12738
12739 static void
12740 neon_mixed_length (struct neon_type_el et, unsigned size)
12741 {
12742 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12743 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12744 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12745 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12746 inst.instruction |= LOW4 (inst.operands[2].reg);
12747 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12748 inst.instruction |= (et.type == NT_unsigned) << 24;
12749 inst.instruction |= neon_logbits (size) << 20;
12750
12751 inst.instruction = neon_dp_fixup (inst.instruction);
12752 }
12753
12754 static void
12755 do_neon_dyadic_long (void)
12756 {
12757 /* FIXME: Type checking for lengthening op. */
12758 struct neon_type_el et = neon_check_type (3, NS_QDD,
12759 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
12760 neon_mixed_length (et, et.size);
12761 }
12762
12763 static void
12764 do_neon_abal (void)
12765 {
12766 struct neon_type_el et = neon_check_type (3, NS_QDD,
12767 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
12768 neon_mixed_length (et, et.size);
12769 }
12770
12771 static void
12772 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
12773 {
12774 if (inst.operands[2].isscalar)
12775 {
12776 struct neon_type_el et = neon_check_type (3, NS_QDS,
12777 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
12778 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12779 neon_mul_mac (et, et.type == NT_unsigned);
12780 }
12781 else
12782 {
12783 struct neon_type_el et = neon_check_type (3, NS_QDD,
12784 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
12785 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12786 neon_mixed_length (et, et.size);
12787 }
12788 }
12789
12790 static void
12791 do_neon_mac_maybe_scalar_long (void)
12792 {
12793 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
12794 }
12795
12796 static void
12797 do_neon_dyadic_wide (void)
12798 {
12799 struct neon_type_el et = neon_check_type (3, NS_QQD,
12800 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
12801 neon_mixed_length (et, et.size);
12802 }
12803
12804 static void
12805 do_neon_dyadic_narrow (void)
12806 {
12807 struct neon_type_el et = neon_check_type (3, NS_QDD,
12808 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
12809 /* Operand sign is unimportant, and the U bit is part of the opcode,
12810 so force the operand type to integer. */
12811 et.type = NT_integer;
12812 neon_mixed_length (et, et.size / 2);
12813 }
12814
12815 static void
12816 do_neon_mul_sat_scalar_long (void)
12817 {
12818 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
12819 }
12820
12821 static void
12822 do_neon_vmull (void)
12823 {
12824 if (inst.operands[2].isscalar)
12825 do_neon_mac_maybe_scalar_long ();
12826 else
12827 {
12828 struct neon_type_el et = neon_check_type (3, NS_QDD,
12829 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
12830 if (et.type == NT_poly)
12831 inst.instruction = NEON_ENC_POLY (inst.instruction);
12832 else
12833 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12834 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12835 zero. Should be OK as-is. */
12836 neon_mixed_length (et, et.size);
12837 }
12838 }
12839
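/* Encode VEXT, e.g. VEXT.8 <Dd>, <Dn>, <Dm>, #<imm>. The element index is
   scaled to a byte offset, which must fit within the destination register
   (8 bytes for D, 16 for Q). */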
12840 static void
12841 do_neon_ext (void)
12842 {
12843 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
12844 struct neon_type_el et = neon_check_type (3, rs,
12845 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12846 unsigned imm = (inst.operands[3].imm * et.size) / 8;
12847 constraint (imm >= (neon_quad (rs) ? 16 : 8), _("shift out of range"));
12848 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12849 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12850 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12851 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12852 inst.instruction |= LOW4 (inst.operands[2].reg);
12853 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12854 inst.instruction |= neon_quad (rs) << 6;
12855 inst.instruction |= imm << 8;
12856
12857 inst.instruction = neon_dp_fixup (inst.instruction);
12858 }
12859
12860 static void
12861 do_neon_rev (void)
12862 {
12863 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12864 struct neon_type_el et = neon_check_type (2, rs,
12865 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12866 unsigned op = (inst.instruction >> 7) & 3;
12867 /* N (width of reversed regions) is encoded as part of the bitmask. We
12868 extract it here to check that the elements to be reversed are smaller.
12869 Otherwise we'd get a reserved instruction. */
12870 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
12871 assert (elsize != 0);
12872 constraint (et.size >= elsize,
12873 _("elements must be smaller than reversal region"));
12874 neon_two_same (neon_quad (rs), 1, et.size);
12875 }
12876
12877 static void
12878 do_neon_dup (void)
12879 {
12880 if (inst.operands[1].isscalar)
12881 {
12882 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
12883 struct neon_type_el et = neon_check_type (2, rs,
12884 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12885 unsigned sizebits = et.size >> 3;
12886 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
12887 int logsize = neon_logbits (et.size);
12888 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
12889
12890 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
12891 return;
12892
12893 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12894 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12895 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12896 inst.instruction |= LOW4 (dm);
12897 inst.instruction |= HI1 (dm) << 5;
12898 inst.instruction |= neon_quad (rs) << 6;
12899 inst.instruction |= x << 17;
12900 inst.instruction |= sizebits << 16;
12901
12902 inst.instruction = neon_dp_fixup (inst.instruction);
12903 }
12904 else
12905 {
12906 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
12907 struct neon_type_el et = neon_check_type (2, rs,
12908 N_8 | N_16 | N_32 | N_KEY, N_EQK);
12909 /* Duplicate ARM register to lanes of vector. */
12910 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
12911 switch (et.size)
12912 {
12913 case 8: inst.instruction |= 0x400000; break;
12914 case 16: inst.instruction |= 0x000020; break;
12915 case 32: inst.instruction |= 0x000000; break;
12916 default: break;
12917 }
12918 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
12919 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
12920 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
12921 inst.instruction |= neon_quad (rs) << 21;
12922 /* The encoding for this instruction is identical for the ARM and Thumb
12923 variants, except for the condition field. */
12924 do_vfp_cond_or_thumb ();
12925 }
12926 }
12927
12928 /* VMOV has particularly many variations. It can be one of:
12929 0. VMOV<c><q> <Qd>, <Qm>
12930 1. VMOV<c><q> <Dd>, <Dm>
12931 (Register operations, which are VORR with Rm = Rn.)
12932 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12933 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12934 (Immediate loads.)
12935 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12936 (ARM register to scalar.)
12937 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12938 (Two ARM registers to vector.)
12939 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12940 (Scalar to ARM register.)
12941 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12942 (Vector to two ARM registers.)
12943 8. VMOV.F32 <Sd>, <Sm>
12944 9. VMOV.F64 <Dd>, <Dm>
12945 (VFP register moves.)
12946 10. VMOV.F32 <Sd>, #imm
12947 11. VMOV.F64 <Dd>, #imm
12948 (VFP float immediate load.)
12949 12. VMOV <Rd>, <Sm>
12950 (VFP single to ARM reg.)
12951 13. VMOV <Sd>, <Rm>
12952 (ARM reg to VFP single.)
12953 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12954 (Two ARM regs to two VFP singles.)
12955 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12956 (Two VFP singles to two ARM regs.)
12957
12958 These cases can be disambiguated using neon_select_shape, except cases 1/9
12959 and 3/11 which depend on the operand type too.
12960
12961 All the encoded bits are hardcoded by this function.
12962
12963 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12964 Cases 5, 7 may be used with VFPv2 and above.
12965
12966 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12967 can specify a type where it doesn't make sense to, and is ignored).
12968 */
12969
12970 static void
12971 do_neon_mov (void)
12972 {
12973 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
12974 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
12975 NS_NULL);
12976 struct neon_type_el et;
12977 const char *ldconst = 0;
12978
12979 switch (rs)
12980 {
12981 case NS_DD: /* case 1/9. */
12982 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
12983 /* It is not an error here if no type is given. */
12984 inst.error = NULL;
12985 if (et.type == NT_float && et.size == 64)
12986 {
12987 do_vfp_nsyn_opcode ("fcpyd");
12988 break;
12989 }
12990 /* fall through. */
12991
12992 case NS_QQ: /* case 0/1. */
12993 {
12994 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12995 return;
12996 /* The architecture manual I have doesn't explicitly state which
12997 value the U bit should have for register->register moves, but
12998 the equivalent VORR instruction has U = 0, so do that. */
12999 inst.instruction = 0x0200110;
13000 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13001 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13002 inst.instruction |= LOW4 (inst.operands[1].reg);
13003 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13004 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13005 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13006 inst.instruction |= neon_quad (rs) << 6;
13007
13008 inst.instruction = neon_dp_fixup (inst.instruction);
13009 }
13010 break;
13011
13012 case NS_DI: /* case 3/11. */
13013 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
13014 inst.error = NULL;
13015 if (et.type == NT_float && et.size == 64)
13016 {
13017 /* case 11 (fconstd). */
13018 ldconst = "fconstd";
13019 goto encode_fconstd;
13020 }
13021 /* fall through. */
13022
13023 case NS_QI: /* case 2/3. */
13024 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13025 return;
13026 inst.instruction = 0x0800010;
13027 neon_move_immediate ();
13028 inst.instruction = neon_dp_fixup (inst.instruction);
13029 break;
13030
13031 case NS_SR: /* case 4. */
13032 {
13033 unsigned bcdebits = 0;
13034 struct neon_type_el et = neon_check_type (2, NS_NULL,
13035 N_8 | N_16 | N_32 | N_KEY, N_EQK);
13036 int logsize = neon_logbits (et.size);
13037 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
13038 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
13039
13040 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13041 _(BAD_FPU));
13042 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13043 && et.size != 32, _(BAD_FPU));
13044 constraint (et.type == NT_invtype, _("bad type for scalar"));
13045 constraint (x >= 64 / et.size, _("scalar index out of range"));
13046
13047 switch (et.size)
13048 {
13049 case 8: bcdebits = 0x8; break;
13050 case 16: bcdebits = 0x1; break;
13051 case 32: bcdebits = 0x0; break;
13052 default: ;
13053 }
13054
13055 bcdebits |= x << logsize;
13056
13057 inst.instruction = 0xe000b10;
13058 do_vfp_cond_or_thumb ();
13059 inst.instruction |= LOW4 (dn) << 16;
13060 inst.instruction |= HI1 (dn) << 7;
13061 inst.instruction |= inst.operands[1].reg << 12;
13062 inst.instruction |= (bcdebits & 3) << 5;
13063 inst.instruction |= (bcdebits >> 2) << 21;
13064 }
13065 break;
13066
13067 case NS_DRR: /* case 5 (fmdrr). */
13068 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13069 _(BAD_FPU));
13070
13071 inst.instruction = 0xc400b10;
13072 do_vfp_cond_or_thumb ();
13073 inst.instruction |= LOW4 (inst.operands[0].reg);
13074 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
13075 inst.instruction |= inst.operands[1].reg << 12;
13076 inst.instruction |= inst.operands[2].reg << 16;
13077 break;
13078
13079 case NS_RS: /* case 6. */
13080 {
13081 struct neon_type_el et = neon_check_type (2, NS_NULL,
13082 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
13083 unsigned logsize = neon_logbits (et.size);
13084 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
13085 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
13086 unsigned abcdebits = 0;
13087
13088 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
13089 _(BAD_FPU));
13090 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
13091 && et.size != 32, _(BAD_FPU));
13092 constraint (et.type == NT_invtype, _("bad type for scalar"));
13093 constraint (x >= 64 / et.size, _("scalar index out of range"));
13094
13095 switch (et.size)
13096 {
13097 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
13098 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
13099 case 32: abcdebits = 0x00; break;
13100 default: ;
13101 }
13102
13103 abcdebits |= x << logsize;
13104 inst.instruction = 0xe100b10;
13105 do_vfp_cond_or_thumb ();
13106 inst.instruction |= LOW4 (dn) << 16;
13107 inst.instruction |= HI1 (dn) << 7;
13108 inst.instruction |= inst.operands[0].reg << 12;
13109 inst.instruction |= (abcdebits & 3) << 5;
13110 inst.instruction |= (abcdebits >> 2) << 21;
13111 }
13112 break;
13113
13114 case NS_RRD: /* case 7 (fmrrd). */
13115 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
13116 _(BAD_FPU));
13117
13118 inst.instruction = 0xc500b10;
13119 do_vfp_cond_or_thumb ();
13120 inst.instruction |= inst.operands[0].reg << 12;
13121 inst.instruction |= inst.operands[1].reg << 16;
13122 inst.instruction |= LOW4 (inst.operands[2].reg);
13123 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13124 break;
13125
13126 case NS_FF: /* case 8 (fcpys). */
13127 do_vfp_nsyn_opcode ("fcpys");
13128 break;
13129
13130 case NS_FI: /* case 10 (fconsts). */
13131 ldconst = "fconsts";
13132 encode_fconstd:
13133 if (is_quarter_float (inst.operands[1].imm))
13134 {
13135 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
13136 do_vfp_nsyn_opcode (ldconst);
13137 }
13138 else
13139 first_error (_("immediate out of range"));
13140 break;
13141
13142 case NS_RF: /* case 12 (fmrs). */
13143 do_vfp_nsyn_opcode ("fmrs");
13144 break;
13145
13146 case NS_FR: /* case 13 (fmsr). */
13147 do_vfp_nsyn_opcode ("fmsr");
13148 break;
13149
13150 /* The encoders for the fmrrs and fmsrr instructions expect three operands
13151 (one of which is a list), but we have parsed four. Do some fiddling to
13152 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
13153 expect. */
13154 case NS_RRFF: /* case 14 (fmrrs). */
13155 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
13156 _("VFP registers must be adjacent"));
13157 inst.operands[2].imm = 2;
13158 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13159 do_vfp_nsyn_opcode ("fmrrs");
13160 break;
13161
13162 case NS_FFRR: /* case 15 (fmsrr). */
13163 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
13164 _("VFP registers must be adjacent"));
13165 inst.operands[1] = inst.operands[2];
13166 inst.operands[2] = inst.operands[3];
13167 inst.operands[0].imm = 2;
13168 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13169 do_vfp_nsyn_opcode ("fmsrr");
13170 break;
13171
13172 default:
13173 abort ();
13174 }
13175 }
13176
13177 static void
13178 do_neon_rshift_round_imm (void)
13179 {
13180 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13181 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13182 int imm = inst.operands[2].imm;
13183
13184 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
13185 if (imm == 0)
13186 {
13187 inst.operands[2].present = 0;
13188 do_neon_mov ();
13189 return;
13190 }
13191
13192 constraint (imm < 1 || (unsigned)imm > et.size,
13193 _("immediate out of range for shift"));
13194 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13195 et.size - imm);
13196 }
13197
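/* Encode VMOVL (lengthening move): each element is sign- or zero-extended
   to twice its width, according to the signedness of the type. */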
13198 static void
13199 do_neon_movl (void)
13200 {
13201 struct neon_type_el et = neon_check_type (2, NS_QD,
13202 N_EQK | N_DBL, N_SU_32 | N_KEY);
13203 unsigned sizebits = et.size >> 3;
13204 inst.instruction |= sizebits << 19;
13205 neon_two_same (0, et.type == NT_unsigned, -1);
13206 }
13207
13208 static void
13209 do_neon_trn (void)
13210 {
13211 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13212 struct neon_type_el et = neon_check_type (2, rs,
13213 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13214 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13215 neon_two_same (neon_quad (rs), 1, et.size);
13216 }
13217
13218 static void
13219 do_neon_zip_uzp (void)
13220 {
13221 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13222 struct neon_type_el et = neon_check_type (2, rs,
13223 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13224 if (rs == NS_DD && et.size == 32)
13225 {
13226 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13227 inst.instruction = N_MNEM_vtrn;
13228 do_neon_trn ();
13229 return;
13230 }
13231 neon_two_same (neon_quad (rs), 1, et.size);
13232 }
13233
13234 static void
13235 do_neon_sat_abs_neg (void)
13236 {
13237 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13238 struct neon_type_el et = neon_check_type (2, rs,
13239 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13240 neon_two_same (neon_quad (rs), 1, et.size);
13241 }
13242
13243 static void
13244 do_neon_pair_long (void)
13245 {
13246 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13247 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
13248 /* Unsigned is encoded in the OP field (bit 7) for these instructions. */
13249 inst.instruction |= (et.type == NT_unsigned) << 7;
13250 neon_two_same (neon_quad (rs), 1, et.size);
13251 }
13252
13253 static void
13254 do_neon_recip_est (void)
13255 {
13256 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13257 struct neon_type_el et = neon_check_type (2, rs,
13258 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
13259 inst.instruction |= (et.type == NT_float) << 8;
13260 neon_two_same (neon_quad (rs), 1, et.size);
13261 }
13262
13263 static void
13264 do_neon_cls (void)
13265 {
13266 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13267 struct neon_type_el et = neon_check_type (2, rs,
13268 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13269 neon_two_same (neon_quad (rs), 1, et.size);
13270 }
13271
13272 static void
13273 do_neon_clz (void)
13274 {
13275 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13276 struct neon_type_el et = neon_check_type (2, rs,
13277 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
13278 neon_two_same (neon_quad (rs), 1, et.size);
13279 }
13280
13281 static void
13282 do_neon_cnt (void)
13283 {
13284 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13285 struct neon_type_el et = neon_check_type (2, rs,
13286 N_EQK | N_INT, N_8 | N_KEY);
13287 neon_two_same (neon_quad (rs), 1, et.size);
13288 }
13289
13290 static void
13291 do_neon_swp (void)
13292 {
13293 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13294 neon_two_same (neon_quad (rs), 1, -1);
13295 }
13296
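/* Encode VTBL/VTBX table lookups. The list may name one to four registers;
   the length minus one is placed in bits [9:8]. */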
13297 static void
13298 do_neon_tbl_tbx (void)
13299 {
13300 unsigned listlenbits;
13301 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
13302
13303 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
13304 {
13305 first_error (_("bad list length for table lookup"));
13306 return;
13307 }
13308
13309 listlenbits = inst.operands[1].imm - 1;
13310 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13311 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13312 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13313 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13314 inst.instruction |= LOW4 (inst.operands[2].reg);
13315 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13316 inst.instruction |= listlenbits << 8;
13317
13318 inst.instruction = neon_dp_fixup (inst.instruction);
13319 }
13320
13321 static void
13322 do_neon_ldm_stm (void)
13323 {
13324 /* P, U and L bits are part of bitmask. */
13325 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
13326 unsigned offsetbits = inst.operands[1].imm * 2;
13327
13328 if (inst.operands[1].issingle)
13329 {
13330 do_vfp_nsyn_ldm_stm (is_dbmode);
13331 return;
13332 }
13333
13334 constraint (is_dbmode && !inst.operands[0].writeback,
13335 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13336
13337 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
13338 _("register list must contain at least 1 and at most 16 "
13339 "registers"));
13340
13341 inst.instruction |= inst.operands[0].reg << 16;
13342 inst.instruction |= inst.operands[0].writeback << 21;
13343 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13344 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
13345
13346 inst.instruction |= offsetbits;
13347
13348 do_vfp_cond_or_thumb ();
13349 }
13350
13351 static void
13352 do_neon_ldr_str (void)
13353 {
13354 int is_ldr = (inst.instruction & (1 << 20)) != 0;
13355
13356 if (inst.operands[0].issingle)
13357 {
13358 if (is_ldr)
13359 do_vfp_nsyn_opcode ("flds");
13360 else
13361 do_vfp_nsyn_opcode ("fsts");
13362 }
13363 else
13364 {
13365 if (is_ldr)
13366 do_vfp_nsyn_opcode ("fldd");
13367 else
13368 do_vfp_nsyn_opcode ("fstd");
13369 }
13370 }
13371
13372 /* "interleave" version also handles non-interleaving register VLD1/VST1
13373 instructions. */
13374
13375 static void
13376 do_neon_ld_st_interleave (void)
13377 {
13378 struct neon_type_el et = neon_check_type (1, NS_NULL,
13379 N_8 | N_16 | N_32 | N_64);
13380 unsigned alignbits = 0;
13381 unsigned idx;
13382 /* The bits in this table go:
13383 0: register stride of one (0) or two (1)
13384 1,2: register list length, minus one (1, 2, 3, 4).
13385 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13386 We use -1 for invalid entries. */
13387 const int typetable[] =
13388 {
13389 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13390 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13391 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
13392 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
13393 };
13394 int typebits;
13395
13396 if (et.type == NT_invtype)
13397 return;
13398
13399 if (inst.operands[1].immisalign)
13400 switch (inst.operands[1].imm >> 8)
13401 {
13402 case 64: alignbits = 1; break;
13403 case 128:
13404 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13405 goto bad_alignment;
13406 alignbits = 2;
13407 break;
13408 case 256:
13409 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13410 goto bad_alignment;
13411 alignbits = 3;
13412 break;
13413 default:
13414 bad_alignment:
13415 first_error (_("bad alignment"));
13416 return;
13417 }
13418
13419 inst.instruction |= alignbits << 4;
13420 inst.instruction |= neon_logbits (et.size) << 6;
13421
13422 /* Bits [4:6] of the immediate in a list specifier encode register stride
13423 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
13424 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
13425 up the right value for "type" in a table based on this value and the given
13426 list style, then stick it back. */
13427 idx = ((inst.operands[0].imm >> 4) & 7)
13428 | (((inst.instruction >> 8) & 3) << 3);
13429
13430 typebits = typetable[idx];
13431
13432 constraint (typebits == -1, _("bad list type for instruction"));
13433
13434 inst.instruction &= ~0xf00;
13435 inst.instruction |= typebits << 8;
13436 }
13437
13438 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
13439 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
13440 otherwise. The variable arguments are a list of pairs of legal (size, align)
13441 values, terminated with -1. */
13442
13443 static int
13444 neon_alignment_bit (int size, int align, int *do_align, ...)
13445 {
13446 va_list ap;
13447 int result = FAIL, thissize, thisalign;
13448
13449 if (!inst.operands[1].immisalign)
13450 {
13451 *do_align = 0;
13452 return SUCCESS;
13453 }
13454
13455 va_start (ap, do_align);
13456
13457 do
13458 {
13459 thissize = va_arg (ap, int);
13460 if (thissize == -1)
13461 break;
13462 thisalign = va_arg (ap, int);
13463
13464 if (size == thissize && align == thisalign)
13465 result = SUCCESS;
13466 }
13467 while (result != SUCCESS);
13468
13469 va_end (ap);
13470
13471 if (result == SUCCESS)
13472 *do_align = 1;
13473 else
13474 first_error (_("unsupported alignment for instruction"));
13475
13476 return result;
13477 }
13478
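/* Encode single-element (per-lane) VLD<n>/VST<n> instructions. */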
13479 static void
13480 do_neon_ld_st_lane (void)
13481 {
13482 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13483 int align_good, do_align = 0;
13484 int logsize = neon_logbits (et.size);
13485 int align = inst.operands[1].imm >> 8;
13486 int n = (inst.instruction >> 8) & 3;
13487 int max_el = 64 / et.size;
13488
13489 if (et.type == NT_invtype)
13490 return;
13491
13492 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
13493 _("bad list length"));
13494 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
13495 _("scalar index out of range"));
13496 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
13497 && et.size == 8,
13498 _("stride of 2 unavailable when element size is 8"));
13499
13500 switch (n)
13501 {
13502 case 0: /* VLD1 / VST1. */
13503 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
13504 32, 32, -1);
13505 if (align_good == FAIL)
13506 return;
13507 if (do_align)
13508 {
13509 unsigned alignbits = 0;
13510 switch (et.size)
13511 {
13512 case 16: alignbits = 0x1; break;
13513 case 32: alignbits = 0x3; break;
13514 default: ;
13515 }
13516 inst.instruction |= alignbits << 4;
13517 }
13518 break;
13519
13520 case 1: /* VLD2 / VST2. */
13521 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
13522 32, 64, -1);
13523 if (align_good == FAIL)
13524 return;
13525 if (do_align)
13526 inst.instruction |= 1 << 4;
13527 break;
13528
13529 case 2: /* VLD3 / VST3. */
13530 constraint (inst.operands[1].immisalign,
13531 _("can't use alignment with this instruction"));
13532 break;
13533
13534 case 3: /* VLD4 / VST4. */
13535 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13536 16, 64, 32, 64, 32, 128, -1);
13537 if (align_good == FAIL)
13538 return;
13539 if (do_align)
13540 {
13541 unsigned alignbits = 0;
13542 switch (et.size)
13543 {
13544 case 8: alignbits = 0x1; break;
13545 case 16: alignbits = 0x1; break;
13546 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
13547 default: ;
13548 }
13549 inst.instruction |= alignbits << 4;
13550 }
13551 break;
13552
13553 default: ;
13554 }
13555
13556 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
13557 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13558 inst.instruction |= 1 << (4 + logsize);
13559
13560 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
13561 inst.instruction |= logsize << 10;
13562 }
13563
13564 /* Encode single n-element structure to all lanes VLD<n> instructions. */
13565
13566 static void
13567 do_neon_ld_dup (void)
13568 {
13569 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13570 int align_good, do_align = 0;
13571
13572 if (et.type == NT_invtype)
13573 return;
13574
13575 switch ((inst.instruction >> 8) & 3)
13576 {
13577 case 0: /* VLD1. */
13578 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
13579 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13580 &do_align, 16, 16, 32, 32, -1);
13581 if (align_good == FAIL)
13582 return;
13583 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
13584 {
13585 case 1: break;
13586 case 2: inst.instruction |= 1 << 5; break;
13587 default: first_error (_("bad list length")); return;
13588 }
13589 inst.instruction |= neon_logbits (et.size) << 6;
13590 break;
13591
13592 case 1: /* VLD2. */
13593 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13594 &do_align, 8, 16, 16, 32, 32, 64, -1);
13595 if (align_good == FAIL)
13596 return;
13597 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
13598 _("bad list length"));
13599 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13600 inst.instruction |= 1 << 5;
13601 inst.instruction |= neon_logbits (et.size) << 6;
13602 break;
13603
13604 case 2: /* VLD3. */
13605 constraint (inst.operands[1].immisalign,
13606 _("can't use alignment with this instruction"));
13607 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
13608 _("bad list length"));
13609 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13610 inst.instruction |= 1 << 5;
13611 inst.instruction |= neon_logbits (et.size) << 6;
13612 break;
13613
13614 case 3: /* VLD4. */
13615 {
13616 int align = inst.operands[1].imm >> 8;
13617 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13618 16, 64, 32, 64, 32, 128, -1);
13619 if (align_good == FAIL)
13620 return;
13621 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
13622 _("bad list length"));
13623 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13624 inst.instruction |= 1 << 5;
13625 if (et.size == 32 && align == 128)
13626 inst.instruction |= 0x3 << 6;
13627 else
13628 inst.instruction |= neon_logbits (et.size) << 6;
13629 }
13630 break;
13631
13632 default: ;
13633 }
13634
13635 inst.instruction |= do_align << 4;
13636 }
13637
13638 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
13639 apart from bits [11:4]). */
13640
13641 static void
13642 do_neon_ldx_stx (void)
13643 {
13644 switch (NEON_LANE (inst.operands[0].imm))
13645 {
13646 case NEON_INTERLEAVE_LANES:
13647 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
13648 do_neon_ld_st_interleave ();
13649 break;
13650
13651 case NEON_ALL_LANES:
13652 inst.instruction = NEON_ENC_DUP (inst.instruction);
13653 do_neon_ld_dup ();
13654 break;
13655
13656 default:
13657 inst.instruction = NEON_ENC_LANE (inst.instruction);
13658 do_neon_ld_st_lane ();
13659 }
13660
13661 /* L bit comes from bit mask. */
13662 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13663 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13664 inst.instruction |= inst.operands[1].reg << 16;
13665
13666 if (inst.operands[1].postind)
13667 {
13668 int postreg = inst.operands[1].imm & 0xf;
13669 constraint (!inst.operands[1].immisreg,
13670 _("post-index must be a register"));
13671 constraint (postreg == 0xd || postreg == 0xf,
13672 _("bad register for post-index"));
13673 inst.instruction |= postreg;
13674 }
13675 else if (inst.operands[1].writeback)
13676 {
13677 inst.instruction |= 0xd;
13678 }
13679 else
13680 inst.instruction |= 0xf;
13681
13682 if (thumb_mode)
13683 inst.instruction |= 0xf9000000;
13684 else
13685 inst.instruction |= 0xf4000000;
13686 }
13687
13688 \f
13689 /* Overall per-instruction processing. */
13690
13691 /* We need to be able to fix up arbitrary expressions in some statements.
13692 This is so that we can handle symbols that are an arbitrary distance from
13693 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13694 which returns part of an address in a form which will be valid for
13695 a data instruction. We do this by pushing the expression into a symbol
13696 in the expr_section, and creating a fix for that. */
13697
13698 static void
13699 fix_new_arm (fragS * frag,
13700 int where,
13701 short int size,
13702 expressionS * exp,
13703 int pc_rel,
13704 int reloc)
13705 {
13706 fixS * new_fix;
13707
13708 switch (exp->X_op)
13709 {
13710 case O_constant:
13711 case O_symbol:
13712 case O_add:
13713 case O_subtract:
13714 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
13715 break;
13716
13717 default:
13718 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
13719 pc_rel, reloc);
13720 break;
13721 }
13722
13723 /* Mark whether the fix is to a THUMB instruction, or an ARM
13724 instruction. */
13725 new_fix->tc_fix_data = thumb_mode;
13726 }
13727
13728 /* Create a frag for an instruction requiring relaxation. */
13729 static void
13730 output_relax_insn (void)
13731 {
13732 char * to;
13733 symbolS *sym;
13734 int offset;
13735
13736 /* The size of the instruction is unknown, so tie the debug info to the
13737 start of the instruction. */
13738 dwarf2_emit_insn (0);
13739
13740 switch (inst.reloc.exp.X_op)
13741 {
13742 case O_symbol:
13743 sym = inst.reloc.exp.X_add_symbol;
13744 offset = inst.reloc.exp.X_add_number;
13745 break;
13746 case O_constant:
13747 sym = NULL;
13748 offset = inst.reloc.exp.X_add_number;
13749 break;
13750 default:
13751 sym = make_expr_symbol (&inst.reloc.exp);
13752 offset = 0;
13753 break;
13754 }
13755 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
13756 inst.relax, sym, offset, NULL/*offset, opcode*/);
13757 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
13758 }
13759
13760 /* Write a 32-bit thumb instruction to buf. */
13761 static void
13762 put_thumb32_insn (char * buf, unsigned long insn)
13763 {
13764 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
13765 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
13766 }
13767
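/* Write out the assembled instruction to the current frag, together with
   any relocation and DWARF line number information required. STR is the
   original source line, used for error reporting. */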
13768 static void
13769 output_inst (const char * str)
13770 {
13771 char * to = NULL;
13772
13773 if (inst.error)
13774 {
13775 as_bad ("%s -- `%s'", inst.error, str);
13776 return;
13777 }
13778 if (inst.relax) {
13779 output_relax_insn();
13780 return;
13781 }
13782 if (inst.size == 0)
13783 return;
13784
13785 to = frag_more (inst.size);
13786
13787 if (thumb_mode && (inst.size > THUMB_SIZE))
13788 {
13789 assert (inst.size == (2 * THUMB_SIZE));
13790 put_thumb32_insn (to, inst.instruction);
13791 }
13792 else if (inst.size > INSN_SIZE)
13793 {
13794 assert (inst.size == (2 * INSN_SIZE));
13795 md_number_to_chars (to, inst.instruction, INSN_SIZE);
13796 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
13797 }
13798 else
13799 md_number_to_chars (to, inst.instruction, inst.size);
13800
13801 if (inst.reloc.type != BFD_RELOC_UNUSED)
13802 fix_new_arm (frag_now, to - frag_now->fr_literal,
13803 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
13804 inst.reloc.type);
13805
13806 dwarf2_emit_insn (inst.size);
13807 }
13808
13809 /* Tag values used in struct asm_opcode's tag field. */
13810 enum opcode_tag
13811 {
13812 OT_unconditional, /* Instruction cannot be conditionalized.
13813 The ARM condition field is still 0xE. */
13814 OT_unconditionalF, /* Instruction cannot be conditionalized
13815 and carries 0xF in its ARM condition field. */
13816 OT_csuffix, /* Instruction takes a conditional suffix. */
13817 OT_csuffixF, /* Some forms of the instruction take a conditional
13818 suffix, others place 0xF where the condition field
13819 would be. */
13820 OT_cinfix3, /* Instruction takes a conditional infix,
13821 beginning at character index 3. (In
13822 unified mode, it becomes a suffix.) */
13823 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
13824 tsts, cmps, cmns, and teqs. */
13825 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
13826 character index 3, even in unified mode. Used for
13827 legacy instructions where suffix and infix forms
13828 may be ambiguous. */
13829 OT_csuf_or_in3, /* Instruction takes either a conditional
13830 suffix or an infix at character index 3. */
13831 OT_odd_infix_unc, /* This is the unconditional variant of an
13832 instruction that takes a conditional infix
13833 at an unusual position. In unified mode,
13834 this variant will accept a suffix. */
13835 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
13836 are the conditional variants of instructions that
13837 take conditional infixes in unusual positions.
13838 The infix appears at character index
13839 (tag - OT_odd_infix_0). These are not accepted
13840 in unified mode. */
13841 };
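
/* For example, the "adds" entry in the opcode table is tagged OT_cinfix3:
   in divided syntax the condition is written as an infix after character
   index 3 ("addeqs"), while in unified syntax the same entry accepts a
   plain suffix ("addseq").  */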
13842
13843 /* Subroutine of md_assemble, responsible for looking up the primary
13844 opcode from the mnemonic the user wrote. STR points to the
13845 beginning of the mnemonic.
13846
13847 This is not simply a hash table lookup, because of conditional
13848 variants. Most instructions have conditional variants, which are
13849 expressed with a _conditional affix_ to the mnemonic. If we were
13850 to encode each conditional variant as a literal string in the opcode
13851 table, it would have approximately 20,000 entries.
13852
13853 Most mnemonics take this affix as a suffix, and in unified syntax,
13854 'most' is upgraded to 'all'. However, in the divided syntax, some
13855 instructions take the affix as an infix, notably the s-variants of
13856 the arithmetic instructions. Of those instructions, all but six
13857 have the infix appear after the third character of the mnemonic.
13858
13859 Accordingly, the algorithm for looking up primary opcodes given
13860 an identifier is:
13861
13862 1. Look up the identifier in the opcode table.
13863 If we find a match, go to step U.
13864
13865 2. Look up the last two characters of the identifier in the
13866 conditions table. If we find a match, look up the first N-2
13867 characters of the identifier in the opcode table. If we
13868 find a match, go to step CE.
13869
13870 3. Look up the fourth and fifth characters of the identifier in
13871 the conditions table. If we find a match, extract those
13872 characters from the identifier, and look up the remaining
13873 characters in the opcode table. If we find a match, go
13874 to step CM.
13875
13876 4. Fail.
13877
13878 U. Examine the tag field of the opcode structure, in case this is
13879 one of the six instructions with its conditional infix in an
13880 unusual place. If it is, the tag tells us where to find the
13881 infix; look it up in the conditions table and set inst.cond
13882 accordingly. Otherwise, this is an unconditional instruction.
13883 Again set inst.cond accordingly. Return the opcode structure.
13884
13885 CE. Examine the tag field to make sure this is an instruction that
13886 should receive a conditional suffix. If it is not, fail.
13887 Otherwise, set inst.cond from the suffix we already looked up,
13888 and return the opcode structure.
13889
13890 CM. Examine the tag field to make sure this is an instruction that
13891 should receive a conditional infix after the third character.
13892 If it is not, fail. Otherwise, undo the edits to the current
13893 line of input and proceed as for case CE. */
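
/* Worked examples (illustrative): "addseq" fails step 1; step 2 finds the
   "eq" suffix and the "adds" entry, so it reaches step CE (where the
   OT_cinfix3 tag limits it to unified syntax).  "addeqs" fails steps 1
   and 2, but step 3 finds "eq" at characters four and five, strips it,
   finds "adds", and accepts it via step CM (with a deprecation warning
   in unified syntax).  */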
13894
13895 static const struct asm_opcode *
13896 opcode_lookup (char **str)
13897 {
13898 char *end, *base;
13899 char *affix;
13900 const struct asm_opcode *opcode;
13901 const struct asm_cond *cond;
13902 char save[2];
13903 bfd_boolean neon_supported;
13904
13905 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);
13906
13907 /* Scan up to the end of the mnemonic, which must end in white space,
13908 '.' (in unified mode, or for Neon instructions), or end of string. */
13909 for (base = end = *str; *end != '\0'; end++)
13910 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
13911 break;
13912
13913 if (end == base)
13914 return 0;
13915
13916 /* Handle a possible width suffix and/or Neon type suffix. */
13917 if (end[0] == '.')
13918 {
13919 int offset = 2;
13920
13921 /* The .w and .n suffixes are only valid if the unified syntax is in
13922 use. */
13923 if (unified_syntax && end[1] == 'w')
13924 inst.size_req = 4;
13925 else if (unified_syntax && end[1] == 'n')
13926 inst.size_req = 2;
13927 else
13928 offset = 0;
13929
13930 inst.vectype.elems = 0;
13931
13932 *str = end + offset;
13933
13934 if (end[offset] == '.')
13935 {
13936 /* See if we have a Neon type suffix (possible in either unified or
13937 non-unified ARM syntax mode). */
13938 if (parse_neon_type (&inst.vectype, str) == FAIL)
13939 return 0;
13940 }
13941 else if (end[offset] != '\0' && end[offset] != ' ')
13942 return 0;
13943 }
13944 else
13945 *str = end;
13946
13947 /* Look for unaffixed or special-case affixed mnemonic. */
13948 opcode = hash_find_n (arm_ops_hsh, base, end - base);
13949 if (opcode)
13950 {
13951 /* step U */
13952 if (opcode->tag < OT_odd_infix_0)
13953 {
13954 inst.cond = COND_ALWAYS;
13955 return opcode;
13956 }
13957
13958 if (unified_syntax)
13959 as_warn (_("conditional infixes are deprecated in unified syntax"));
13960 affix = base + (opcode->tag - OT_odd_infix_0);
13961 cond = hash_find_n (arm_cond_hsh, affix, 2);
13962 assert (cond);
13963
13964 inst.cond = cond->value;
13965 return opcode;
13966 }
13967
13968   /* Cannot have a conditional suffix on a mnemonic shorter than three
13969      characters (the suffix itself is two characters). */
13970 if (end - base < 3)
13971 return 0;
13972
13973 /* Look for suffixed mnemonic. */
13974 affix = end - 2;
13975 cond = hash_find_n (arm_cond_hsh, affix, 2);
13976 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
13977 if (opcode && cond)
13978 {
13979 /* step CE */
13980 switch (opcode->tag)
13981 {
13982 case OT_cinfix3_legacy:
13983 	  /* Ignore conditional suffixes matched on infix-only mnemonics. */
13984 break;
13985
13986 case OT_cinfix3:
13987 case OT_cinfix3_deprecated:
13988 case OT_odd_infix_unc:
13989 if (!unified_syntax)
13990 return 0;
13991 /* else fall through */
13992
13993 case OT_csuffix:
13994 case OT_csuffixF:
13995 case OT_csuf_or_in3:
13996 inst.cond = cond->value;
13997 return opcode;
13998
13999 case OT_unconditional:
14000 case OT_unconditionalF:
14001 if (thumb_mode)
14002 {
14003 inst.cond = cond->value;
14004 }
14005 else
14006 {
14007 /* delayed diagnostic */
14008 inst.error = BAD_COND;
14009 inst.cond = COND_ALWAYS;
14010 }
14011 return opcode;
14012
14013 default:
14014 return 0;
14015 }
14016 }
14017
14018 /* Cannot have a usual-position infix on a mnemonic of less than
14019 six characters (five would be a suffix). */
14020 if (end - base < 6)
14021 return 0;
14022
14023 /* Look for infixed mnemonic in the usual position. */
14024 affix = base + 3;
14025 cond = hash_find_n (arm_cond_hsh, affix, 2);
14026 if (!cond)
14027 return 0;
14028
14029 memcpy (save, affix, 2);
14030 memmove (affix, affix + 2, (end - affix) - 2);
14031 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
14032 memmove (affix + 2, affix, (end - affix) - 2);
14033 memcpy (affix, save, 2);
14034
14035 if (opcode
14036 && (opcode->tag == OT_cinfix3
14037 || opcode->tag == OT_cinfix3_deprecated
14038 || opcode->tag == OT_csuf_or_in3
14039 || opcode->tag == OT_cinfix3_legacy))
14040 {
14041 /* step CM */
14042 if (unified_syntax
14043 && (opcode->tag == OT_cinfix3
14044 || opcode->tag == OT_cinfix3_deprecated))
14045 as_warn (_("conditional infixes are deprecated in unified syntax"));
14046
14047 inst.cond = cond->value;
14048 return opcode;
14049 }
14050
14051 return 0;
14052 }
14053
14054 void
14055 md_assemble (char *str)
14056 {
14057 char *p = str;
14058 const struct asm_opcode * opcode;
14059
14060   /* Tie the previous label, if any, to the current frag and position. */
14061 if (last_label_seen != NULL)
14062 {
14063 symbol_set_frag (last_label_seen, frag_now);
14064 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
14065 S_SET_SEGMENT (last_label_seen, now_seg);
14066 }
14067
14068 memset (&inst, '\0', sizeof (inst));
14069 inst.reloc.type = BFD_RELOC_UNUSED;
14070
14071 opcode = opcode_lookup (&p);
14072 if (!opcode)
14073 {
14074 /* It wasn't an instruction, but it might be a register alias of
14075 the form alias .req reg, or a Neon .dn/.qn directive. */
14076 if (!create_register_alias (str, p)
14077 && !create_neon_reg_alias (str, p))
14078 as_bad (_("bad instruction `%s'"), str);
14079
14080 return;
14081 }
14082
14083 if (opcode->tag == OT_cinfix3_deprecated)
14084 as_warn (_("s suffix on comparison instruction is deprecated"));
14085
14086 /* The value which unconditional instructions should have in place of the
14087 condition field. */
14088 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
14089
14090 if (thumb_mode)
14091 {
14092 arm_feature_set variant;
14093
14094 variant = cpu_variant;
14095 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
14096 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
14097 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
14098 /* Check that this instruction is supported for this CPU. */
14099 if (!opcode->tvariant
14100 || (thumb_mode == 1
14101 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
14102 {
14103 as_bad (_("selected processor does not support `%s'"), str);
14104 return;
14105 }
14106 if (inst.cond != COND_ALWAYS && !unified_syntax
14107 && opcode->tencode != do_t_branch)
14108 {
14109 as_bad (_("Thumb does not support conditional execution"));
14110 return;
14111 }
14112
14113 /* Check conditional suffixes. */
14114 if (current_it_mask)
14115 {
14116 int cond;
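	  /* The expected condition is current_cc itself when bit 4 of the
	     shifting mask is set, and current_cc ^ 1 when it is clear.  */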
14117 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
14118 current_it_mask <<= 1;
14119 current_it_mask &= 0x1f;
14120 /* The BKPT instruction is unconditional even in an IT block. */
14121 if (!inst.error
14122 && cond != inst.cond && opcode->tencode != do_t_bkpt)
14123 {
14124 as_bad (_("incorrect condition in IT block"));
14125 return;
14126 }
14127 }
14128 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
14129 {
14130 	  as_bad (_("Thumb conditional instruction not in IT block"));
14131 return;
14132 }
14133
14134 mapping_state (MAP_THUMB);
14135 inst.instruction = opcode->tvalue;
14136
14137 if (!parse_operands (p, opcode->operands))
14138 opcode->tencode ();
14139
14140 /* Clear current_it_mask at the end of an IT block. */
14141 if (current_it_mask == 0x10)
14142 current_it_mask = 0;
14143
14144 if (!(inst.error || inst.relax))
14145 {
14146 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
14147 inst.size = (inst.instruction > 0xffff ? 4 : 2);
14148 if (inst.size_req && inst.size_req != inst.size)
14149 {
14150 as_bad (_("cannot honor width suffix -- `%s'"), str);
14151 return;
14152 }
14153 }
14154 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14155 *opcode->tvariant);
14156 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
14157 	 set those bits when Thumb-2 32-bit instructions are seen, i.e.
14158 anything other than bl/blx.
14159 This is overly pessimistic for relaxable instructions. */
14160 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
14161 || inst.relax)
14162 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
14163 arm_ext_v6t2);
14164 }
14165 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
14166 {
14167 /* Check that this instruction is supported for this CPU. */
14168 if (!opcode->avariant ||
14169 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
14170 {
14171 as_bad (_("selected processor does not support `%s'"), str);
14172 return;
14173 }
14174 if (inst.size_req)
14175 {
14176 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
14177 return;
14178 }
14179
14180 mapping_state (MAP_ARM);
14181 inst.instruction = opcode->avalue;
14182 if (opcode->tag == OT_unconditionalF)
14183 inst.instruction |= 0xF << 28;
14184 else
14185 inst.instruction |= inst.cond << 28;
14186 inst.size = INSN_SIZE;
14187 if (!parse_operands (p, opcode->operands))
14188 opcode->aencode ();
14189 /* Arm mode bx is marked as both v4T and v5 because it's still required
14190 on a hypothetical non-thumb v5 core. */
14191 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
14192 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
14193 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
14194 else
14195 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
14196 *opcode->avariant);
14197 }
14198 else
14199 {
14200 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14201 "-- `%s'"), str);
14202 return;
14203 }
14204 output_inst (str);
14205 }
14206
14207 /* Various frobbings of labels and their addresses. */
14208
14209 void
14210 arm_start_line_hook (void)
14211 {
14212 last_label_seen = NULL;
14213 }
14214
14215 void
14216 arm_frob_label (symbolS * sym)
14217 {
14218 last_label_seen = sym;
14219
14220 ARM_SET_THUMB (sym, thumb_mode);
14221
14222 #if defined OBJ_COFF || defined OBJ_ELF
14223 ARM_SET_INTERWORK (sym, support_interwork);
14224 #endif
14225
14226 /* Note - do not allow local symbols (.Lxxx) to be labeled
14227 as Thumb functions. This is because these labels, whilst
14228 they exist inside Thumb code, are not the entry points for
14229 possible ARM->Thumb calls. Also, these labels can be used
14230      as part of a computed goto or switch statement.  E.g. gcc
14231 can generate code that looks like this:
14232
14233 ldr r2, [pc, .Laaa]
14234 lsl r3, r3, #2
14235 ldr r2, [r3, r2]
14236 mov pc, r2
14237
14238 .Lbbb: .word .Lxxx
14239 .Lccc: .word .Lyyy
14240 ..etc...
14241      .Laaa:   .word .Lbbb
14242
14243 The first instruction loads the address of the jump table.
14244 The second instruction converts a table index into a byte offset.
14245 The third instruction gets the jump address out of the table.
14246 The fourth instruction performs the jump.
14247
14248 If the address stored at .Laaa is that of a symbol which has the
14249 Thumb_Func bit set, then the linker will arrange for this address
14250 to have the bottom bit set, which in turn would mean that the
14251 address computation performed by the third instruction would end
14252 up with the bottom bit set. Since the ARM is capable of unaligned
14253 word loads, the instruction would then load the incorrect address
14254 out of the jump table, and chaos would ensue. */
14255 if (label_is_thumb_function_name
14256 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
14257 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
14258 {
14259 /* When the address of a Thumb function is taken the bottom
14260 bit of that address should be set. This will allow
14261 interworking between Arm and Thumb functions to work
14262 correctly. */
14263
14264 THUMB_SET_FUNC (sym, 1);
14265
14266 label_is_thumb_function_name = FALSE;
14267 }
14268
14269 dwarf2_emit_label (sym);
14270 }
14271
14272 int
14273 arm_data_in_code (void)
14274 {
14275 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
14276 {
14277 *input_line_pointer = '/';
14278 input_line_pointer += 5;
14279 *input_line_pointer = 0;
14280 return 1;
14281 }
14282
14283 return 0;
14284 }
14285
14286 char *
14287 arm_canonicalize_symbol_name (char * name)
14288 {
14289 int len;
14290
14291 if (thumb_mode && (len = strlen (name)) > 5
14292 && streq (name + len - 5, "/data"))
14293 *(name + len - 5) = 0;
14294
14295 return name;
14296 }
14297 \f
14298 /* Table of all register names defined by default. The user can
14299 define additional names with .req. Note that all register names
14300 should appear in both upper and lowercase variants. Some registers
14301 also have mixed-case names. */
14302
14303 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
14304 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
14305 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
14306 #define REGSET(p,t) \
14307 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
14308 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
14309 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
14310 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
14311 #define REGSETH(p,t) \
14312 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
14313 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
14314 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
14315 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
14316 #define REGSET2(p,t) \
14317 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
14318 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
14319 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
14320 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
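
/* For example, REGNUM (r, 0, RN) expands to REGDEF (r0, 0, RN), i.e.
   { "r0", 0, REG_TYPE_RN, TRUE, 0 }.  REGNUM2 doubles the number, so the
   Neon alias q1 carries register number 2, matching the way each Q
   register overlays a pair of D registers.  */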
14321
14322 static const struct reg_entry reg_names[] =
14323 {
14324 /* ARM integer registers. */
14325 REGSET(r, RN), REGSET(R, RN),
14326
14327 /* ATPCS synonyms. */
14328 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
14329 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
14330 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
14331
14332 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
14333 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
14334 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
14335
14336 /* Well-known aliases. */
14337 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
14338 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
14339
14340 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
14341 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
14342
14343 /* Coprocessor numbers. */
14344 REGSET(p, CP), REGSET(P, CP),
14345
14346 /* Coprocessor register numbers. The "cr" variants are for backward
14347 compatibility. */
14348 REGSET(c, CN), REGSET(C, CN),
14349 REGSET(cr, CN), REGSET(CR, CN),
14350
14351 /* FPA registers. */
14352 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
14353 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
14354
14355 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
14356 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
14357
14358 /* VFP SP registers. */
14359 REGSET(s,VFS), REGSET(S,VFS),
14360 REGSETH(s,VFS), REGSETH(S,VFS),
14361
14362   /* VFP DP registers. */
14363 REGSET(d,VFD), REGSET(D,VFD),
14364 /* Extra Neon DP registers. */
14365 REGSETH(d,VFD), REGSETH(D,VFD),
14366
14367 /* Neon QP registers. */
14368 REGSET2(q,NQ), REGSET2(Q,NQ),
14369
14370 /* VFP control registers. */
14371 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
14372 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
14373
14374 /* Maverick DSP coprocessor registers. */
14375 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
14376 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
14377
14378 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
14379 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
14380 REGDEF(dspsc,0,DSPSC),
14381
14382 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
14383 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
14384 REGDEF(DSPSC,0,DSPSC),
14385
14386 /* iWMMXt data registers - p0, c0-15. */
14387 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
14388
14389 /* iWMMXt control registers - p1, c0-3. */
14390 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
14391 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
14392 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
14393 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
14394
14395 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
14396 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
14397 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
14398 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
14399 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
14400
14401 /* XScale accumulator registers. */
14402 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
14403 };
14404 #undef REGDEF
14405 #undef REGNUM
14406 #undef REGSET
14407
14408 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
14409 within psr_required_here. */
14410 static const struct asm_psr psrs[] =
14411 {
14412 /* Backward compatibility notation. Note that "all" is no longer
14413 truly all possible PSR bits. */
14414 {"all", PSR_c | PSR_f},
14415 {"flg", PSR_f},
14416 {"ctl", PSR_c},
14417
14418 /* Individual flags. */
14419 {"f", PSR_f},
14420 {"c", PSR_c},
14421 {"x", PSR_x},
14422 {"s", PSR_s},
14423 /* Combinations of flags. */
14424 {"fs", PSR_f | PSR_s},
14425 {"fx", PSR_f | PSR_x},
14426 {"fc", PSR_f | PSR_c},
14427 {"sf", PSR_s | PSR_f},
14428 {"sx", PSR_s | PSR_x},
14429 {"sc", PSR_s | PSR_c},
14430 {"xf", PSR_x | PSR_f},
14431 {"xs", PSR_x | PSR_s},
14432 {"xc", PSR_x | PSR_c},
14433 {"cf", PSR_c | PSR_f},
14434 {"cs", PSR_c | PSR_s},
14435 {"cx", PSR_c | PSR_x},
14436 {"fsx", PSR_f | PSR_s | PSR_x},
14437 {"fsc", PSR_f | PSR_s | PSR_c},
14438 {"fxs", PSR_f | PSR_x | PSR_s},
14439 {"fxc", PSR_f | PSR_x | PSR_c},
14440 {"fcs", PSR_f | PSR_c | PSR_s},
14441 {"fcx", PSR_f | PSR_c | PSR_x},
14442 {"sfx", PSR_s | PSR_f | PSR_x},
14443 {"sfc", PSR_s | PSR_f | PSR_c},
14444 {"sxf", PSR_s | PSR_x | PSR_f},
14445 {"sxc", PSR_s | PSR_x | PSR_c},
14446 {"scf", PSR_s | PSR_c | PSR_f},
14447 {"scx", PSR_s | PSR_c | PSR_x},
14448 {"xfs", PSR_x | PSR_f | PSR_s},
14449 {"xfc", PSR_x | PSR_f | PSR_c},
14450 {"xsf", PSR_x | PSR_s | PSR_f},
14451 {"xsc", PSR_x | PSR_s | PSR_c},
14452 {"xcf", PSR_x | PSR_c | PSR_f},
14453 {"xcs", PSR_x | PSR_c | PSR_s},
14454 {"cfs", PSR_c | PSR_f | PSR_s},
14455 {"cfx", PSR_c | PSR_f | PSR_x},
14456 {"csf", PSR_c | PSR_s | PSR_f},
14457 {"csx", PSR_c | PSR_s | PSR_x},
14458 {"cxf", PSR_c | PSR_x | PSR_f},
14459 {"cxs", PSR_c | PSR_x | PSR_s},
14460 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
14461 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
14462 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
14463 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
14464 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
14465 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
14466 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
14467 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
14468 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
14469 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
14470 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
14471 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
14472 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
14473 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
14474 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
14475 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
14476 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
14477 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
14478 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
14479 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
14480 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
14481 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
14482 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
14483 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
14484 };
14485
14486 /* Table of V7M psr names. */
14487 static const struct asm_psr v7m_psrs[] =
14488 {
14489 {"apsr", 0 },
14490 {"iapsr", 1 },
14491 {"eapsr", 2 },
14492 {"psr", 3 },
14493 {"ipsr", 5 },
14494 {"epsr", 6 },
14495 {"iepsr", 7 },
14496 {"msp", 8 },
14497 {"psp", 9 },
14498 {"primask", 16},
14499 {"basepri", 17},
14500 {"basepri_max", 18},
14501 {"faultmask", 19},
14502 {"control", 20}
14503 };
14504
14505 /* Table of all shift-in-operand names. */
14506 static const struct asm_shift_name shift_names [] =
14507 {
14508 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
14509 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
14510 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
14511 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
14512 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
14513 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
14514 };
14515
14516 /* Table of all explicit relocation names. */
14517 #ifdef OBJ_ELF
14518 static struct reloc_entry reloc_names[] =
14519 {
14520 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
14521 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
14522 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
14523 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
14524 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
14525 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
14526 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
14527 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
14528 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
14529 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
14530 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
14531 };
14532 #endif
14533
14534 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
14535 static const struct asm_cond conds[] =
14536 {
14537 {"eq", 0x0},
14538 {"ne", 0x1},
14539 {"cs", 0x2}, {"hs", 0x2},
14540 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
14541 {"mi", 0x4},
14542 {"pl", 0x5},
14543 {"vs", 0x6},
14544 {"vc", 0x7},
14545 {"hi", 0x8},
14546 {"ls", 0x9},
14547 {"ge", 0xa},
14548 {"lt", 0xb},
14549 {"gt", 0xc},
14550 {"le", 0xd},
14551 {"al", 0xe}
14552 };
14553
14554 static struct asm_barrier_opt barrier_opt_names[] =
14555 {
14556 { "sy", 0xf },
14557 { "un", 0x7 },
14558 { "st", 0xe },
14559 { "unst", 0x6 }
14560 };
14561
14562 /* Table of ARM-format instructions. */
14563
14564 /* Macros for gluing together operand strings. N.B. In all cases
14565 other than OPS0, the trailing OP_stop comes from default
14566 zero-initialization of the unspecified elements of the array. */
14567 #define OPS0() { OP_stop, }
14568 #define OPS1(a) { OP_##a, }
14569 #define OPS2(a,b) { OP_##a,OP_##b, }
14570 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
14571 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
14572 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
14573 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
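
/* E.g. OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, }; the
   implicit trailing OP_stop comes from the zero initialization noted
   above.  */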
14574
14575 /* These macros abstract out the exact format of the mnemonic table and
14576 save some repeated characters. */
14577
14578 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
14579 #define TxCE(mnem, op, top, nops, ops, ae, te) \
14580 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
14581 THUMB_VARIANT, do_##ae, do_##te }
14582
14583 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
14584 a T_MNEM_xyz enumerator. */
14585 #define TCE(mnem, aop, top, nops, ops, ae, te) \
14586 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
14587 #define tCE(mnem, aop, top, nops, ops, ae, te) \
14588 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
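
/* Roughly, tCE (and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c) from
   the table below becomes
     { "and", { OP_RR, OP_oRR, OP_SH, }, OT_csuffix, 0x0000000,
       T_MNEM_and, ARM_VARIANT, THUMB_VARIANT, do_arit, do_t_arit3c }.  */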
14589
14590 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
14591 infix after the third character. */
14592 #define TxC3(mnem, op, top, nops, ops, ae, te) \
14593 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
14594 THUMB_VARIANT, do_##ae, do_##te }
14595 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
14596 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
14597 THUMB_VARIANT, do_##ae, do_##te }
14598 #define TC3(mnem, aop, top, nops, ops, ae, te) \
14599 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
14600 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
14601 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
14602 #define tC3(mnem, aop, top, nops, ops, ae, te) \
14603 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14604 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
14605 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14606
14607 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
14608    appear in the opcode table. */
14609 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
14610 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14611 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
14612
14613 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
14614 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
14615 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
14616 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
14617 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
14618 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
14619 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
14620 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
14621 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
14622 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
14623 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
14624 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
14625 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
14626 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
14627 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
14628 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
14629 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
14630 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
14631 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
14632 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
14633
14634 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
14635 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
14636 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
14637 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
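
/* For instance, tCM (ld,sh, ...) further down expands to one entry per
   condition plus the bare form: "ldsh", "ldeqsh", "ldnesh", and so on,
   each tagged so that opcode_lookup knows the condition sits at
   character index 2.  */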
14638
14639 /* Mnemonic that cannot be conditionalized. The ARM condition-code
14640 field is still 0xE. Many of the Thumb variants can be executed
14641 conditionally, so this is checked separately. */
14642 #define TUE(mnem, op, top, nops, ops, ae, te) \
14643 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
14644 THUMB_VARIANT, do_##ae, do_##te }
14645
14646 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
14647 condition code field. */
14648 #define TUF(mnem, op, top, nops, ops, ae, te) \
14649 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
14650 THUMB_VARIANT, do_##ae, do_##te }
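
/* For example, pld further down is a TUF entry: md_assemble ORs 0xF into
   bits 28-31 of its ARM encoding instead of a condition.  */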
14651
14652 /* ARM-only variants of all the above. */
14653 #define CE(mnem, op, nops, ops, ae) \
14654 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14655
14656 #define C3(mnem, op, nops, ops, ae) \
14657 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14658
14659 /* Legacy mnemonics that always have conditional infix after the third
14660 character. */
14661 #define CL(mnem, op, nops, ops, ae) \
14662 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14663 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14664
14665 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
14666 #define cCE(mnem, op, nops, ops, ae) \
14667 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14668
14669 /* Legacy coprocessor instructions where conditional infix and conditional
14670 suffix are ambiguous. For consistency this includes all FPA instructions,
14671 not just the potentially ambiguous ones. */
14672 #define cCL(mnem, op, nops, ops, ae) \
14673 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14674 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14675
14676 /* Coprocessor, takes either a suffix or a position-3 infix
14677 (for an FPA corner case). */
14678 #define C3E(mnem, op, nops, ops, ae) \
14679 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
14680 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14681
14682 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
14683 { #m1 #m2 #m3, OPS##nops ops, \
14684 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14685 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14686
14687 #define CM(m1, m2, op, nops, ops, ae) \
14688 xCM_(m1, , m2, op, nops, ops, ae), \
14689 xCM_(m1, eq, m2, op, nops, ops, ae), \
14690 xCM_(m1, ne, m2, op, nops, ops, ae), \
14691 xCM_(m1, cs, m2, op, nops, ops, ae), \
14692 xCM_(m1, hs, m2, op, nops, ops, ae), \
14693 xCM_(m1, cc, m2, op, nops, ops, ae), \
14694 xCM_(m1, ul, m2, op, nops, ops, ae), \
14695 xCM_(m1, lo, m2, op, nops, ops, ae), \
14696 xCM_(m1, mi, m2, op, nops, ops, ae), \
14697 xCM_(m1, pl, m2, op, nops, ops, ae), \
14698 xCM_(m1, vs, m2, op, nops, ops, ae), \
14699 xCM_(m1, vc, m2, op, nops, ops, ae), \
14700 xCM_(m1, hi, m2, op, nops, ops, ae), \
14701 xCM_(m1, ls, m2, op, nops, ops, ae), \
14702 xCM_(m1, ge, m2, op, nops, ops, ae), \
14703 xCM_(m1, lt, m2, op, nops, ops, ae), \
14704 xCM_(m1, gt, m2, op, nops, ops, ae), \
14705 xCM_(m1, le, m2, op, nops, ops, ae), \
14706 xCM_(m1, al, m2, op, nops, ops, ae)
14707
14708 #define UE(mnem, op, nops, ops, ae) \
14709 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14710
14711 #define UF(mnem, op, nops, ops, ae) \
14712 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14713
14714 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
14715 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
14716 use the same encoding function for each. */
14717 #define NUF(mnem, op, nops, ops, enc) \
14718 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
14719 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14720
14721 /* Neon data processing, version which indirects through neon_enc_tab for
14722 the various overloaded versions of opcodes. */
14723 #define nUF(mnem, op, nops, ops, enc) \
14724 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
14725 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14726
14727 /* Neon insn with conditional suffix for the ARM version, non-overloaded
14728 version. */
14729 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
14730 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
14731 THUMB_VARIANT, do_##enc, do_##enc }
14732
14733 #define NCE(mnem, op, nops, ops, enc) \
14734 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14735
14736 #define NCEF(mnem, op, nops, ops, enc) \
14737 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14738
14739 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
14740 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
14741 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
14742 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14743
14744 #define nCE(mnem, op, nops, ops, enc) \
14745 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14746
14747 #define nCEF(mnem, op, nops, ops, enc) \
14748 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14749
14750 #define do_0 0
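/* Some entries (e.g. addw below) pass a literal 0 as their ARM encoder;
   do_##ae then pastes to do_0, i.e. a null pointer in the encoder slot.  */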
14751
14752 /* Thumb-only, unconditional. */
14753 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
14754
14755 static const struct asm_opcode insns[] =
14756 {
14757 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14758 #define THUMB_VARIANT &arm_ext_v4t
14759 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
14760 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
14761 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
14762 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
14763 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
14764 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
14765 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
14766 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
14767 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
14768 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
14769 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
14770 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
14771 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
14772 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
14773 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
14774 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
14775
14776 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14777 for setting PSR flag bits. They are obsolete in V6 and do not
14778 have Thumb equivalents. */
14779 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14780 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14781 CL(tstp, 110f000, 2, (RR, SH), cmp),
14782 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14783 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14784 CL(cmpp, 150f000, 2, (RR, SH), cmp),
14785 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14786 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14787 CL(cmnp, 170f000, 2, (RR, SH), cmp),
14788
14789 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
14790 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
14791 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
14792 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
14793
14794 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
14795 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14796 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
14797 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14798
14799 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14800 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14801 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14802 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14803 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14804 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14805
14806 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
14807 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
14808 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
14809 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
14810
14811 /* Pseudo ops. */
14812 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
14813 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
14814 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
14815
14816 /* Thumb-compatibility pseudo ops. */
14817 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
14818 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
14819 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
14820 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
14821 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
14822 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
14823 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
14824 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
14825 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
14826 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
14827 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
14828 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
14829
14830 /* These may simplify to neg. */
14831 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
14832 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
14833
14834 #undef THUMB_VARIANT
14835 #define THUMB_VARIANT &arm_ext_v6
14836 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
14837
14838 /* V1 instructions with no Thumb analogue prior to V6T2. */
14839 #undef THUMB_VARIANT
14840 #define THUMB_VARIANT &arm_ext_v6t2
14841 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14842 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14843 CL(teqp, 130f000, 2, (RR, SH), cmp),
14844
14845 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
14846 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
14847 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
14848 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
14849
14850 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14851 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14852
14853 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14854 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14855
14856 /* V1 instructions with no Thumb analogue at all. */
14857 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
14858 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
14859
14860 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
14861 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
14862 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
14863 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
14864 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
14865 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
14866 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
14867 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
14868
14869 #undef ARM_VARIANT
14870 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14871 #undef THUMB_VARIANT
14872 #define THUMB_VARIANT &arm_ext_v4t
14873 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14874 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14875
14876 #undef THUMB_VARIANT
14877 #define THUMB_VARIANT &arm_ext_v6t2
14878 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14879 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
14880
14881 /* Generic coprocessor instructions. */
14882 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14883 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14884 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14885 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14886 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14887 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14888 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14889
14890 #undef ARM_VARIANT
14891 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14892 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14893 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14894
14895 #undef ARM_VARIANT
14896 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14897 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
14898 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
14899
14900 #undef ARM_VARIANT
14901 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14902 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14903 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14904 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14905 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14906 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14907 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14908 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14909 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14910
14911 #undef ARM_VARIANT
14912 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14913 #undef THUMB_VARIANT
14914 #define THUMB_VARIANT &arm_ext_v4t
14915 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14916 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14917 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14918 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14919 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14920 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14921
14922 #undef ARM_VARIANT
14923 #define ARM_VARIANT &arm_ext_v4t_5
14924 /* ARM Architecture 4T. */
14925 /* Note: bx (and blx) are required on V5, even if the processor does
14926 not support Thumb. */
14927 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
14928
14929 #undef ARM_VARIANT
14930 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14931 #undef THUMB_VARIANT
14932 #define THUMB_VARIANT &arm_ext_v5t
14933 /* Note: blx has 2 variants; the .value coded here is for
14934 BLX(2). Only this variant has conditional execution. */
14935 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
14936 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
14937
14938 #undef THUMB_VARIANT
14939 #define THUMB_VARIANT &arm_ext_v6t2
14940 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
14941 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14942 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14943 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14944 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14945 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14946 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14947 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14948
14949 #undef ARM_VARIANT
14950 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14951 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14952 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14953 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14954 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14955
14956 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14957 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14958
14959 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14960 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14961 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14962 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14963
14964 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14965 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14966 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14967 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14968
14969 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14970 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14971
14972 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14973 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14974 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14975 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14976
14977 #undef ARM_VARIANT
14978 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
14979 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
14980 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14981 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14982
14983 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14984 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14985
14986 #undef ARM_VARIANT
14987 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
14988 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
14989
14990 #undef ARM_VARIANT
14991 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
14992 #undef THUMB_VARIANT
14993 #define THUMB_VARIANT &arm_ext_v6
14994 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
14995 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
14996 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14997 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14998 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14999 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15000 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15001 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15002 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15003 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
15004
15005 #undef THUMB_VARIANT
15006 #define THUMB_VARIANT &arm_ext_v6t2
15007 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
15008 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15009 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
15010
15011 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
15012 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
15013
15014 /* ARM V6 not included in V7M (e.g. integer SIMD). */
15015 #undef THUMB_VARIANT
15016 #define THUMB_VARIANT &arm_ext_v6_notm
15017 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
15018 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
15019 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
15020 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15021 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15022 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15023 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15024 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15025 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15026 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15027 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15028 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15029 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15030 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15031 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15032 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15033 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15034 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15035 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15036 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15037 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15038 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15039 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15040 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15041 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15042 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15043 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15044 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15045 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15046 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15047 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15048 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15049 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15050 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15051 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15052 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15053 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15054 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15055 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15056 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15057 UF(rfeib, 9900a00, 1, (RRw), rfe),
15058 UF(rfeda, 8100a00, 1, (RRw), rfe),
15059 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15060 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
15061 UF(rfefa, 9900a00, 1, (RRw), rfe),
15062 UF(rfeea, 8100a00, 1, (RRw), rfe),
15063 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
15064 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15065 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15066 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15067 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15068 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15069 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15070 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
15071 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
15072 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
15073 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15074 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15075 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15076 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15077 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15078 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15079 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15080 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
15081 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15082 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15083 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15084 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15085 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15086 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15087 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15088 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15089 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15090 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15091 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
15092 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
15093 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
15094 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
15095 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
15096 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
15097 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
15098 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
15099 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
15100 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
15101
15102 #undef ARM_VARIANT
15103 #define ARM_VARIANT &arm_ext_v6k
15104 #undef THUMB_VARIANT
15105 #define THUMB_VARIANT &arm_ext_v6k
15106 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
15107 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
15108 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
15109 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
15110
15111 #undef THUMB_VARIANT
15112 #define THUMB_VARIANT &arm_ext_v6_notm
15113 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
15114 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
15115
15116 #undef THUMB_VARIANT
15117 #define THUMB_VARIANT &arm_ext_v6t2
15118 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15119 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
15120 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15121 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
15122 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
15123
15124 #undef ARM_VARIANT
15125 #define ARM_VARIANT &arm_ext_v6z
15126 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
15127
15128 #undef ARM_VARIANT
15129 #define ARM_VARIANT &arm_ext_v6t2
15130 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
15131 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
15132 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15133 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
15134
15135 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
15136 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
15137 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
15138 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
15139
15140 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15141 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15142 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15143 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
15144
15145 UT(cbnz, b900, 2, (RR, EXP), t_cbz),
15146 UT(cbz, b100, 2, (RR, EXP), t_cbz),
15147 /* The ARM instruction set does not have an IT instruction, so always allow these mnemonics. */
15148 #undef ARM_VARIANT
15149 #define ARM_VARIANT &arm_ext_v1
15150 TUE(it, 0, bf08, 1, (COND), it, t_it),
15151 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
15152 TUE(ite, 0, bf04, 1, (COND), it, t_it),
15153 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
15154 TUE(itet, 0, bf06, 1, (COND), it, t_it),
15155 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
15156 TUE(itee, 0, bf02, 1, (COND), it, t_it),
15157 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
15158 TUE(itett, 0, bf07, 1, (COND), it, t_it),
15159 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
15160 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
15161 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
15162 TUE(itete, 0, bf05, 1, (COND), it, t_it),
15163 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
15164 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
15165
15166 /* Thumb2 only instructions. */
15167 #undef ARM_VARIANT
15168 #define ARM_VARIANT NULL
15169
15170 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15171 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
15172 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
15173 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
15174
15175 /* Thumb-2 hardware division instructions (R and M profiles only). */
15176 #undef THUMB_VARIANT
15177 #define THUMB_VARIANT &arm_ext_div
15178 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
15179 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
15180
15181 /* ARM V7 instructions. */
15182 #undef ARM_VARIANT
15183 #define ARM_VARIANT &arm_ext_v7
15184 #undef THUMB_VARIANT
15185 #define THUMB_VARIANT &arm_ext_v7
15186 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
15187 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
15188 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
15189 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
15190 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
15191
15192 #undef ARM_VARIANT
15193 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15194 cCE(wfs, e200110, 1, (RR), rd),
15195 cCE(rfs, e300110, 1, (RR), rd),
15196 cCE(wfc, e400110, 1, (RR), rd),
15197 cCE(rfc, e500110, 1, (RR), rd),
15198
15199 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
15200 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
15201 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
15202 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
15203
15204 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
15205 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
15206 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
15207 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
15208
15209 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
15210 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
15211 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
15212 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
15213 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
15214 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
15215 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
15216 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
15217 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
15218 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
15219 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
15220 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
15221
15222 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
15223 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
15224 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
15225 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
15226 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
15227 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
15228 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
15229 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
15230 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
15231 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
15232 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
15233 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
15234
15235 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
15236 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
15237 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
15238 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
15239 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
15240 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
15241 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
15242 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
15243 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
15244 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
15245 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
15246 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
15247
15248 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
15249 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
15250 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
15251 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
15252 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
15253 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
15254 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
15255 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
15256 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
15257 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
15258 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
15259 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
15260
15261 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
15262 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
15263 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
15264 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
15265 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
15266 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
15267 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
15268 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
15269 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
15270 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
15271 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
15272 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
15273
15274 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
15275 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
15276 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
15277 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
15278 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
15279 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
15280 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
15281 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
15282 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
15283 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
15284 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
15285 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
15286
15287 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
15288 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
15289 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
15290 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
15291 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
15292 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
15293 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
15294 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
15295 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
15296 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
15297 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
15298 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
15299
15300 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
15301 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
15302 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
15303 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
15304 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
15305 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
15306 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
15307 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
15308 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
15309 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
15310 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
15311  cCL(expez,	e788160, 2, (RF, RF_IF),      rd_rm),
15312
15313 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
15314 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
15315 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
15316 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
15317 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
15318 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
15319 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
15320 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
15321 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
15322 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
15323 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
15324 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
15325
15326 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
15327 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
15328 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
15329 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
15330 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
15331 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
15332 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
15333 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
15334 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
15335 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
15336 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
15337 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
15338
15339 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
15340 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
15341 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
15342 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
15343 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
15344 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
15345 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
15346 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
15347 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
15348 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
15349 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
15350 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
15351
15352 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
15353 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
15354 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
15355 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
15356 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
15357 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
15358 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
15359 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
15360 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
15361 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
15362 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
15363 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
15364
15365 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
15366 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
15367 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
15368 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
15369 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
15370 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
15371 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
15372 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
15373 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
15374 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
15375 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
15376 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
15377
15378 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
15379 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
15380 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
15381 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
15382 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
15383 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
15384 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
15385 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
15386 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
15387 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
15388 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
15389 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
15390
15391 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
15392 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
15393 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
15394 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
15395 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
15396 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
15397 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
15398 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
15399 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
15400 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
15401 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
15402 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
15403
15404 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
15405 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
15406 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
15407 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
15408 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
15409 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
15410 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
15411 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
15412 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
15413 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
15414 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
15415 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
15416
15417 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
15418 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
15419 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
15420 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
15421 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
15422 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15423 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15424 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15425 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
15426 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
15427 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
15428 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
15429
15430 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
15431 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
15432 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
15433 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
15434 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
15435 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15436 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15437 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15438 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
15439 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
15440 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
15441 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
15442
15443 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
15444 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
15445 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
15446 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
15447 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
15448 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15449 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15450 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15451 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
15452 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
15453 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
15454 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
15455
15456 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
15457 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
15458 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
15459 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
15460 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
15461 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15462 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15463 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15464 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
15465 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
15466 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
15467 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
15468
15469 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
15470 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
15471 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
15472 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
15473 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
15474 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15475 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15476 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15477 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
15478 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
15479 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
15480 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
15481
15482 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
15483 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
15484 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
15485 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
15486 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
15487 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15488 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15489 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15490 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
15491 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
15492 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
15493 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
15494
15495 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
15496 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
15497 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
15498 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
15499 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
15500 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15501 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15502 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15503 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
15504 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
15505 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
15506 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
15507
15508 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
15509 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
15510 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
15511 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
15512 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
15513 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15514 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15515 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15516 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
15517 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
15518 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
15519 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
15520
15521 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
15522 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
15523 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
15524 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
15525 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
15526 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15527 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15528 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15529 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
15530 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
15531 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
15532 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
15533
15534 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
15535 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
15536 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
15537 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
15538 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
15539 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15540 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15541 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15542 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
15543 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
15544 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
15545 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
15546
15547 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15548 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15549 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15550 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15551 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15552 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15553 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15554 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15555 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15556 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15557 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15558 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15559
15560 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15561 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15562 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15563 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15564 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15565 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15566 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15567 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15568 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15569 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15570 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15571 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15572
15573 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15574 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15575 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15576 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15577 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15578 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15579 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15580 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15581 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15582 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15583 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15584 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15585
15586 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
15587 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
15588 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
15589 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
15590
15591 cCL(flts, e000110, 2, (RF, RR), rn_rd),
15592 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
15593 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
15594 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
15595 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
15596 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
15597 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
15598 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
15599 cCL(flte, e080110, 2, (RF, RR), rn_rd),
15600 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
15601 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
15602 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
15603
15604 /* The implementation of the FIX instruction is broken on some
15605 assemblers, in that it accepts a precision specifier as well as a
15606 rounding specifier, despite the fact that this is meaningless.
15607 To be more compatible, we accept it as well, though of course it
15608 does not set any bits. */
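 /* For example, "fixsp r0, f1" and "fixdp r0, f1" are both accepted and
    assemble to exactly the same encoding as "fixp r0, f1"; the precision
    letter sets no bits.  */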
15609 cCE(fix, e100110, 2, (RR, RF), rd_rm),
15610 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
15611 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
15612 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
15613 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
15614 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
15615 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
15616 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
15617 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
15618 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
15619 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
15620 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
15621 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
15622
15623  /* Instructions that were new with the real FPA; call them V2.  */
15624 #undef ARM_VARIANT
15625 #define ARM_VARIANT &fpu_fpa_ext_v2
15626 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15627 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15628 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15629 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15630 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15631 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15632
15633 #undef ARM_VARIANT
15634 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
15635 /* Moves and type conversions. */
15636 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
15637 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
15638 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
15639 cCE(fmstat, ef1fa10, 0, (), noargs),
15640 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
15641 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
15642 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
15643 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15644 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
15645 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15646 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
15647 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
15648
15649 /* Memory operations. */
15650 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15651 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15652 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15653 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15654 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15655 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15656 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15657 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15658 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15659 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15660 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15661 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15662 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15663 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15664 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15665 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15666 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15667 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15668
15669 /* Monadic operations. */
15670 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
15671 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
15672 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
15673
15674 /* Dyadic operations. */
15675 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15676 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15677 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15678 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15679 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15680 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15681 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15682 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15683 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15684
15685 /* Comparisons. */
15686 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
15687 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
15688 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
15689 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
15690
15691 #undef ARM_VARIANT
15692 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15693 /* Moves and type conversions. */
15694 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15695 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15696 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15697 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
15698 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
15699 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
15700 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
15701 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15702 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
15703 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15704 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15705 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15706 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15707
15708 /* Memory operations. */
15709 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15710 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15711 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15712 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15713 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15714 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15715 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15716 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15717 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15718 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15719
15720 /* Monadic operations. */
15721 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15722 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15723 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15724
15725 /* Dyadic operations. */
15726 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15727 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15728 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15729 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15730 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15731 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15732 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15733 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15734 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15735
15736 /* Comparisons. */
15737 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15738 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
15739 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15740 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
15741
15742 #undef ARM_VARIANT
15743 #define ARM_VARIANT &fpu_vfp_ext_v2
15744 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
15745 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
15746 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
15747 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
15748
15749 /* Instructions which may belong to either the Neon or VFP instruction sets.
15750 Individual encoder functions perform additional architecture checks. */
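 /* For example, "vadd.f32 s0, s1, s2" needs only VFP, while
    "vadd.i32 q0, q1, q2" requires Neon; the shared entries below let the
    encoder choose whichever instruction set matches the operands.  */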
15751 #undef ARM_VARIANT
15752 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15753 #undef THUMB_VARIANT
15754 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15755 /* These mnemonics are unique to VFP. */
15756 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
15757 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
15758 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15759 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15760 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15761 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15762 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15763 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
15764 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
15765 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
15766
15767 /* Mnemonics shared by Neon and VFP. */
15768 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
15769 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15770 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15771
15772 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15773 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15774
15775 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15776 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15777
15778 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15779 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15780 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15781 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15782 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15783 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15784 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15785 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15786
15787 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
15788
15789 /* NOTE: All VMOV encoding is special-cased! */
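 /* For example, "vmov r0, s0", "vmov d0, r0, r1", "vmov.i32 q0, #0" and
    "vmov.f32 s0, s1" are all parsed through this single entry; the
    operand forms found select the actual encoding.  */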
15790 NCE(vmov, 0, 1, (VMOV), neon_mov),
15791 NCE(vmovq, 0, 1, (VMOV), neon_mov),
15792
15793 #undef THUMB_VARIANT
15794 #define THUMB_VARIANT &fpu_neon_ext_v1
15795 #undef ARM_VARIANT
15796 #define ARM_VARIANT &fpu_neon_ext_v1
15797 /* Data processing with three registers of the same length. */
15798   /* Integer ops, valid types S8 S16 S32 U8 U16 U32.  */
15799 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
15800 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
15801 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15802 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15803 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15804 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15805 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15806 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15807   /* Integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
15808 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15809 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15810 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15811 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15812 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
15813 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
15814 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
15815 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
15816 /* If not immediate, fall back to neon_dyadic_i64_su.
15817 shl_imm should accept I8 I16 I32 I64,
15818 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
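 /* For example, "vshl.i32 d0, d1, #3" uses the immediate form handled
    here, while "vshl.s32 d0, d1, d2" falls back to the register form.  */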
15819 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
15820 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
15821 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
15822 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
15823 /* Logic ops, types optional & ignored. */
15824 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
15825 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
15826 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
15827 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
15828 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
15829 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
15830 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
15831 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
15832 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
15833 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
15834 /* Bitfield ops, untyped. */
15835 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15836 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15837 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15838 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15839 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15840 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15841 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15842 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15843 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15844 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15845 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15846 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15847 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15848 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15849 back to neon_dyadic_if_su. */
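 /* For example, "vcge.s32 d0, d1, #0" uses the compare-with-zero
    encoding, while "vcge.s32 d0, d1, d2" uses the register compare.  */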
15850 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15851 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15852 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15853 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15854 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15855 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15856 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15857 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15858 /* Comparison. Type I8 I16 I32 F32. */
15859 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
15860 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
15861 /* As above, D registers only. */
15862 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15863 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15864 /* Int and float variants, signedness unimportant. */
15865 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15866 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15867 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
15868 /* Add/sub take types I8 I16 I32 I64 F32. */
15869 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15870 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15871 /* vtst takes sizes 8, 16, 32. */
15872 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
15873 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
15874 /* VMUL takes I8 I16 I32 F32 P8. */
15875 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
15876 /* VQD{R}MULH takes S16 S32. */
15877 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15878 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15879 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15880 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15881 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15882 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15883 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15884 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15885 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15886 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15887 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15888 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15889 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15890 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15891 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15892 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15893
15894 /* Two address, int/float. Types S8 S16 S32 F32. */
15895 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
15896 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
15897
15898 /* Data processing with two registers and a shift amount. */
15899 /* Right shifts, and variants with rounding.
15900 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15901 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15902 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15903 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15904 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15905 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15906 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15907 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15908 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15909 /* Shift and insert. Sizes accepted 8 16 32 64. */
15910 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
15911 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
15912 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
15913 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
15914 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15915 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
15916 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
15917 /* Right shift immediate, saturating & narrowing, with rounding variants.
15918 Types accepted S16 S32 S64 U16 U32 U64. */
15919 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15920 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15921 /* As above, unsigned. Types accepted S16 S32 S64. */
15922 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15923 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15924 /* Right shift narrowing. Types accepted I16 I32 I64. */
15925 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15926 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15927 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15928 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
15929 /* CVT with optional immediate for fixed-point variant. */
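 /* For example, "vcvt.s32.f32 q0, q0, #16" selects the fixed-point
    conversion with 16 fraction bits; without the immediate the plain
    float<->integer conversion is encoded.  */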
15930 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
15931
15932 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
15933 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
15934
15935 /* Data processing, three registers of different lengths. */
15936 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15937 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
15938 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
15939 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
15940 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
15941 /* If not scalar, fall back to neon_dyadic_long.
15942 Vector types as above, scalar types S16 S32 U16 U32. */
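 /* For example, "vmlal.s16 q0, d1, d2[1]" uses the by-scalar form, while
    "vmlal.s16 q0, d1, d2" falls back to the plain long form.  */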
15943 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15944 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15945 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15946 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15947 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15948 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15949 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15950 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15951 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15952 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15953 /* Saturating doubling multiplies. Types S16 S32. */
15954 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15955 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15956 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15957 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15958 S16 S32 U16 U32. */
15959 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
15960
15961 /* Extract. Size 8. */
15962 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
15963 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
15964
15965 /* Two registers, miscellaneous. */
15966 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15967 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
15968 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
15969 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
15970 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
15971 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
15972 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
15973 /* Vector replicate. Sizes 8 16 32. */
15974 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
15975 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
15976 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15977 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
15978 /* VMOVN. Types I16 I32 I64. */
15979 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
15980 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15981 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
15982 /* VQMOVUN. Types S16 S32 S64. */
15983 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
15984 /* VZIP / VUZP. Sizes 8 16 32. */
15985 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
15986 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
15987 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
15988 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
15989 /* VQABS / VQNEG. Types S8 S16 S32. */
15990 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15991 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
15992 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15993 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
15994 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
15995 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
15996 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
15997 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
15998 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
15999 /* Reciprocal estimates. Types U32 F32. */
16000 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
16001 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
16002 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
16003 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
16004 /* VCLS. Types S8 S16 S32. */
16005 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
16006 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
16007 /* VCLZ. Types I8 I16 I32. */
16008 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
16009 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
16010 /* VCNT. Size 8. */
16011 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
16012 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
16013 /* Two address, untyped. */
16014 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
16015 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
16016 /* VTRN. Sizes 8 16 32. */
16017 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
16018 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
16019
16020 /* Table lookup. Size 8. */
16021 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16022 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
16023
16024 #undef THUMB_VARIANT
16025 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
16026 #undef ARM_VARIANT
16027 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
16028 /* Neon element/structure load/store. */
16029 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16030 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
16031 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16032 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
16033 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16034 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
16035 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16036 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
16037
16038 #undef THUMB_VARIANT
16039 #define THUMB_VARIANT &fpu_vfp_ext_v3
16040 #undef ARM_VARIANT
16041 #define ARM_VARIANT &fpu_vfp_ext_v3
16042 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
16043 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
16044 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16045 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16046 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16047 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16048 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16049 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16050 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16051 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16052 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16053 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16054 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16055 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16056 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
16057 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
16058 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
16059 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
16060
16061 #undef THUMB_VARIANT
16062 #undef ARM_VARIANT
16063 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
16064 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16065 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16066 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16067 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16068 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16069 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
16070 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
16071 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
16072
16073 #undef ARM_VARIANT
16074 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
16075 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
16076 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
16077 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
16078 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
16079 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
16080 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
16081 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
16082 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
16083 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
16084 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16085 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16086 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
16087 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16088 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16089 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
16090 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16091 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16092 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
16093 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
16094 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
16095 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16096 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16097 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16098 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16099 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16100 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
16101 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
16102 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
16103 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
16104 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
16105 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
16106 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
16107 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
16108 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
16109 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
16110 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
16111 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
16112 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16113 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16114 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16115 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16116 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16117 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16118 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16119 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16120 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16121 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
16122 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16123 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16124 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16125 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16126 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16127 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16128 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16129 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16130 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16131 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16132 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16133 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16134 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16135 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16136 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16137 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16138 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16139 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16140 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16141 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16142 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16143 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16144 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16145 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16146 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16147 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16148 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16149 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16150 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16151 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16152 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16153 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16154 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16155 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16156 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16157 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16158 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16159 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16160 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16161 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16162 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16163 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
16164 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16165 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16166 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16167 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16168 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16169 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16170 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16171 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16172 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16173 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16174 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16175 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16176 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16177 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16178 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16179 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16180 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16181 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16182 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16183 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16184 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16185 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
16186 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16187 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16188 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16189 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16190 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16191 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16192 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16193 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16194 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16195 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16196 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16197 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16198 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16199 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16200 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16201 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16202 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16203 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16204 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16205 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16206 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16207 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16208 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16209 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16210 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16211 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16212 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16213 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16214 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16215 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16216 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16217 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
16218 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
16219 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
16220 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
16221 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
16222 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
16223 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16224 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16225 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16226 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
16227 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
16228 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
16229 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
16230 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
16231 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
16232 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16233 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16234 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16235 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16236 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
16237
16238 #undef ARM_VARIANT
16239 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16240 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
16241 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
16242 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
16243 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
16244 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
16245 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
16246 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16247 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16248 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16249 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16250 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16251 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16252 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16253 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16254 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16255 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16256 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16257 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16258 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16259 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16260 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
16261 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16262 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16263 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16264 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16265 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16266 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16267 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16268 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16269 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16270 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16271 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16272 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16273 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16274 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16275 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16276 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16277 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16278 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16279 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16280 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16281 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16282 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16283 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16284 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16285 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16286 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16287 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16288 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16289 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16290 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16291 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16292 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16293 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16294 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16295 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16296 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16297
16298 #undef ARM_VARIANT
16299 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16300 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16301 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16302 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16303 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16304 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16305 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16306 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16307 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16308 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
16309 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
16310 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
16311 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
16312 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
16313 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
16314 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
16315 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
16316 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
16317 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
16318 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
16319 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
16320 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
16321 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
16322 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
16323 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
16324 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
16325 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
16326 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
16327 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
16328 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
16329 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
16330 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
16331 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
16332 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
16333 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
16334 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
16335 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
16336 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
16337 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
16338 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
16339 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
16340 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
16341 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
16342 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
16343 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
16344 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
16345 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
16346 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
16347 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
16348 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
16349 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
16350 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
16351 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
16352 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
16353 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
16354 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
16355 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
16356 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
16357 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
16358 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
16359 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
16360 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
16361 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
16362 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
16363 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
16364 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16365 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16366 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16367 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16368 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16369 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16370 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16371 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16372 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16373 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16374 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16375 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16376 };
16377 #undef ARM_VARIANT
16378 #undef THUMB_VARIANT
16379 #undef TCE
16380 #undef TCM
16381 #undef TUE
16382 #undef TUF
16383 #undef TCC
16384 #undef cCE
16385 #undef cCL
16386 #undef C3E
16387 #undef CE
16388 #undef CM
16389 #undef UE
16390 #undef UF
16391 #undef UT
16392 #undef NUF
16393 #undef nUF
16394 #undef NCE
16395 #undef nCE
16396 #undef OPS0
16397 #undef OPS1
16398 #undef OPS2
16399 #undef OPS3
16400 #undef OPS4
16401 #undef OPS5
16402 #undef OPS6
16403 #undef do_0
16404 \f
16405 /* MD interface: bits in the object file. */
16406
16407 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
16408 for use in the a.out file, and store them in the array pointed to by buf.
16409 This knows about the endian-ness of the target machine and does
16410 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
16411 2 (short) and 4 (long). Floating-point numbers are put out as a series of
16412 LITTLENUMS (shorts, here at least). */
16413
16414 void
16415 md_number_to_chars (char * buf, valueT val, int n)
16416 {
16417 if (target_big_endian)
16418 number_to_chars_bigendian (buf, val, n);
16419 else
16420 number_to_chars_littleendian (buf, val, n);
16421 }
16422
16423 static valueT
16424 md_chars_to_number (char * buf, int n)
16425 {
16426 valueT result = 0;
16427 unsigned char * where = (unsigned char *) buf;
16428
16429 if (target_big_endian)
16430 {
16431 while (n--)
16432 {
16433 result <<= 8;
16434 result |= (*where++ & 255);
16435 }
16436 }
16437 else
16438 {
16439 while (n--)
16440 {
16441 result <<= 8;
16442 result |= (where[n] & 255);
16443 }
16444 }
16445
16446 return result;
16447 }
16448
16449 /* MD interface: Sections. */
16450
16451 /* Estimate the size of a frag before relaxing. Assume everything fits in
16452 2 bytes. */
16453
16454 int
16455 md_estimate_size_before_relax (fragS * fragp,
16456 segT segtype ATTRIBUTE_UNUSED)
16457 {
16458 fragp->fr_var = 2;
16459 return 2;
16460 }
16461
16462 /* Convert a machine dependent frag. */
16463
16464 void
16465 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
16466 {
16467 unsigned long insn;
16468 unsigned long old_op;
16469 char *buf;
16470 expressionS exp;
16471 fixS *fixp;
16472 int reloc_type;
16473 int pc_rel;
16474 int opcode;
16475
16476 buf = fragp->fr_literal + fragp->fr_fix;
16477
16478 old_op = bfd_get_16(abfd, buf);
16479 if (fragp->fr_symbol) {
16480 exp.X_op = O_symbol;
16481 exp.X_add_symbol = fragp->fr_symbol;
16482 } else {
16483 exp.X_op = O_constant;
16484 }
16485 exp.X_add_number = fragp->fr_offset;
16486 opcode = fragp->fr_subtype;
16487 switch (opcode)
16488 {
16489 case T_MNEM_ldr_pc:
16490 case T_MNEM_ldr_pc2:
16491 case T_MNEM_ldr_sp:
16492 case T_MNEM_str_sp:
16493 case T_MNEM_ldr:
16494 case T_MNEM_ldrb:
16495 case T_MNEM_ldrh:
16496 case T_MNEM_str:
16497 case T_MNEM_strb:
16498 case T_MNEM_strh:
16499 if (fragp->fr_var == 4)
16500 {
16501 insn = THUMB_OP32(opcode);
16502 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
16503 {
16504 insn |= (old_op & 0x700) << 4;
16505 }
16506 else
16507 {
16508 insn |= (old_op & 7) << 12;
16509 insn |= (old_op & 0x38) << 13;
16510 }
16511 insn |= 0x00000c00;
16512 put_thumb32_insn (buf, insn);
16513 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
16514 }
16515 else
16516 {
16517 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
16518 }
16519 pc_rel = (opcode == T_MNEM_ldr_pc2);
16520 break;
16521 case T_MNEM_adr:
16522 if (fragp->fr_var == 4)
16523 {
16524 insn = THUMB_OP32 (opcode);
16525 insn |= (old_op & 0xf0) << 4;
16526 put_thumb32_insn (buf, insn);
16527 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
16528 }
16529 else
16530 {
16531 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16532 exp.X_add_number -= 4;
16533 }
16534 pc_rel = 1;
16535 break;
16536 case T_MNEM_mov:
16537 case T_MNEM_movs:
16538 case T_MNEM_cmp:
16539 case T_MNEM_cmn:
16540 if (fragp->fr_var == 4)
16541 {
16542 int r0off = (opcode == T_MNEM_mov
16543 || opcode == T_MNEM_movs) ? 0 : 8;
16544 insn = THUMB_OP32 (opcode);
16545 insn = (insn & 0xe1ffffff) | 0x10000000;
16546 insn |= (old_op & 0x700) << r0off;
16547 put_thumb32_insn (buf, insn);
16548 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16549 }
16550 else
16551 {
16552 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
16553 }
16554 pc_rel = 0;
16555 break;
16556 case T_MNEM_b:
16557 if (fragp->fr_var == 4)
16558 {
16559 insn = THUMB_OP32(opcode);
16560 put_thumb32_insn (buf, insn);
16561 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
16562 }
16563 else
16564 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
16565 pc_rel = 1;
16566 break;
16567 case T_MNEM_bcond:
16568 if (fragp->fr_var == 4)
16569 {
16570 insn = THUMB_OP32(opcode);
16571 insn |= (old_op & 0xf00) << 14;
16572 put_thumb32_insn (buf, insn);
16573 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
16574 }
16575 else
16576 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
16577 pc_rel = 1;
16578 break;
16579 case T_MNEM_add_sp:
16580 case T_MNEM_add_pc:
16581 case T_MNEM_inc_sp:
16582 case T_MNEM_dec_sp:
16583 if (fragp->fr_var == 4)
16584 {
16585 /* ??? Choose between add and addw. */
16586 insn = THUMB_OP32 (opcode);
16587 insn |= (old_op & 0xf0) << 4;
16588 put_thumb32_insn (buf, insn);
16589 if (opcode == T_MNEM_add_pc)
16590 reloc_type = BFD_RELOC_ARM_T32_IMM12;
16591 else
16592 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16593 }
16594 else
16595 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16596 pc_rel = 0;
16597 break;
16598
16599 case T_MNEM_addi:
16600 case T_MNEM_addis:
16601 case T_MNEM_subi:
16602 case T_MNEM_subis:
16603 if (fragp->fr_var == 4)
16604 {
16605 insn = THUMB_OP32 (opcode);
16606 insn |= (old_op & 0xf0) << 4;
16607 insn |= (old_op & 0xf) << 16;
16608 put_thumb32_insn (buf, insn);
16609 if (insn & (1 << 20))
16610 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16611 else
16612 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16613 }
16614 else
16615 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16616 pc_rel = 0;
16617 break;
16618 default:
16619 abort();
16620 }
16621 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
16622 reloc_type);
16623 fixp->fx_file = fragp->fr_file;
16624 fixp->fx_line = fragp->fr_line;
16625 fragp->fr_fix += fragp->fr_var;
16626 }
16627
16628 /* Return the size of a relaxable immediate operand instruction.
16629 SHIFT and SIZE specify the form of the allowable immediate. */
16630 static int
16631 relax_immediate (fragS *fragp, int size, int shift)
16632 {
16633 offsetT offset;
16634 offsetT mask;
16635 offsetT low;
16636
16637 /* ??? Should be able to do better than this. */
16638 if (fragp->fr_symbol)
16639 return 4;
16640
16641 low = (1 << shift) - 1;
16642 mask = (1 << (shift + size)) - (1 << shift);
16643 offset = fragp->fr_offset;
16644 /* Force misaligned offsets to 32-bit variant. */
16645 if (offset & low)
16646 return 4;
16647 if (offset & ~mask)
16648 return 4;
16649 return 2;
16650 }
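/* For illustration (a sketch of the arithmetic above): for the 16-bit
   "ldr Rt, [Rn, #imm]" form, arm_relax_frag calls
   relax_immediate (fragp, 5, 2), giving LOW == 0x3 and MASK == 0x7c.
   A constant offset of 0, 4, ... 124 therefore keeps the 2-byte form,
   while 126 (misaligned) or 128 (out of range) forces the 4-byte one.  */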
16651
16652 /* Get the address of a symbol during relaxation. */
16653 static addressT
16654 relaxed_symbol_addr(fragS *fragp, long stretch)
16655 {
16656 fragS *sym_frag;
16657 addressT addr;
16658 symbolS *sym;
16659
16660 sym = fragp->fr_symbol;
16661 sym_frag = symbol_get_frag (sym);
16662 know (S_GET_SEGMENT (sym) != absolute_section
16663 || sym_frag == &zero_address_frag);
16664 addr = S_GET_VALUE (sym) + fragp->fr_offset;
16665
16666 /* If frag has yet to be reached on this pass, assume it will
16667 move by STRETCH just as we did. If this is not so, it will
16668 be because some frag in between grows, and that will force
16669 another pass. */
16670
16671 if (stretch != 0
16672 && sym_frag->relax_marker != fragp->relax_marker)
16673 addr += stretch;
16674
16675 return addr;
16676 }
16677
16678 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16679 load. */
16680 static int
16681 relax_adr (fragS *fragp, asection *sec, long stretch)
16682 {
16683 addressT addr;
16684 offsetT val;
16685
16686 /* Assume worst case for symbols not known to be in the same section. */
16687 if (!S_IS_DEFINED(fragp->fr_symbol)
16688 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16689 return 4;
16690
16691 val = relaxed_symbol_addr(fragp, stretch);
16692 addr = fragp->fr_address + fragp->fr_fix;
16693 addr = (addr + 4) & ~3;
16694 /* Force misaligned targets to 32-bit variant. */
16695 if (val & 3)
16696 return 4;
16697 val -= addr;
16698 if (val < 0 || val > 1020)
16699 return 4;
16700 return 2;
16701 }
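/* For illustration: the narrow adr / pc-relative ldr can only reach
   word-aligned targets in the range [Align(PC,4), Align(PC,4) + 1020].
   E.g. an adr at address 0x1002 reads PC as 0x1006, aligned down to
   0x1004, so a word-aligned literal anywhere in 0x1004..0x1400 keeps
   the 2-byte form; anything behind the instruction, out of range or
   misaligned is forced to the 32-bit variant.  */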
16702
16703 /* Return the size of a relaxable add/sub immediate instruction. */
16704 static int
16705 relax_addsub (fragS *fragp, asection *sec)
16706 {
16707 char *buf;
16708 int op;
16709
16710 buf = fragp->fr_literal + fragp->fr_fix;
16711 op = bfd_get_16(sec->owner, buf);
16712 if ((op & 0xf) == ((op >> 4) & 0xf))
16713 return relax_immediate (fragp, 8, 0);
16714 else
16715 return relax_immediate (fragp, 3, 0);
16716 }
16717
16718
16719 /* Return the size of a relaxable branch instruction. BITS is the
16720 size of the offset field in the narrow instruction. */
16721
16722 static int
16723 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
16724 {
16725 addressT addr;
16726 offsetT val;
16727 offsetT limit;
16728
16729 /* Assume worst case for symbols not known to be in the same section. */
16730 if (!S_IS_DEFINED(fragp->fr_symbol)
16731 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16732 return 4;
16733
16734 val = relaxed_symbol_addr(fragp, stretch);
16735 addr = fragp->fr_address + fragp->fr_fix + 4;
16736 val -= addr;
16737
16738 /* The offset field holds a signed value in units of 2 bytes. */
16739 limit = 1 << bits;
16740 if (val >= limit || val < -limit)
16741 return 4;
16742 return 2;
16743 }
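/* For illustration: arm_relax_frag passes BITS == 11 for an
   unconditional "b" and BITS == 8 for "b<cond>", so the narrow forms
   are kept only while the target stays within roughly +/-2KB
   (respectively +/-256 bytes) of the instruction address plus 4.  */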
16744
16745
16746 /* Relax a machine dependent frag. This returns the amount by which
16747 the current size of the frag should change. */
16748
16749 int
16750 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
16751 {
16752 int oldsize;
16753 int newsize;
16754
16755 oldsize = fragp->fr_var;
16756 switch (fragp->fr_subtype)
16757 {
16758 case T_MNEM_ldr_pc2:
16759 newsize = relax_adr(fragp, sec, stretch);
16760 break;
16761 case T_MNEM_ldr_pc:
16762 case T_MNEM_ldr_sp:
16763 case T_MNEM_str_sp:
16764 newsize = relax_immediate(fragp, 8, 2);
16765 break;
16766 case T_MNEM_ldr:
16767 case T_MNEM_str:
16768 newsize = relax_immediate(fragp, 5, 2);
16769 break;
16770 case T_MNEM_ldrh:
16771 case T_MNEM_strh:
16772 newsize = relax_immediate(fragp, 5, 1);
16773 break;
16774 case T_MNEM_ldrb:
16775 case T_MNEM_strb:
16776 newsize = relax_immediate(fragp, 5, 0);
16777 break;
16778 case T_MNEM_adr:
16779 newsize = relax_adr(fragp, sec, stretch);
16780 break;
16781 case T_MNEM_mov:
16782 case T_MNEM_movs:
16783 case T_MNEM_cmp:
16784 case T_MNEM_cmn:
16785 newsize = relax_immediate(fragp, 8, 0);
16786 break;
16787 case T_MNEM_b:
16788 newsize = relax_branch(fragp, sec, 11, stretch);
16789 break;
16790 case T_MNEM_bcond:
16791 newsize = relax_branch(fragp, sec, 8, stretch);
16792 break;
16793 case T_MNEM_add_sp:
16794 case T_MNEM_add_pc:
16795 newsize = relax_immediate (fragp, 8, 2);
16796 break;
16797 case T_MNEM_inc_sp:
16798 case T_MNEM_dec_sp:
16799 newsize = relax_immediate (fragp, 7, 2);
16800 break;
16801 case T_MNEM_addi:
16802 case T_MNEM_addis:
16803 case T_MNEM_subi:
16804 case T_MNEM_subis:
16805 newsize = relax_addsub (fragp, sec);
16806 break;
16807 default:
16808 abort();
16809 }
16810
16811 fragp->fr_var = newsize;
16812 /* Freeze wide instructions that are at or before the same location as
16813 in the previous pass. This avoids infinite loops.
16814 Don't freeze them unconditionally because targets may be artificially
16815 misaligned by the expansion of preceding frags. */
16816 if (stretch <= 0 && newsize > 2)
16817 {
16818 md_convert_frag (sec->owner, sec, fragp);
16819 frag_wane(fragp);
16820 }
16821
16822 return newsize - oldsize;
16823 }
16824
16825 /* Round up a section size to the appropriate boundary. */
16826
16827 valueT
16828 md_section_align (segT segment ATTRIBUTE_UNUSED,
16829 valueT size)
16830 {
16831 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
16832 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
16833 {
16834 /* For a.out, force the section size to be aligned. If we don't do
16835 this, BFD will align it for us, but it will not write out the
16836 final bytes of the section. This may be a bug in BFD, but it is
16837 easier to fix it here since that is how the other a.out targets
16838 work. */
16839 int align;
16840
16841 align = bfd_get_section_alignment (stdoutput, segment);
16842 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
16843 }
16844 #endif
16845
16846 return size;
16847 }
16848
16849 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16850 of an rs_align_code fragment. */
16851
16852 void
16853 arm_handle_align (fragS * fragP)
16854 {
16855 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
16856 static char const thumb_noop[2] = { 0xc0, 0x46 };
16857 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
16858 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
16859
16860 int bytes, fix, noop_size;
16861 char * p;
16862 const char * noop;
16863
16864 if (fragP->fr_type != rs_align_code)
16865 return;
16866
16867 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
16868 p = fragP->fr_literal + fragP->fr_fix;
16869 fix = 0;
16870
16871 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
16872 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
16873
16874 if (fragP->tc_frag_data)
16875 {
16876 if (target_big_endian)
16877 noop = thumb_bigend_noop;
16878 else
16879 noop = thumb_noop;
16880 noop_size = sizeof (thumb_noop);
16881 }
16882 else
16883 {
16884 if (target_big_endian)
16885 noop = arm_bigend_noop;
16886 else
16887 noop = arm_noop;
16888 noop_size = sizeof (arm_noop);
16889 }
16890
16891 if (bytes & (noop_size - 1))
16892 {
16893 fix = bytes & (noop_size - 1);
16894 memset (p, 0, fix);
16895 p += fix;
16896 bytes -= fix;
16897 }
16898
16899 while (bytes >= noop_size)
16900 {
16901 memcpy (p, noop, noop_size);
16902 p += noop_size;
16903 bytes -= noop_size;
16904 fix += noop_size;
16905 }
16906
16907 fragP->fr_fix += fix;
16908 fragP->fr_var = noop_size;
16909 }
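/* Worked example of the filling above: with 10 bytes of padding in an
   ARM-mode frag we emit 2 zero bytes followed by two 4-byte NOPs
   (mov r0, r0 == 0xe1a00000); in a Thumb frag the filler NOP is the
   2-byte mov r8, r8 (0x46c0) instead.  */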
16910
16911 /* Called from md_do_align. Used to create an alignment
16912 frag in a code section. */
16913
16914 void
16915 arm_frag_align_code (int n, int max)
16916 {
16917 char * p;
16918
16919 /* We assume that there will never be a requirement
16920 to support alignments greater than 32 bytes. */
16921 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
16922 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16923
16924 p = frag_var (rs_align_code,
16925 MAX_MEM_FOR_RS_ALIGN_CODE,
16926 1,
16927 (relax_substateT) max,
16928 (symbolS *) NULL,
16929 (offsetT) n,
16930 (char *) NULL);
16931 *p = 0;
16932 }
16933
16934 /* Perform target specific initialisation of a frag. */
16935
16936 void
16937 arm_init_frag (fragS * fragP)
16938 {
16939 /* Record whether this frag is in an ARM or a THUMB area. */
16940 fragP->tc_frag_data = thumb_mode;
16941 }
16942
16943 #ifdef OBJ_ELF
16944 /* When we change sections we need to issue a new mapping symbol. */
16945
16946 void
16947 arm_elf_change_section (void)
16948 {
16949 flagword flags;
16950 segment_info_type *seginfo;
16951
16952 /* Link an unlinked unwind index table section to the .text section. */
16953 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
16954 && elf_linked_to_section (now_seg) == NULL)
16955 elf_linked_to_section (now_seg) = text_section;
16956
16957 if (!SEG_NORMAL (now_seg))
16958 return;
16959
16960 flags = bfd_get_section_flags (stdoutput, now_seg);
16961
16962 /* We can ignore sections that only contain debug info. */
16963 if ((flags & SEC_ALLOC) == 0)
16964 return;
16965
16966 seginfo = seg_info (now_seg);
16967 mapstate = seginfo->tc_segment_info_data.mapstate;
16968 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
16969 }
16970
16971 int
16972 arm_elf_section_type (const char * str, size_t len)
16973 {
16974 if (len == 5 && strncmp (str, "exidx", 5) == 0)
16975 return SHT_ARM_EXIDX;
16976
16977 return -1;
16978 }
16979 \f
16980 /* Code to deal with unwinding tables. */
16981
16982 static void add_unwind_adjustsp (offsetT);
16983
16984 /* Generate any deferred unwind frame offset. */
16985
16986 static void
16987 flush_pending_unwind (void)
16988 {
16989 offsetT offset;
16990
16991 offset = unwind.pending_offset;
16992 unwind.pending_offset = 0;
16993 if (offset != 0)
16994 add_unwind_adjustsp (offset);
16995 }
16996
16997 /* Add an opcode to this list for this function. Two-byte opcodes should
16998 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
16999 order. */
17000
17001 static void
17002 add_unwind_opcode (valueT op, int length)
17003 {
17004 /* Add any deferred stack adjustment. */
17005 if (unwind.pending_offset)
17006 flush_pending_unwind ();
17007
17008 unwind.sp_restored = 0;
17009
17010 if (unwind.opcode_count + length > unwind.opcode_alloc)
17011 {
17012 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
17013 if (unwind.opcodes)
17014 unwind.opcodes = xrealloc (unwind.opcodes,
17015 unwind.opcode_alloc);
17016 else
17017 unwind.opcodes = xmalloc (unwind.opcode_alloc);
17018 }
17019 while (length > 0)
17020 {
17021 length--;
17022 unwind.opcodes[unwind.opcode_count] = op & 0xff;
17023 op >>= 8;
17024 unwind.opcode_count++;
17025 }
17026 }
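/* For illustration: a two-byte opcode such as 0x8400 (EHABI "pop {r14}")
   is passed as op == 0x8400, length == 2, and is stored as the byte 0x00
   followed by 0x84 -- i.e. the opcodes[] array holds the list in reverse
   order, which create_unwind_entry undoes when it packs the words
   MSB-first.  */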
17027
17028 /* Add unwind opcodes to adjust the stack pointer. */
17029
17030 static void
17031 add_unwind_adjustsp (offsetT offset)
17032 {
17033 valueT op;
17034
17035 if (offset > 0x200)
17036 {
17037 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
17038 char bytes[5];
17039 int n;
17040 valueT o;
17041
17042 /* Long form: 0xb2, uleb128. */
17043 /* This might not fit in a word so add the individual bytes,
17044 remembering the list is built in reverse order. */
17045 o = (valueT) ((offset - 0x204) >> 2);
17046 if (o == 0)
17047 add_unwind_opcode (0, 1);
17048
17049 /* Calculate the uleb128 encoding of the offset. */
17050 n = 0;
17051 while (o)
17052 {
17053 bytes[n] = o & 0x7f;
17054 o >>= 7;
17055 if (o)
17056 bytes[n] |= 0x80;
17057 n++;
17058 }
17059 /* Add the insn. */
17060 for (; n; n--)
17061 add_unwind_opcode (bytes[n - 1], 1);
17062 add_unwind_opcode (0xb2, 1);
17063 }
17064 else if (offset > 0x100)
17065 {
17066 /* Two short opcodes. */
17067 add_unwind_opcode (0x3f, 1);
17068 op = (offset - 0x104) >> 2;
17069 add_unwind_opcode (op, 1);
17070 }
17071 else if (offset > 0)
17072 {
17073 /* Short opcode. */
17074 op = (offset - 4) >> 2;
17075 add_unwind_opcode (op, 1);
17076 }
17077 else if (offset < 0)
17078 {
17079 offset = -offset;
17080 while (offset > 0x100)
17081 {
17082 add_unwind_opcode (0x7f, 1);
17083 offset -= 0x100;
17084 }
17085 op = ((offset - 4) >> 2) | 0x40;
17086 add_unwind_opcode (op, 1);
17087 }
17088 }
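/* Worked examples of the encodings above:
   offset 0x40  -> single opcode 0x0f          (vsp += (0x0f << 2) + 4)
   offset 0x120 -> opcodes 0x3f, 0x07          (vsp += 0x100, then += 0x20)
   offset 0x300 -> opcodes 0xb2, uleb128 0x3f  (vsp += 0x204 + (0x3f << 2))
   Remember the list is built in reverse, so the uleb128 bytes are added
   before the 0xb2 marker.  */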
17089
17090 /* Finish the list of unwind opcodes for this function. */
17091 static void
17092 finish_unwind_opcodes (void)
17093 {
17094 valueT op;
17095
17096 if (unwind.fp_used)
17097 {
17098 /* Adjust sp as necessary. */
17099 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
17100 flush_pending_unwind ();
17101
17102 /* After restoring sp from the frame pointer. */
17103 op = 0x90 | unwind.fp_reg;
17104 add_unwind_opcode (op, 1);
17105 }
17106 else
17107 flush_pending_unwind ();
17108 }
17109
17110
17111 /* Start an exception table entry. If idx is nonzero this is an index table
17112 entry. */
17113
17114 static void
17115 start_unwind_section (const segT text_seg, int idx)
17116 {
17117 const char * text_name;
17118 const char * prefix;
17119 const char * prefix_once;
17120 const char * group_name;
17121 size_t prefix_len;
17122 size_t text_len;
17123 char * sec_name;
17124 size_t sec_name_len;
17125 int type;
17126 int flags;
17127 int linkonce;
17128
17129 if (idx)
17130 {
17131 prefix = ELF_STRING_ARM_unwind;
17132 prefix_once = ELF_STRING_ARM_unwind_once;
17133 type = SHT_ARM_EXIDX;
17134 }
17135 else
17136 {
17137 prefix = ELF_STRING_ARM_unwind_info;
17138 prefix_once = ELF_STRING_ARM_unwind_info_once;
17139 type = SHT_PROGBITS;
17140 }
17141
17142 text_name = segment_name (text_seg);
17143 if (streq (text_name, ".text"))
17144 text_name = "";
17145
17146 if (strncmp (text_name, ".gnu.linkonce.t.",
17147 strlen (".gnu.linkonce.t.")) == 0)
17148 {
17149 prefix = prefix_once;
17150 text_name += strlen (".gnu.linkonce.t.");
17151 }
17152
17153 prefix_len = strlen (prefix);
17154 text_len = strlen (text_name);
17155 sec_name_len = prefix_len + text_len;
17156 sec_name = xmalloc (sec_name_len + 1);
17157 memcpy (sec_name, prefix, prefix_len);
17158 memcpy (sec_name + prefix_len, text_name, text_len);
17159 sec_name[prefix_len + text_len] = '\0';
17160
17161 flags = SHF_ALLOC;
17162 linkonce = 0;
17163 group_name = 0;
17164
17165 /* Handle COMDAT group. */
17166 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
17167 {
17168 group_name = elf_group_name (text_seg);
17169 if (group_name == NULL)
17170 {
17171 as_bad ("Group section `%s' has no group signature",
17172 segment_name (text_seg));
17173 ignore_rest_of_line ();
17174 return;
17175 }
17176 flags |= SHF_GROUP;
17177 linkonce = 1;
17178 }
17179
17180 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
17181
17182 /* Set the section link for index tables. */
17183 if (idx)
17184 elf_linked_to_section (now_seg) = text_seg;
17185 }
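/* For illustration (assuming the usual ELF_STRING_ARM_unwind* values of
   ".ARM.exidx"/".ARM.extab" and their ".gnu.linkonce.arm*" variants):
   code in ".text" gets its tables in ".ARM.exidx" and ".ARM.extab",
   code in ".text.foo" in ".ARM.exidx.text.foo" and ".ARM.extab.text.foo",
   and code in ".gnu.linkonce.t.foo" in the corresponding linkonce
   sections with the "foo" suffix.  */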
17186
17187
17188 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
17189 personality routine data. Returns zero, or the index table value for
17190 an inline entry. */
17191
17192 static valueT
17193 create_unwind_entry (int have_data)
17194 {
17195 int size;
17196 addressT where;
17197 char *ptr;
17198 /* The current word of data. */
17199 valueT data;
17200 /* The number of bytes left in this word. */
17201 int n;
17202
17203 finish_unwind_opcodes ();
17204
17205 /* Remember the current text section. */
17206 unwind.saved_seg = now_seg;
17207 unwind.saved_subseg = now_subseg;
17208
17209 start_unwind_section (now_seg, 0);
17210
17211 if (unwind.personality_routine == NULL)
17212 {
17213 if (unwind.personality_index == -2)
17214 {
17215 if (have_data)
17216 as_bad (_("handlerdata in cantunwind frame"));
17217 return 1; /* EXIDX_CANTUNWIND. */
17218 }
17219
17220 /* Use a default personality routine if none is specified. */
17221 if (unwind.personality_index == -1)
17222 {
17223 if (unwind.opcode_count > 3)
17224 unwind.personality_index = 1;
17225 else
17226 unwind.personality_index = 0;
17227 }
17228
17229 /* Space for the personality routine entry. */
17230 if (unwind.personality_index == 0)
17231 {
17232 if (unwind.opcode_count > 3)
17233 as_bad (_("too many unwind opcodes for personality routine 0"));
17234
17235 if (!have_data)
17236 {
17237 /* All the data is inline in the index table. */
17238 data = 0x80;
17239 n = 3;
17240 while (unwind.opcode_count > 0)
17241 {
17242 unwind.opcode_count--;
17243 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17244 n--;
17245 }
17246
17247 /* Pad with "finish" opcodes. */
17248 while (n--)
17249 data = (data << 8) | 0xb0;
17250
17251 return data;
17252 }
17253 size = 0;
17254 }
17255 else
17256 /* We get two opcodes "free" in the first word. */
17257 size = unwind.opcode_count - 2;
17258 }
17259 else
17260 /* An extra byte is required for the opcode count. */
17261 size = unwind.opcode_count + 1;
17262
17263 size = (size + 3) >> 2;
17264 if (size > 0xff)
17265 as_bad (_("too many unwind opcodes"));
17266
17267 frag_align (2, 0, 0);
17268 record_alignment (now_seg, 2);
17269 unwind.table_entry = expr_build_dot ();
17270
17271 /* Allocate the table entry. */
17272 ptr = frag_more ((size << 2) + 4);
17273 where = frag_now_fix () - ((size << 2) + 4);
17274
17275 switch (unwind.personality_index)
17276 {
17277 case -1:
17278 /* ??? Should this be a PLT generating relocation? */
17279 /* Custom personality routine. */
17280 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
17281 BFD_RELOC_ARM_PREL31);
17282
17283 where += 4;
17284 ptr += 4;
17285
17286 /* Set the first byte to the number of additional words. */
17287 data = size - 1;
17288 n = 3;
17289 break;
17290
17291 /* ABI defined personality routines. */
17292 case 0:
17293 /* Three opcode bytes are packed into the first word. */
17294 data = 0x80;
17295 n = 3;
17296 break;
17297
17298 case 1:
17299 case 2:
17300 /* The size and first two opcode bytes go in the first word. */
17301 data = ((0x80 + unwind.personality_index) << 8) | size;
17302 n = 2;
17303 break;
17304
17305 default:
17306 /* Should never happen. */
17307 abort ();
17308 }
17309
17310 /* Pack the opcodes into words (MSB first), reversing the list at the same
17311 time. */
17312 while (unwind.opcode_count > 0)
17313 {
17314 if (n == 0)
17315 {
17316 md_number_to_chars (ptr, data, 4);
17317 ptr += 4;
17318 n = 4;
17319 data = 0;
17320 }
17321 unwind.opcode_count--;
17322 n--;
17323 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17324 }
17325
17326 /* Finish off the last word. */
17327 if (n < 4)
17328 {
17329 /* Pad with "finish" opcodes. */
17330 while (n--)
17331 data = (data << 8) | 0xb0;
17332
17333 md_number_to_chars (ptr, data, 4);
17334 }
17335
17336 if (!have_data)
17337 {
17338 /* Add an empty descriptor if there is no user-specified data. */
17339 ptr = frag_more (4);
17340 md_number_to_chars (ptr, 0, 4);
17341 }
17342
17343 return 0;
17344 }
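/* For illustration: for a frame that needs no restore opcodes at all,
   the default personality index is 0 and the padding above yields the
   inline index-table word 0x80b0b0b0, i.e. __aeabi_unwind_cpp_pr0
   followed by three "finish" opcodes.  */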
17345
17346
17347 /* Initialize the DWARF-2 unwind information for this procedure. */
17348
17349 void
17350 tc_arm_frame_initial_instructions (void)
17351 {
17352 cfi_add_CFA_def_cfa (REG_SP, 0);
17353 }
17354 #endif /* OBJ_ELF */
17355
17356 /* Convert REGNAME to a DWARF-2 register number. */
17357
17358 int
17359 tc_arm_regname_to_dw2regnum (char *regname)
17360 {
17361 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
17362
17363 if (reg == FAIL)
17364 return -1;
17365
17366 return reg;
17367 }
17368
17369 #ifdef TE_PE
17370 void
17371 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
17372 {
17373 expressionS expr;
17374
17375 expr.X_op = O_secrel;
17376 expr.X_add_symbol = symbol;
17377 expr.X_add_number = 0;
17378 emit_expr (&expr, size);
17379 }
17380 #endif
17381
17382 /* MD interface: Symbol and relocation handling. */
17383
17384 /* Return the address within the segment that a PC-relative fixup is
17385 relative to. For ARM, PC-relative fixups applied to instructions
17386 are generally relative to the location of the fixup plus 8 bytes.
17387 Thumb branches are offset by 4, and Thumb loads relative to PC
17388 require special handling. */
17389
17390 long
17391 md_pcrel_from_section (fixS * fixP, segT seg)
17392 {
17393 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
17394
17395 /* If this is pc-relative and we are going to emit a relocation
17396 then we just want to put out any pipeline compensation that the linker
17397 will need. Otherwise we want to use the calculated base.
17398 For WinCE we skip the bias for externals as well, since this
17399 is how the MS ARM-CE assembler behaves and we want to be compatible. */
17400 if (fixP->fx_pcrel
17401 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
17402 || (arm_force_relocation (fixP)
17403 #ifdef TE_WINCE
17404 && !S_IS_EXTERNAL (fixP->fx_addsy)
17405 #endif
17406 )))
17407 base = 0;
17408
17409 switch (fixP->fx_r_type)
17410 {
17411 /* PC relative addressing on the Thumb is slightly odd as the
17412 bottom two bits of the PC are forced to zero for the
17413 calculation. This happens *after* application of the
17414 pipeline offset. However, Thumb adrl already adjusts for
17415 this, so we need not do it again. */
17416 case BFD_RELOC_ARM_THUMB_ADD:
17417 return base & ~3;
17418
17419 case BFD_RELOC_ARM_THUMB_OFFSET:
17420 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17421 case BFD_RELOC_ARM_T32_ADD_PC12:
17422 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
17423 return (base + 4) & ~3;
17424
17425 /* Thumb branches are simply offset by +4. */
17426 case BFD_RELOC_THUMB_PCREL_BRANCH7:
17427 case BFD_RELOC_THUMB_PCREL_BRANCH9:
17428 case BFD_RELOC_THUMB_PCREL_BRANCH12:
17429 case BFD_RELOC_THUMB_PCREL_BRANCH20:
17430 case BFD_RELOC_THUMB_PCREL_BRANCH23:
17431 case BFD_RELOC_THUMB_PCREL_BRANCH25:
17432 case BFD_RELOC_THUMB_PCREL_BLX:
17433 return base + 4;
17434
17435 /* ARM mode branches are offset by +8. However, the Windows CE
17436 loader expects the relocation not to take this into account. */
17437 case BFD_RELOC_ARM_PCREL_BRANCH:
17438 case BFD_RELOC_ARM_PCREL_CALL:
17439 case BFD_RELOC_ARM_PCREL_JUMP:
17440 case BFD_RELOC_ARM_PCREL_BLX:
17441 case BFD_RELOC_ARM_PLT32:
17442 #ifdef TE_WINCE
17443 /* When handling fixups immediately, because we have already
17444 discovered the value of a symbol or the address of the frag involved,
17445 we must account for the +8 offset ourselves, as the OS loader will never see the reloc;
17446 see fixup_segment() in write.c.
17447 The S_IS_EXTERNAL test handles the case of global symbols.
17448 Those need the calculated base, not just the pipeline compensation the linker will apply. */
17449 if (fixP->fx_pcrel
17450 && fixP->fx_addsy != NULL
17451 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
17452 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
17453 return base + 8;
17454 return base;
17455 #else
17456 return base + 8;
17457 #endif
17458
17459 /* ARM mode loads relative to PC are also offset by +8. Unlike
17460 branches, the Windows CE loader *does* expect the relocation
17461 to take this into account. */
17462 case BFD_RELOC_ARM_OFFSET_IMM:
17463 case BFD_RELOC_ARM_OFFSET_IMM8:
17464 case BFD_RELOC_ARM_HWLITERAL:
17465 case BFD_RELOC_ARM_LITERAL:
17466 case BFD_RELOC_ARM_CP_OFF_IMM:
17467 return base + 8;
17468
17469
17470 /* Other PC-relative relocations are un-offset. */
17471 default:
17472 return base;
17473 }
17474 }
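/* For illustration: when a fixup is resolved locally, an ARM-mode "b"
   at address 0x8000 targeting 0x8010 is biased against base + 8, i.e.
   0x8008, leaving a value of 8 bytes for md_apply_fix to encode; the
   equivalent Thumb branch would be biased against 0x8004 instead.  */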
17475
17476 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
17477 Otherwise we have no need to default values of symbols. */
17478
17479 symbolS *
17480 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
17481 {
17482 #ifdef OBJ_ELF
17483 if (name[0] == '_' && name[1] == 'G'
17484 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
17485 {
17486 if (!GOT_symbol)
17487 {
17488 if (symbol_find (name))
17489 as_bad ("GOT already in the symbol table");
17490
17491 GOT_symbol = symbol_new (name, undefined_section,
17492 (valueT) 0, & zero_address_frag);
17493 }
17494
17495 return GOT_symbol;
17496 }
17497 #endif
17498
17499 return 0;
17500 }
17501
17502 /* Subroutine of md_apply_fix. Check to see if an immediate can be
17503 computed as two separate immediate values, added together. We
17504 already know that this value cannot be computed by just one ARM
17505 instruction. */
17506
17507 static unsigned int
17508 validate_immediate_twopart (unsigned int val,
17509 unsigned int * highpart)
17510 {
17511 unsigned int a;
17512 unsigned int i;
17513
17514 for (i = 0; i < 32; i += 2)
17515 if (((a = rotate_left (val, i)) & 0xff) != 0)
17516 {
17517 if (a & 0xff00)
17518 {
17519 if (a & ~ 0xffff)
17520 continue;
17521 * highpart = (a >> 8) | ((i + 24) << 7);
17522 }
17523 else if (a & 0xff0000)
17524 {
17525 if (a & 0xff000000)
17526 continue;
17527 * highpart = (a >> 16) | ((i + 16) << 7);
17528 }
17529 else
17530 {
17531 assert (a & 0xff000000);
17532 * highpart = (a >> 24) | ((i + 8) << 7);
17533 }
17534
17535 return (a & 0xff) | (i << 7);
17536 }
17537
17538 return FAIL;
17539 }
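/* Worked example of the split above: 0xfff is not a valid single ARM
   immediate (it needs twelve contiguous bits), but the loop splits it
   into a low part encoding 0xff (rotation 0) and a high part encoding
   0xf00 (0x0f rotated right by 24), which is how ADRL expands into two
   ADD instructions.  */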
17540
17541 static int
17542 validate_offset_imm (unsigned int val, int hwse)
17543 {
17544 if ((hwse && val > 255) || val > 4095)
17545 return FAIL;
17546 return val;
17547 }
17548
17549 /* Subroutine of md_apply_fix. Do those data_ops which can take a
17550 negative immediate constant by altering the instruction. A bit of
17551 a hack really.
17552 MOV <-> MVN
17553 AND <-> BIC
17554 ADC <-> SBC
17555 by inverting the second operand, and
17556 ADD <-> SUB
17557 CMP <-> CMN
17558 by negating the second operand. */
17559
17560 static int
17561 negate_data_op (unsigned long * instruction,
17562 unsigned long value)
17563 {
17564 int op, new_inst;
17565 unsigned long negated, inverted;
17566
17567 negated = encode_arm_immediate (-value);
17568 inverted = encode_arm_immediate (~value);
17569
17570 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
17571 switch (op)
17572 {
17573 /* First negates. */
17574 case OPCODE_SUB: /* ADD <-> SUB */
17575 new_inst = OPCODE_ADD;
17576 value = negated;
17577 break;
17578
17579 case OPCODE_ADD:
17580 new_inst = OPCODE_SUB;
17581 value = negated;
17582 break;
17583
17584 case OPCODE_CMP: /* CMP <-> CMN */
17585 new_inst = OPCODE_CMN;
17586 value = negated;
17587 break;
17588
17589 case OPCODE_CMN:
17590 new_inst = OPCODE_CMP;
17591 value = negated;
17592 break;
17593
17594 /* Now Inverted ops. */
17595 case OPCODE_MOV: /* MOV <-> MVN */
17596 new_inst = OPCODE_MVN;
17597 value = inverted;
17598 break;
17599
17600 case OPCODE_MVN:
17601 new_inst = OPCODE_MOV;
17602 value = inverted;
17603 break;
17604
17605 case OPCODE_AND: /* AND <-> BIC */
17606 new_inst = OPCODE_BIC;
17607 value = inverted;
17608 break;
17609
17610 case OPCODE_BIC:
17611 new_inst = OPCODE_AND;
17612 value = inverted;
17613 break;
17614
17615 case OPCODE_ADC: /* ADC <-> SBC */
17616 new_inst = OPCODE_SBC;
17617 value = inverted;
17618 break;
17619
17620 case OPCODE_SBC:
17621 new_inst = OPCODE_ADC;
17622 value = inverted;
17623 break;
17624
17625 /* We cannot do anything. */
17626 default:
17627 return FAIL;
17628 }
17629
17630 if (value == (unsigned) FAIL)
17631 return FAIL;
17632
17633 *instruction &= OPCODE_MASK;
17634 *instruction |= new_inst << DATA_OP_SHIFT;
17635 return value;
17636 }
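/* For illustration: a fixup such as "mov r0, #0xffffff00", whose
   constant has no 8-bit rotated encoding, is rewritten by the table
   above into "mvn r0, #0xff"; similarly "add r0, r1, #-4" becomes
   "sub r0, r1, #4".  */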
17637
17638 /* Like negate_data_op, but for Thumb-2. */
17639
17640 static unsigned int
17641 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
17642 {
17643 int op, new_inst;
17644 int rd;
17645 unsigned int negated, inverted;
17646
17647 negated = encode_thumb32_immediate (-value);
17648 inverted = encode_thumb32_immediate (~value);
17649
17650 rd = (*instruction >> 8) & 0xf;
17651 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
17652 switch (op)
17653 {
17654 /* ADD <-> SUB. Includes CMP <-> CMN. */
17655 case T2_OPCODE_SUB:
17656 new_inst = T2_OPCODE_ADD;
17657 value = negated;
17658 break;
17659
17660 case T2_OPCODE_ADD:
17661 new_inst = T2_OPCODE_SUB;
17662 value = negated;
17663 break;
17664
17665 /* ORR <-> ORN. Includes MOV <-> MVN. */
17666 case T2_OPCODE_ORR:
17667 new_inst = T2_OPCODE_ORN;
17668 value = inverted;
17669 break;
17670
17671 case T2_OPCODE_ORN:
17672 new_inst = T2_OPCODE_ORR;
17673 value = inverted;
17674 break;
17675
17676 /* AND <-> BIC. TST has no inverted equivalent. */
17677 case T2_OPCODE_AND:
17678 new_inst = T2_OPCODE_BIC;
17679 if (rd == 15)
17680 value = FAIL;
17681 else
17682 value = inverted;
17683 break;
17684
17685 case T2_OPCODE_BIC:
17686 new_inst = T2_OPCODE_AND;
17687 value = inverted;
17688 break;
17689
17690 /* ADC <-> SBC */
17691 case T2_OPCODE_ADC:
17692 new_inst = T2_OPCODE_SBC;
17693 value = inverted;
17694 break;
17695
17696 case T2_OPCODE_SBC:
17697 new_inst = T2_OPCODE_ADC;
17698 value = inverted;
17699 break;
17700
17701 /* We cannot do anything. */
17702 default:
17703 return FAIL;
17704 }
17705
17706 if (value == (unsigned int)FAIL)
17707 return FAIL;
17708
17709 *instruction &= T2_OPCODE_MASK;
17710 *instruction |= new_inst << T2_DATA_OP_SHIFT;
17711 return value;
17712 }
17713
17714 /* Read a 32-bit thumb instruction from buf. */
17715 static unsigned long
17716 get_thumb32_insn (char * buf)
17717 {
17718 unsigned long insn;
17719 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
17720 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17721
17722 return insn;
17723 }
17724
17725
17726 /* We usually want to set the low bit on the address of thumb function
17727 symbols. In particular .word foo - . should have the low bit set.
17728 Generic code tries to fold the difference of two symbols to
17729 a constant. Prevent this and force a relocation when the first symbol
17730 is a Thumb function. */
17731 int
17732 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
17733 {
17734 if (op == O_subtract
17735 && l->X_op == O_symbol
17736 && r->X_op == O_symbol
17737 && THUMB_IS_FUNC (l->X_add_symbol))
17738 {
17739 l->X_op = O_subtract;
17740 l->X_op_symbol = r->X_add_symbol;
17741 l->X_add_number -= r->X_add_number;
17742 return 1;
17743 }
17744 /* Process as normal. */
17745 return 0;
17746 }
17747
17748 void
17749 md_apply_fix (fixS * fixP,
17750 valueT * valP,
17751 segT seg)
17752 {
17753 offsetT value = * valP;
17754 offsetT newval;
17755 unsigned int newimm;
17756 unsigned long temp;
17757 int sign;
17758 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
17759
17760 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
17761
17762 /* Note whether this will delete the relocation. */
17763
17764 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
17765 fixP->fx_done = 1;
17766
17767 /* On a 64-bit host, silently truncate 'value' to 32 bits for
17768 consistency with the behavior on 32-bit hosts. Remember value
17769 for emit_reloc. */
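/* (The xor/subtract pair below sign-extends bit 31, so e.g.
   0xffffffff becomes -1 rather than 4294967295 on a 64-bit host.)  */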
17770 value &= 0xffffffff;
17771 value ^= 0x80000000;
17772 value -= 0x80000000;
17773
17774 *valP = value;
17775 fixP->fx_addnumber = value;
17776
17777 /* Same treatment for fixP->fx_offset. */
17778 fixP->fx_offset &= 0xffffffff;
17779 fixP->fx_offset ^= 0x80000000;
17780 fixP->fx_offset -= 0x80000000;
17781
17782 switch (fixP->fx_r_type)
17783 {
17784 case BFD_RELOC_NONE:
17785 /* This will need to go in the object file. */
17786 fixP->fx_done = 0;
17787 break;
17788
17789 case BFD_RELOC_ARM_IMMEDIATE:
17790 /* We claim that this fixup has been processed here,
17791 even if in fact we generate an error because we do
17792 not have a reloc for it, so tc_gen_reloc will reject it. */
17793 fixP->fx_done = 1;
17794
17795 if (fixP->fx_addsy
17796 && ! S_IS_DEFINED (fixP->fx_addsy))
17797 {
17798 as_bad_where (fixP->fx_file, fixP->fx_line,
17799 _("undefined symbol %s used as an immediate value"),
17800 S_GET_NAME (fixP->fx_addsy));
17801 break;
17802 }
17803
17804 newimm = encode_arm_immediate (value);
17805 temp = md_chars_to_number (buf, INSN_SIZE);
17806
17807 /* If the instruction will fail, see if we can fix things up by
17808 changing the opcode. */
17809 if (newimm == (unsigned int) FAIL
17810 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
17811 {
17812 as_bad_where (fixP->fx_file, fixP->fx_line,
17813 _("invalid constant (%lx) after fixup"),
17814 (unsigned long) value);
17815 break;
17816 }
17817
17818 newimm |= (temp & 0xfffff000);
17819 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17820 break;
17821
17822 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17823 {
17824 unsigned int highpart = 0;
17825 unsigned int newinsn = 0xe1a00000; /* nop. */
17826
17827 newimm = encode_arm_immediate (value);
17828 temp = md_chars_to_number (buf, INSN_SIZE);
17829
17830 /* If the instruction will fail, see if we can fix things up by
17831 changing the opcode. */
17832 if (newimm == (unsigned int) FAIL
17833 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
17834 {
17835 /* No ? OK - try using two ADD instructions to generate
17836 the value. */
17837 newimm = validate_immediate_twopart (value, & highpart);
17838
17839 /* Yes - then make sure that the second instruction is
17840 also an add. */
17841 if (newimm != (unsigned int) FAIL)
17842 newinsn = temp;
17843 /* Still No ? Try using a negated value. */
17844 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
17845 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
17846 /* Otherwise - give up. */
17847 else
17848 {
17849 as_bad_where (fixP->fx_file, fixP->fx_line,
17850 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17851 (long) value);
17852 break;
17853 }
17854
17855 /* Replace the first operand in the 2nd instruction (which
17856 is the PC) with the destination register. We have
17857 already added in the PC in the first instruction and we
17858 do not want to do it again. */
17859 newinsn &= ~ 0xf0000;
17860 newinsn |= ((newinsn & 0x0f000) << 4);
17861 }
17862
17863 newimm |= (temp & 0xfffff000);
17864 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17865
17866 highpart |= (newinsn & 0xfffff000);
17867 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
17868 }
17869 break;
17870
17871 case BFD_RELOC_ARM_OFFSET_IMM:
17872 if (!fixP->fx_done && seg->use_rela_p)
17873 value = 0;
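/* Fall through. */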
17874
17875 case BFD_RELOC_ARM_LITERAL:
17876 sign = value >= 0;
17877
17878 if (value < 0)
17879 value = - value;
17880
17881 if (validate_offset_imm (value, 0) == FAIL)
17882 {
17883 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
17884 as_bad_where (fixP->fx_file, fixP->fx_line,
17885 _("invalid literal constant: pool needs to be closer"));
17886 else
17887 as_bad_where (fixP->fx_file, fixP->fx_line,
17888 _("bad immediate value for offset (%ld)"),
17889 (long) value);
17890 break;
17891 }
17892
17893 newval = md_chars_to_number (buf, INSN_SIZE);
17894 newval &= 0xff7ff000;
17895 newval |= value | (sign ? INDEX_UP : 0);
17896 md_number_to_chars (buf, newval, INSN_SIZE);
17897 break;
17898
17899 case BFD_RELOC_ARM_OFFSET_IMM8:
17900 case BFD_RELOC_ARM_HWLITERAL:
17901 sign = value >= 0;
17902
17903 if (value < 0)
17904 value = - value;
17905
17906 if (validate_offset_imm (value, 1) == FAIL)
17907 {
17908 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
17909 as_bad_where (fixP->fx_file, fixP->fx_line,
17910 _("invalid literal constant: pool needs to be closer"));
17911 else
17912 as_bad (_("bad immediate value for half-word offset (%ld)"),
17913 (long) value);
17914 break;
17915 }
17916
17917 newval = md_chars_to_number (buf, INSN_SIZE);
17918 newval &= 0xff7ff0f0;
17919 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
17920 md_number_to_chars (buf, newval, INSN_SIZE);
17921 break;
17922
17923 case BFD_RELOC_ARM_T32_OFFSET_U8:
17924 if (value < 0 || value > 1020 || value % 4 != 0)
17925 as_bad_where (fixP->fx_file, fixP->fx_line,
17926 _("bad immediate value for offset (%ld)"), (long) value);
17927 value /= 4;
17928
17929 newval = md_chars_to_number (buf+2, THUMB_SIZE);
17930 newval |= value;
17931 md_number_to_chars (buf+2, newval, THUMB_SIZE);
17932 break;
17933
17934 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17935 /* This is a complicated relocation used for all varieties of Thumb32
17936 load/store instruction with immediate offset:
17937
17938 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
17939 *4, optional writeback(W)
17940 (doubleword load/store)
17941
17942 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
17943 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
17944 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
17945 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
17946 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
17947
17948 Uppercase letters indicate bits that are already encoded at
17949 this point. Lowercase letters are our problem. For the
17950 second block of instructions, the secondary opcode nybble
17951 (bits 8..11) is present, and bit 23 is zero, even if this is
17952 a PC-relative operation. */
17953 newval = md_chars_to_number (buf, THUMB_SIZE);
17954 newval <<= 16;
17955 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
17956
17957 if ((newval & 0xf0000000) == 0xe0000000)
17958 {
17959 /* Doubleword load/store: 8-bit offset, scaled by 4. */
17960 if (value >= 0)
17961 newval |= (1 << 23);
17962 else
17963 value = -value;
17964 if (value % 4 != 0)
17965 {
17966 as_bad_where (fixP->fx_file, fixP->fx_line,
17967 _("offset not a multiple of 4"));
17968 break;
17969 }
17970 value /= 4;
17971 if (value > 0xff)
17972 {
17973 as_bad_where (fixP->fx_file, fixP->fx_line,
17974 _("offset out of range"));
17975 break;
17976 }
17977 newval &= ~0xff;
17978 }
17979 else if ((newval & 0x000f0000) == 0x000f0000)
17980 {
17981 /* PC-relative, 12-bit offset. */
17982 if (value >= 0)
17983 newval |= (1 << 23);
17984 else
17985 value = -value;
17986 if (value > 0xfff)
17987 {
17988 as_bad_where (fixP->fx_file, fixP->fx_line,
17989 _("offset out of range"));
17990 break;
17991 }
17992 newval &= ~0xfff;
17993 }
17994 else if ((newval & 0x00000100) == 0x00000100)
17995 {
17996 /* Writeback: 8-bit, +/- offset. */
17997 if (value >= 0)
17998 newval |= (1 << 9);
17999 else
18000 value = -value;
18001 if (value > 0xff)
18002 {
18003 as_bad_where (fixP->fx_file, fixP->fx_line,
18004 _("offset out of range"));
18005 break;
18006 }
18007 newval &= ~0xff;
18008 }
18009 else if ((newval & 0x00000f00) == 0x00000e00)
18010 {
18011 /* T-instruction: positive 8-bit offset. */
18012 if (value < 0 || value > 0xff)
18013 {
18014 as_bad_where (fixP->fx_file, fixP->fx_line,
18015 _("offset out of range"));
18016 break;
18017 }
18018 newval &= ~0xff;
18019 newval |= value;
18020 }
18021 else
18022 {
18023 /* Positive 12-bit or negative 8-bit offset. */
18024 int limit;
18025 if (value >= 0)
18026 {
18027 newval |= (1 << 23);
18028 limit = 0xfff;
18029 }
18030 else
18031 {
18032 value = -value;
18033 limit = 0xff;
18034 }
18035 if (value > limit)
18036 {
18037 as_bad_where (fixP->fx_file, fixP->fx_line,
18038 _("offset out of range"));
18039 break;
18040 }
18041 newval &= ~limit;
18042 }
18043
18044 newval |= value;
18045 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
18046 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
18047 break;
18048
18049 case BFD_RELOC_ARM_SHIFT_IMM:
18050 newval = md_chars_to_number (buf, INSN_SIZE);
18051 if (((unsigned long) value) > 32
18052 || (value == 32
18053 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
18054 {
18055 as_bad_where (fixP->fx_file, fixP->fx_line,
18056 _("shift expression is too large"));
18057 break;
18058 }
18059
18060 if (value == 0)
18061 /* Shifts of zero must be done as lsl. */
18062 newval &= ~0x60;
18063 else if (value == 32)
18064 value = 0;
18065 newval &= 0xfffff07f;
18066 newval |= (value & 0x1f) << 7;
18067 md_number_to_chars (buf, newval, INSN_SIZE);
18068 break;
18069
18070 case BFD_RELOC_ARM_T32_IMMEDIATE:
18071 case BFD_RELOC_ARM_T32_ADD_IMM:
18072 case BFD_RELOC_ARM_T32_IMM12:
18073 case BFD_RELOC_ARM_T32_ADD_PC12:
18074 /* We claim that this fixup has been processed here,
18075 even if in fact we generate an error because we do
18076 not have a reloc for it, so tc_gen_reloc will reject it. */
18077 fixP->fx_done = 1;
18078
18079 if (fixP->fx_addsy
18080 && ! S_IS_DEFINED (fixP->fx_addsy))
18081 {
18082 as_bad_where (fixP->fx_file, fixP->fx_line,
18083 _("undefined symbol %s used as an immediate value"),
18084 S_GET_NAME (fixP->fx_addsy));
18085 break;
18086 }
18087
18088 newval = md_chars_to_number (buf, THUMB_SIZE);
18089 newval <<= 16;
18090 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
18091
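/* First try to encode the value as a Thumb-2 modified immediate; if that
   fails, see whether negating the data-processing operation allows the
   value to be encoded. */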
18092 newimm = FAIL;
18093 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
18094 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18095 {
18096 newimm = encode_thumb32_immediate (value);
18097 if (newimm == (unsigned int) FAIL)
18098 newimm = thumb32_negate_data_op (&newval, value);
18099 }
18100 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
18101 && newimm == (unsigned int) FAIL)
18102 {
18103 /* Turn add/sub into addw/subw. */
18104 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
18105 newval = (newval & 0xfeffffff) | 0x02000000;
18106
18107 /* 12 bit immediate for addw/subw. */
18108 if (value < 0)
18109 {
18110 value = -value;
18111 newval ^= 0x00a00000;
18112 }
18113 if (value > 0xfff)
18114 newimm = (unsigned int) FAIL;
18115 else
18116 newimm = value;
18117 }
18118
18119 if (newimm == (unsigned int)FAIL)
18120 {
18121 as_bad_where (fixP->fx_file, fixP->fx_line,
18122 _("invalid constant (%lx) after fixup"),
18123 (unsigned long) value);
18124 break;
18125 }
18126
18127 newval |= (newimm & 0x800) << 15;
18128 newval |= (newimm & 0x700) << 4;
18129 newval |= (newimm & 0x0ff);
18130
18131 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
18132 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
18133 break;
18134
18135 case BFD_RELOC_ARM_SMC:
18136 if (((unsigned long) value) > 0xffff)
18137 as_bad_where (fixP->fx_file, fixP->fx_line,
18138 _("invalid smc expression"));
18139 newval = md_chars_to_number (buf, INSN_SIZE);
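/* The 16-bit value is split across the instruction: bits 3..0 stay in
   bits 3..0 and bits 15..4 go to bits 19..8. */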
18140 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
18141 md_number_to_chars (buf, newval, INSN_SIZE);
18142 break;
18143
18144 case BFD_RELOC_ARM_SWI:
18145 if (fixP->tc_fix_data != 0)
18146 {
18147 if (((unsigned long) value) > 0xff)
18148 as_bad_where (fixP->fx_file, fixP->fx_line,
18149 _("invalid swi expression"));
18150 newval = md_chars_to_number (buf, THUMB_SIZE);
18151 newval |= value;
18152 md_number_to_chars (buf, newval, THUMB_SIZE);
18153 }
18154 else
18155 {
18156 if (((unsigned long) value) > 0x00ffffff)
18157 as_bad_where (fixP->fx_file, fixP->fx_line,
18158 _("invalid swi expression"));
18159 newval = md_chars_to_number (buf, INSN_SIZE);
18160 newval |= value;
18161 md_number_to_chars (buf, newval, INSN_SIZE);
18162 }
18163 break;
18164
18165 case BFD_RELOC_ARM_MULTI:
18166 if (((unsigned long) value) > 0xffff)
18167 as_bad_where (fixP->fx_file, fixP->fx_line,
18168 _("invalid expression in load/store multiple"));
18169 newval = value | md_chars_to_number (buf, INSN_SIZE);
18170 md_number_to_chars (buf, newval, INSN_SIZE);
18171 break;
18172
18173 #ifdef OBJ_ELF
18174 case BFD_RELOC_ARM_PCREL_CALL:
18175 newval = md_chars_to_number (buf, INSN_SIZE);
18176 if ((newval & 0xf0000000) == 0xf0000000)
18177 temp = 1;
18178 else
18179 temp = 3;
18180 goto arm_branch_common;
18181
18182 case BFD_RELOC_ARM_PCREL_JUMP:
18183 case BFD_RELOC_ARM_PLT32:
18184 #endif
18185 case BFD_RELOC_ARM_PCREL_BRANCH:
18186 temp = 3;
18187 goto arm_branch_common;
18188
18189 case BFD_RELOC_ARM_PCREL_BLX:
18190 temp = 1;
18191 arm_branch_common:
18192 /* We are going to store value (shifted right by two) in the
18193 instruction, in a 24-bit signed field. Bits 26 through 32 must be
18194 either all clear or all set, and bit 0 must be clear. For B/BL bit 1
18195 must also be clear. */
18196 if (value & temp)
18197 as_bad_where (fixP->fx_file, fixP->fx_line,
18198 _("misaligned branch destination"));
18199 if ((value & (offsetT)0xfe000000) != (offsetT)0
18200 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
18201 as_bad_where (fixP->fx_file, fixP->fx_line,
18202 _("branch out of range"));
18203
18204 if (fixP->fx_done || !seg->use_rela_p)
18205 {
18206 newval = md_chars_to_number (buf, INSN_SIZE);
18207 newval |= (value >> 2) & 0x00ffffff;
18208 /* Set the H bit on BLX instructions. */
18209 if (temp == 1)
18210 {
18211 if (value & 2)
18212 newval |= 0x01000000;
18213 else
18214 newval &= ~0x01000000;
18215 }
18216 md_number_to_chars (buf, newval, INSN_SIZE);
18217 }
18218 break;
18219
18220 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
18221 /* CBZ can only branch forward. */
18222
18223 /* Attempts to use CBZ to branch to the next instruction
18224 (which, strictly speaking, are prohibited) will be turned into
18225 no-ops.
18226
18227 FIXME: It may be better to remove the instruction completely and
18228 perform relaxation. */
18229 if (value == -2)
18230 {
18231 /* Replace the CBZ with a NOP. */
18232 newval = 0xbf00; /* NOP encoding T1 */
18233 md_number_to_chars (buf, newval, THUMB_SIZE);
18234 }
18235 else
18236 {
18237 if (value & ~0x7e)
18238 as_bad_where (fixP->fx_file, fixP->fx_line,
18239 _("branch out of range"));
18240
18241 if (fixP->fx_done || !seg->use_rela_p)
18242 {
18243 newval = md_chars_to_number (buf, THUMB_SIZE);
18244 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
18245 md_number_to_chars (buf, newval, THUMB_SIZE);
18246 }
18247 }
18248 break;
18249
18250 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
18251 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
18252 as_bad_where (fixP->fx_file, fixP->fx_line,
18253 _("branch out of range"));
18254
18255 if (fixP->fx_done || !seg->use_rela_p)
18256 {
18257 newval = md_chars_to_number (buf, THUMB_SIZE);
18258 newval |= (value & 0x1ff) >> 1;
18259 md_number_to_chars (buf, newval, THUMB_SIZE);
18260 }
18261 break;
18262
18263 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
18264 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
18265 as_bad_where (fixP->fx_file, fixP->fx_line,
18266 _("branch out of range"));
18267
18268 if (fixP->fx_done || !seg->use_rela_p)
18269 {
18270 newval = md_chars_to_number (buf, THUMB_SIZE);
18271 newval |= (value & 0xfff) >> 1;
18272 md_number_to_chars (buf, newval, THUMB_SIZE);
18273 }
18274 break;
18275
18276 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18277 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
18278 as_bad_where (fixP->fx_file, fixP->fx_line,
18279 _("conditional branch out of range"));
18280
18281 if (fixP->fx_done || !seg->use_rela_p)
18282 {
18283 offsetT newval2;
18284 addressT S, J1, J2, lo, hi;
18285
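/* Split the offset into the fields of the Thumb-2 conditional branch
   encoding: sign bit S, J1, J2, a 6-bit upper immediate and an 11-bit
   lower immediate (halfword aligned, hence the shift by one). */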
18286 S = (value & 0x00100000) >> 20;
18287 J2 = (value & 0x00080000) >> 19;
18288 J1 = (value & 0x00040000) >> 18;
18289 hi = (value & 0x0003f000) >> 12;
18290 lo = (value & 0x00000ffe) >> 1;
18291
18292 newval = md_chars_to_number (buf, THUMB_SIZE);
18293 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18294 newval |= (S << 10) | hi;
18295 newval2 |= (J1 << 13) | (J2 << 11) | lo;
18296 md_number_to_chars (buf, newval, THUMB_SIZE);
18297 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18298 }
18299 break;
18300
18301 case BFD_RELOC_THUMB_PCREL_BLX:
18302 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18303 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
18304 as_bad_where (fixP->fx_file, fixP->fx_line,
18305 _("branch out of range"));
18306
18307 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
18308 /* For a BLX instruction, make sure that the relocation is rounded up
18309 to a word boundary. This follows the semantics of the instruction
18310 which specifies that bit 1 of the target address will come from bit
18311 1 of the base address. */
18312 value = (value + 1) & ~ 1;
18313
18314 if (fixP->fx_done || !seg->use_rela_p)
18315 {
18316 offsetT newval2;
18317
18318 newval = md_chars_to_number (buf, THUMB_SIZE);
18319 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18320 newval |= (value & 0x7fffff) >> 12;
18321 newval2 |= (value & 0xfff) >> 1;
18322 md_number_to_chars (buf, newval, THUMB_SIZE);
18323 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18324 }
18325 break;
18326
18327 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18328 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
18329 as_bad_where (fixP->fx_file, fixP->fx_line,
18330 _("branch out of range"));
18331
18332 if (fixP->fx_done || !seg->use_rela_p)
18333 {
18334 offsetT newval2;
18335 addressT S, I1, I2, lo, hi;
18336
18337 S = (value & 0x01000000) >> 24;
18338 I1 = (value & 0x00800000) >> 23;
18339 I2 = (value & 0x00400000) >> 22;
18340 hi = (value & 0x003ff000) >> 12;
18341 lo = (value & 0x00000ffe) >> 1;
18342
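/* The J1 and J2 bits stored in the instruction are the inverted
   exclusive-or of I1/I2 with the sign bit S. */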
18343 I1 = !(I1 ^ S);
18344 I2 = !(I2 ^ S);
18345
18346 newval = md_chars_to_number (buf, THUMB_SIZE);
18347 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18348 newval |= (S << 10) | hi;
18349 newval2 |= (I1 << 13) | (I2 << 11) | lo;
18350 md_number_to_chars (buf, newval, THUMB_SIZE);
18351 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18352 }
18353 break;
18354
18355 case BFD_RELOC_8:
18356 if (fixP->fx_done || !seg->use_rela_p)
18357 md_number_to_chars (buf, value, 1);
18358 break;
18359
18360 case BFD_RELOC_16:
18361 if (fixP->fx_done || !seg->use_rela_p)
18362 md_number_to_chars (buf, value, 2);
18363 break;
18364
18365 #ifdef OBJ_ELF
18366 case BFD_RELOC_ARM_TLS_GD32:
18367 case BFD_RELOC_ARM_TLS_LE32:
18368 case BFD_RELOC_ARM_TLS_IE32:
18369 case BFD_RELOC_ARM_TLS_LDM32:
18370 case BFD_RELOC_ARM_TLS_LDO32:
18371 S_SET_THREAD_LOCAL (fixP->fx_addsy);
18372 /* fall through */
18373
18374 case BFD_RELOC_ARM_GOT32:
18375 case BFD_RELOC_ARM_GOTOFF:
18376 case BFD_RELOC_ARM_TARGET2:
18377 if (fixP->fx_done || !seg->use_rela_p)
18378 md_number_to_chars (buf, 0, 4);
18379 break;
18380 #endif
18381
18382 case BFD_RELOC_RVA:
18383 case BFD_RELOC_32:
18384 case BFD_RELOC_ARM_TARGET1:
18385 case BFD_RELOC_ARM_ROSEGREL32:
18386 case BFD_RELOC_ARM_SBREL32:
18387 case BFD_RELOC_32_PCREL:
18388 #ifdef TE_PE
18389 case BFD_RELOC_32_SECREL:
18390 #endif
18391 if (fixP->fx_done || !seg->use_rela_p)
18392 #ifdef TE_WINCE
18393 /* For WinCE we only do this for pcrel fixups. */
18394 if (fixP->fx_done || fixP->fx_pcrel)
18395 #endif
18396 md_number_to_chars (buf, value, 4);
18397 break;
18398
18399 #ifdef OBJ_ELF
18400 case BFD_RELOC_ARM_PREL31:
18401 if (fixP->fx_done || !seg->use_rela_p)
18402 {
18403 newval = md_chars_to_number (buf, 4) & 0x80000000;
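/* The value overflows the signed 31-bit field if bits 30 and 31 differ. */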
18404 if ((value ^ (value >> 1)) & 0x40000000)
18405 {
18406 as_bad_where (fixP->fx_file, fixP->fx_line,
18407 _("rel31 relocation overflow"));
18408 }
18409 newval |= value & 0x7fffffff;
18410 md_number_to_chars (buf, newval, 4);
18411 }
18412 break;
18413 #endif
18414
18415 case BFD_RELOC_ARM_CP_OFF_IMM:
18416 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
18417 if (value < -1023 || value > 1023 || (value & 3))
18418 as_bad_where (fixP->fx_file, fixP->fx_line,
18419 _("co-processor offset out of range"));
18420 cp_off_common:
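/* Common code for coprocessor load/store offsets: the offset is stored
   as a word count in bits 7..0, with INDEX_UP selecting addition when
   the original value was non-negative. */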
18421 sign = value >= 0;
18422 if (value < 0)
18423 value = -value;
18424 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18425 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18426 newval = md_chars_to_number (buf, INSN_SIZE);
18427 else
18428 newval = get_thumb32_insn (buf);
18429 newval &= 0xff7fff00;
18430 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
18431 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18432 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18433 md_number_to_chars (buf, newval, INSN_SIZE);
18434 else
18435 put_thumb32_insn (buf, newval);
18436 break;
18437
18438 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
18439 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
18440 if (value < -255 || value > 255)
18441 as_bad_where (fixP->fx_file, fixP->fx_line,
18442 _("co-processor offset out of range"));
18443 value *= 4;
18444 goto cp_off_common;
18445
18446 case BFD_RELOC_ARM_THUMB_OFFSET:
18447 newval = md_chars_to_number (buf, THUMB_SIZE);
18448 /* Exactly what range is valid, and where the offset is inserted,
18449 depends on the type of instruction; we can establish this from
18450 the top 4 bits. */
18451 switch (newval >> 12)
18452 {
18453 case 4: /* PC load. */
18454 /* Thumb PC loads are somewhat odd: bit 1 of the PC is
18455 forced to zero for these loads; md_pcrel_from has already
18456 compensated for this. */
18457 if (value & 3)
18458 as_bad_where (fixP->fx_file, fixP->fx_line,
18459 _("invalid offset, target not word aligned (0x%08lX)"),
18460 (((unsigned long) fixP->fx_frag->fr_address
18461 + (unsigned long) fixP->fx_where) & ~3)
18462 + (unsigned long) value);
18463
18464 if (value & ~0x3fc)
18465 as_bad_where (fixP->fx_file, fixP->fx_line,
18466 _("invalid offset, value too big (0x%08lX)"),
18467 (long) value);
18468
18469 newval |= value >> 2;
18470 break;
18471
18472 case 9: /* SP load/store. */
18473 if (value & ~0x3fc)
18474 as_bad_where (fixP->fx_file, fixP->fx_line,
18475 _("invalid offset, value too big (0x%08lX)"),
18476 (long) value);
18477 newval |= value >> 2;
18478 break;
18479
18480 case 6: /* Word load/store. */
18481 if (value & ~0x7c)
18482 as_bad_where (fixP->fx_file, fixP->fx_line,
18483 _("invalid offset, value too big (0x%08lX)"),
18484 (long) value);
18485 newval |= value << 4; /* 6 - 2. */
18486 break;
18487
18488 case 7: /* Byte load/store. */
18489 if (value & ~0x1f)
18490 as_bad_where (fixP->fx_file, fixP->fx_line,
18491 _("invalid offset, value too big (0x%08lX)"),
18492 (long) value);
18493 newval |= value << 6;
18494 break;
18495
18496 case 8: /* Halfword load/store. */
18497 if (value & ~0x3e)
18498 as_bad_where (fixP->fx_file, fixP->fx_line,
18499 _("invalid offset, value too big (0x%08lX)"),
18500 (long) value);
18501 newval |= value << 5; /* 6 - 1. */
18502 break;
18503
18504 default:
18505 as_bad_where (fixP->fx_file, fixP->fx_line,
18506 _("Unable to process relocation for thumb opcode: %lx"),
18507 (unsigned long) newval);
18508 break;
18509 }
18510 md_number_to_chars (buf, newval, THUMB_SIZE);
18511 break;
18512
18513 case BFD_RELOC_ARM_THUMB_ADD:
18514 /* This is a complicated relocation, since we use it for all of
18515 the following immediate relocations:
18516
18517 3bit ADD/SUB
18518 8bit ADD/SUB
18519 9bit ADD/SUB SP word-aligned
18520 10bit ADD PC/SP word-aligned
18521
18522 The type of instruction being processed is encoded in the
18523 instruction field:
18524
18525 0x8000 SUB
18526 0x00F0 Rd
18527 0x000F Rs
18528 */
18529 newval = md_chars_to_number (buf, THUMB_SIZE);
18530 {
18531 int rd = (newval >> 4) & 0xf;
18532 int rs = newval & 0xf;
18533 int subtract = !!(newval & 0x8000);
18534
18535 /* Check for HI regs; only very restricted cases are allowed:
18536 adjusting SP, and using PC or SP to get an address. */
18537 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
18538 || (rs > 7 && rs != REG_SP && rs != REG_PC))
18539 as_bad_where (fixP->fx_file, fixP->fx_line,
18540 _("invalid Hi register with immediate"));
18541
18542 /* If value is negative, choose the opposite instruction. */
18543 if (value < 0)
18544 {
18545 value = -value;
18546 subtract = !subtract;
18547 if (value < 0)
18548 as_bad_where (fixP->fx_file, fixP->fx_line,
18549 _("immediate value out of range"));
18550 }
18551
18552 if (rd == REG_SP)
18553 {
18554 if (value & ~0x1fc)
18555 as_bad_where (fixP->fx_file, fixP->fx_line,
18556 _("invalid immediate for stack address calculation"));
18557 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
18558 newval |= value >> 2;
18559 }
18560 else if (rs == REG_PC || rs == REG_SP)
18561 {
18562 if (subtract || value & ~0x3fc)
18563 as_bad_where (fixP->fx_file, fixP->fx_line,
18564 _("invalid immediate for address calculation (value = 0x%08lX)"),
18565 (unsigned long) value);
18566 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
18567 newval |= rd << 8;
18568 newval |= value >> 2;
18569 }
18570 else if (rs == rd)
18571 {
18572 if (value & ~0xff)
18573 as_bad_where (fixP->fx_file, fixP->fx_line,
18574 _("immediate value out of range"));
18575 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
18576 newval |= (rd << 8) | value;
18577 }
18578 else
18579 {
18580 if (value & ~0x7)
18581 as_bad_where (fixP->fx_file, fixP->fx_line,
18582 _("immediate value out of range"));
18583 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
18584 newval |= rd | (rs << 3) | (value << 6);
18585 }
18586 }
18587 md_number_to_chars (buf, newval, THUMB_SIZE);
18588 break;
18589
18590 case BFD_RELOC_ARM_THUMB_IMM:
18591 newval = md_chars_to_number (buf, THUMB_SIZE);
18592 if (value < 0 || value > 255)
18593 as_bad_where (fixP->fx_file, fixP->fx_line,
18594 _("invalid immediate: %ld is too large"),
18595 (long) value);
18596 newval |= value;
18597 md_number_to_chars (buf, newval, THUMB_SIZE);
18598 break;
18599
18600 case BFD_RELOC_ARM_THUMB_SHIFT:
18601 /* 5bit shift value (0..32). LSL cannot take 32. */
18602 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
18603 temp = newval & 0xf800;
18604 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
18605 as_bad_where (fixP->fx_file, fixP->fx_line,
18606 _("invalid shift value: %ld"), (long) value);
18607 /* Shifts of zero must be encoded as LSL. */
18608 if (value == 0)
18609 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
18610 /* Shifts of 32 are encoded as zero. */
18611 else if (value == 32)
18612 value = 0;
18613 newval |= value << 6;
18614 md_number_to_chars (buf, newval, THUMB_SIZE);
18615 break;
18616
18617 case BFD_RELOC_VTABLE_INHERIT:
18618 case BFD_RELOC_VTABLE_ENTRY:
18619 fixP->fx_done = 0;
18620 return;
18621
18622 case BFD_RELOC_ARM_MOVW:
18623 case BFD_RELOC_ARM_MOVT:
18624 case BFD_RELOC_ARM_THUMB_MOVW:
18625 case BFD_RELOC_ARM_THUMB_MOVT:
18626 if (fixP->fx_done || !seg->use_rela_p)
18627 {
18628 /* REL format relocations are limited to a 16-bit addend. */
18629 if (!fixP->fx_done)
18630 {
18631 if (value < -0x1000 || value > 0xffff)
18632 as_bad_where (fixP->fx_file, fixP->fx_line,
18633 _("offset too big"));
18634 }
18635 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
18636 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18637 {
18638 value >>= 16;
18639 }
18640
18641 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
18642 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18643 {
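/* Thumb-2 MOVW/MOVT: the 16-bit immediate is split into imm4
   (bits 19..16), i (bit 26), imm3 (bits 14..12) and imm8 (bits 7..0)
   of the combined instruction word. */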
18644 newval = get_thumb32_insn (buf);
18645 newval &= 0xfbf08f00;
18646 newval |= (value & 0xf000) << 4;
18647 newval |= (value & 0x0800) << 15;
18648 newval |= (value & 0x0700) << 4;
18649 newval |= (value & 0x00ff);
18650 put_thumb32_insn (buf, newval);
18651 }
18652 else
18653 {
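/* ARM MOVW/MOVT: imm4 goes in bits 19..16 and imm12 in bits 11..0. */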
18654 newval = md_chars_to_number (buf, 4);
18655 newval &= 0xfff0f000;
18656 newval |= value & 0x0fff;
18657 newval |= (value & 0xf000) << 4;
18658 md_number_to_chars (buf, newval, 4);
18659 }
18660 }
18661 return;
18662
18663 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18664 case BFD_RELOC_ARM_ALU_PC_G0:
18665 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18666 case BFD_RELOC_ARM_ALU_PC_G1:
18667 case BFD_RELOC_ARM_ALU_PC_G2:
18668 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18669 case BFD_RELOC_ARM_ALU_SB_G0:
18670 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18671 case BFD_RELOC_ARM_ALU_SB_G1:
18672 case BFD_RELOC_ARM_ALU_SB_G2:
18673 assert (!fixP->fx_done);
18674 if (!seg->use_rela_p)
18675 {
18676 bfd_vma insn;
18677 bfd_vma encoded_addend;
18678 bfd_vma addend_abs = abs (value);
18679
18680 /* Check that the absolute value of the addend can be
18681 expressed as an 8-bit constant plus a rotation. */
18682 encoded_addend = encode_arm_immediate (addend_abs);
18683 if (encoded_addend == (unsigned int) FAIL)
18684 as_bad_where (fixP->fx_file, fixP->fx_line,
18685 _("the offset 0x%08lX is not representable"),
18686 (unsigned long) addend_abs);
18687
18688 /* Extract the instruction. */
18689 insn = md_chars_to_number (buf, INSN_SIZE);
18690
18691 /* If the addend is positive, use an ADD instruction.
18692 Otherwise use a SUB. Take care not to destroy the S bit. */
18693 insn &= 0xff1fffff;
18694 if (value < 0)
18695 insn |= 1 << 22;
18696 else
18697 insn |= 1 << 23;
18698
18699 /* Place the encoded addend into the first 12 bits of the
18700 instruction. */
18701 insn &= 0xfffff000;
18702 insn |= encoded_addend;
18703
18704 /* Update the instruction. */
18705 md_number_to_chars (buf, insn, INSN_SIZE);
18706 }
18707 break;
18708
18709 case BFD_RELOC_ARM_LDR_PC_G0:
18710 case BFD_RELOC_ARM_LDR_PC_G1:
18711 case BFD_RELOC_ARM_LDR_PC_G2:
18712 case BFD_RELOC_ARM_LDR_SB_G0:
18713 case BFD_RELOC_ARM_LDR_SB_G1:
18714 case BFD_RELOC_ARM_LDR_SB_G2:
18715 assert (!fixP->fx_done);
18716 if (!seg->use_rela_p)
18717 {
18718 bfd_vma insn;
18719 bfd_vma addend_abs = abs (value);
18720
18721 /* Check that the absolute value of the addend can be
18722 encoded in 12 bits. */
18723 if (addend_abs >= 0x1000)
18724 as_bad_where (fixP->fx_file, fixP->fx_line,
18725 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
18726 (unsigned long) addend_abs);
18727
18728 /* Extract the instruction. */
18729 insn = md_chars_to_number (buf, INSN_SIZE);
18730
18731 /* If the addend is negative, clear bit 23 of the instruction.
18732 Otherwise set it. */
18733 if (value < 0)
18734 insn &= ~(1 << 23);
18735 else
18736 insn |= 1 << 23;
18737
18738 /* Place the absolute value of the addend into the first 12 bits
18739 of the instruction. */
18740 insn &= 0xfffff000;
18741 insn |= addend_abs;
18742
18743 /* Update the instruction. */
18744 md_number_to_chars (buf, insn, INSN_SIZE);
18745 }
18746 break;
18747
18748 case BFD_RELOC_ARM_LDRS_PC_G0:
18749 case BFD_RELOC_ARM_LDRS_PC_G1:
18750 case BFD_RELOC_ARM_LDRS_PC_G2:
18751 case BFD_RELOC_ARM_LDRS_SB_G0:
18752 case BFD_RELOC_ARM_LDRS_SB_G1:
18753 case BFD_RELOC_ARM_LDRS_SB_G2:
18754 assert (!fixP->fx_done);
18755 if (!seg->use_rela_p)
18756 {
18757 bfd_vma insn;
18758 bfd_vma addend_abs = abs (value);
18759
18760 /* Check that the absolute value of the addend can be
18761 encoded in 8 bits. */
18762 if (addend_abs >= 0x100)
18763 as_bad_where (fixP->fx_file, fixP->fx_line,
18764 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
18765 (unsigned long) addend_abs);
18766
18767 /* Extract the instruction. */
18768 insn = md_chars_to_number (buf, INSN_SIZE);
18769
18770 /* If the addend is negative, clear bit 23 of the instruction.
18771 Otherwise set it. */
18772 if (value < 0)
18773 insn &= ~(1 << 23);
18774 else
18775 insn |= 1 << 23;
18776
18777 /* Place the first four bits of the absolute value of the addend
18778 into the first 4 bits of the instruction, and the remaining
18779 four into bits 8 .. 11. */
18780 insn &= 0xfffff0f0;
18781 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
18782
18783 /* Update the instruction. */
18784 md_number_to_chars (buf, insn, INSN_SIZE);
18785 }
18786 break;
18787
18788 case BFD_RELOC_ARM_LDC_PC_G0:
18789 case BFD_RELOC_ARM_LDC_PC_G1:
18790 case BFD_RELOC_ARM_LDC_PC_G2:
18791 case BFD_RELOC_ARM_LDC_SB_G0:
18792 case BFD_RELOC_ARM_LDC_SB_G1:
18793 case BFD_RELOC_ARM_LDC_SB_G2:
18794 assert (!fixP->fx_done);
18795 if (!seg->use_rela_p)
18796 {
18797 bfd_vma insn;
18798 bfd_vma addend_abs = abs (value);
18799
18800 /* Check that the absolute value of the addend is a multiple of
18801 four and, when divided by four, fits in 8 bits. */
18802 if (addend_abs & 0x3)
18803 as_bad_where (fixP->fx_file, fixP->fx_line,
18804 _("bad offset 0x%08lX (must be word-aligned)"),
18805 (unsigned long) addend_abs);
18806
18807 if ((addend_abs >> 2) > 0xff)
18808 as_bad_where (fixP->fx_file, fixP->fx_line,
18809 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
18810 (unsigned long) addend_abs);
18811
18812 /* Extract the instruction. */
18813 insn = md_chars_to_number (buf, INSN_SIZE);
18814
18815 /* If the addend is negative, clear bit 23 of the instruction.
18816 Otherwise set it. */
18817 if (value < 0)
18818 insn &= ~(1 << 23);
18819 else
18820 insn |= 1 << 23;
18821
18822 /* Place the addend (divided by four) into the first eight
18823 bits of the instruction. */
18824 insn &= 0xfffffff0;
18825 insn |= addend_abs >> 2;
18826
18827 /* Update the instruction. */
18828 md_number_to_chars (buf, insn, INSN_SIZE);
18829 }
18830 break;
18831
18832 case BFD_RELOC_UNUSED:
18833 default:
18834 as_bad_where (fixP->fx_file, fixP->fx_line,
18835 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
18836 }
18837 }
18838
18839 /* Translate internal representation of relocation info to BFD target
18840 format. */
18841
18842 arelent *
18843 tc_gen_reloc (asection *section, fixS *fixp)
18844 {
18845 arelent * reloc;
18846 bfd_reloc_code_real_type code;
18847
18848 reloc = xmalloc (sizeof (arelent));
18849
18850 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
18851 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
18852 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
18853
18854 if (fixp->fx_pcrel)
18855 {
18856 if (section->use_rela_p)
18857 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
18858 else
18859 fixp->fx_offset = reloc->address;
18860 }
18861 reloc->addend = fixp->fx_offset;
18862
18863 switch (fixp->fx_r_type)
18864 {
18865 case BFD_RELOC_8:
18866 if (fixp->fx_pcrel)
18867 {
18868 code = BFD_RELOC_8_PCREL;
18869 break;
18870 }
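/* Fall through. */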
18871
18872 case BFD_RELOC_16:
18873 if (fixp->fx_pcrel)
18874 {
18875 code = BFD_RELOC_16_PCREL;
18876 break;
18877 }
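/* Fall through. */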
18878
18879 case BFD_RELOC_32:
18880 if (fixp->fx_pcrel)
18881 {
18882 code = BFD_RELOC_32_PCREL;
18883 break;
18884 }
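/* Fall through. */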
18885
18886 case BFD_RELOC_ARM_MOVW:
18887 if (fixp->fx_pcrel)
18888 {
18889 code = BFD_RELOC_ARM_MOVW_PCREL;
18890 break;
18891 }
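/* Fall through. */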
18892
18893 case BFD_RELOC_ARM_MOVT:
18894 if (fixp->fx_pcrel)
18895 {
18896 code = BFD_RELOC_ARM_MOVT_PCREL;
18897 break;
18898 }
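/* Fall through. */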
18899
18900 case BFD_RELOC_ARM_THUMB_MOVW:
18901 if (fixp->fx_pcrel)
18902 {
18903 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
18904 break;
18905 }
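/* Fall through. */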
18906
18907 case BFD_RELOC_ARM_THUMB_MOVT:
18908 if (fixp->fx_pcrel)
18909 {
18910 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
18911 break;
18912 }
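/* Fall through. */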
18913
18914 case BFD_RELOC_NONE:
18915 case BFD_RELOC_ARM_PCREL_BRANCH:
18916 case BFD_RELOC_ARM_PCREL_BLX:
18917 case BFD_RELOC_RVA:
18918 case BFD_RELOC_THUMB_PCREL_BRANCH7:
18919 case BFD_RELOC_THUMB_PCREL_BRANCH9:
18920 case BFD_RELOC_THUMB_PCREL_BRANCH12:
18921 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18922 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18923 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18924 case BFD_RELOC_THUMB_PCREL_BLX:
18925 case BFD_RELOC_VTABLE_ENTRY:
18926 case BFD_RELOC_VTABLE_INHERIT:
18927 #ifdef TE_PE
18928 case BFD_RELOC_32_SECREL:
18929 #endif
18930 code = fixp->fx_r_type;
18931 break;
18932
18933 case BFD_RELOC_ARM_LITERAL:
18934 case BFD_RELOC_ARM_HWLITERAL:
18935 /* If this is called then a literal has
18936 been referenced across a section boundary. */
18937 as_bad_where (fixp->fx_file, fixp->fx_line,
18938 _("literal referenced across section boundary"));
18939 return NULL;
18940
18941 #ifdef OBJ_ELF
18942 case BFD_RELOC_ARM_GOT32:
18943 case BFD_RELOC_ARM_GOTOFF:
18944 case BFD_RELOC_ARM_PLT32:
18945 case BFD_RELOC_ARM_TARGET1:
18946 case BFD_RELOC_ARM_ROSEGREL32:
18947 case BFD_RELOC_ARM_SBREL32:
18948 case BFD_RELOC_ARM_PREL31:
18949 case BFD_RELOC_ARM_TARGET2:
18950 case BFD_RELOC_ARM_TLS_LE32:
18951 case BFD_RELOC_ARM_TLS_LDO32:
18952 case BFD_RELOC_ARM_PCREL_CALL:
18953 case BFD_RELOC_ARM_PCREL_JUMP:
18954 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18955 case BFD_RELOC_ARM_ALU_PC_G0:
18956 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18957 case BFD_RELOC_ARM_ALU_PC_G1:
18958 case BFD_RELOC_ARM_ALU_PC_G2:
18959 case BFD_RELOC_ARM_LDR_PC_G0:
18960 case BFD_RELOC_ARM_LDR_PC_G1:
18961 case BFD_RELOC_ARM_LDR_PC_G2:
18962 case BFD_RELOC_ARM_LDRS_PC_G0:
18963 case BFD_RELOC_ARM_LDRS_PC_G1:
18964 case BFD_RELOC_ARM_LDRS_PC_G2:
18965 case BFD_RELOC_ARM_LDC_PC_G0:
18966 case BFD_RELOC_ARM_LDC_PC_G1:
18967 case BFD_RELOC_ARM_LDC_PC_G2:
18968 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18969 case BFD_RELOC_ARM_ALU_SB_G0:
18970 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18971 case BFD_RELOC_ARM_ALU_SB_G1:
18972 case BFD_RELOC_ARM_ALU_SB_G2:
18973 case BFD_RELOC_ARM_LDR_SB_G0:
18974 case BFD_RELOC_ARM_LDR_SB_G1:
18975 case BFD_RELOC_ARM_LDR_SB_G2:
18976 case BFD_RELOC_ARM_LDRS_SB_G0:
18977 case BFD_RELOC_ARM_LDRS_SB_G1:
18978 case BFD_RELOC_ARM_LDRS_SB_G2:
18979 case BFD_RELOC_ARM_LDC_SB_G0:
18980 case BFD_RELOC_ARM_LDC_SB_G1:
18981 case BFD_RELOC_ARM_LDC_SB_G2:
18982 code = fixp->fx_r_type;
18983 break;
18984
18985 case BFD_RELOC_ARM_TLS_GD32:
18986 case BFD_RELOC_ARM_TLS_IE32:
18987 case BFD_RELOC_ARM_TLS_LDM32:
18988 /* BFD will include the symbol's address in the addend.
18989 But we don't want that, so subtract it out again here. */
18990 if (!S_IS_COMMON (fixp->fx_addsy))
18991 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
18992 code = fixp->fx_r_type;
18993 break;
18994 #endif
18995
18996 case BFD_RELOC_ARM_IMMEDIATE:
18997 as_bad_where (fixp->fx_file, fixp->fx_line,
18998 _("internal relocation (type: IMMEDIATE) not fixed up"));
18999 return NULL;
19000
19001 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
19002 as_bad_where (fixp->fx_file, fixp->fx_line,
19003 _("ADRL used for a symbol not defined in the same file"));
19004 return NULL;
19005
19006 case BFD_RELOC_ARM_OFFSET_IMM:
19007 if (section->use_rela_p)
19008 {
19009 code = fixp->fx_r_type;
19010 break;
19011 }
19012
19013 if (fixp->fx_addsy != NULL
19014 && !S_IS_DEFINED (fixp->fx_addsy)
19015 && S_IS_LOCAL (fixp->fx_addsy))
19016 {
19017 as_bad_where (fixp->fx_file, fixp->fx_line,
19018 _("undefined local label `%s'"),
19019 S_GET_NAME (fixp->fx_addsy));
19020 return NULL;
19021 }
19022
19023 as_bad_where (fixp->fx_file, fixp->fx_line,
19024 _("internal relocation (type: OFFSET_IMM) not fixed up"));
19025 return NULL;
19026
19027 default:
19028 {
19029 char * type;
19030
19031 switch (fixp->fx_r_type)
19032 {
19033 case BFD_RELOC_NONE: type = "NONE"; break;
19034 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
19035 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
19036 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
19037 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
19038 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
19039 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
19040 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
19041 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
19042 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
19043 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
19044 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
19045 default: type = _("<unknown>"); break;
19046 }
19047 as_bad_where (fixp->fx_file, fixp->fx_line,
19048 _("cannot represent %s relocation in this object file format"),
19049 type);
19050 return NULL;
19051 }
19052 }
19053
19054 #ifdef OBJ_ELF
19055 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
19056 && GOT_symbol
19057 && fixp->fx_addsy == GOT_symbol)
19058 {
19059 code = BFD_RELOC_ARM_GOTPC;
19060 reloc->addend = fixp->fx_offset = reloc->address;
19061 }
19062 #endif
19063
19064 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
19065
19066 if (reloc->howto == NULL)
19067 {
19068 as_bad_where (fixp->fx_file, fixp->fx_line,
19069 _("cannot represent %s relocation in this object file format"),
19070 bfd_get_reloc_code_name (code));
19071 return NULL;
19072 }
19073
19074 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
19075 vtable entry to be used in the relocation's section offset. */
19076 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19077 reloc->address = fixp->fx_offset;
19078
19079 return reloc;
19080 }
19081
19082 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
19083
19084 void
19085 cons_fix_new_arm (fragS * frag,
19086 int where,
19087 int size,
19088 expressionS * exp)
19089 {
19090 bfd_reloc_code_real_type type;
19091 int pcrel = 0;
19092
19093 /* Pick a reloc.
19094 FIXME: @@ Should look at CPU word size. */
19095 switch (size)
19096 {
19097 case 1:
19098 type = BFD_RELOC_8;
19099 break;
19100 case 2:
19101 type = BFD_RELOC_16;
19102 break;
19103 case 4:
19104 default:
19105 type = BFD_RELOC_32;
19106 break;
19107 case 8:
19108 type = BFD_RELOC_64;
19109 break;
19110 }
19111
19112 #ifdef TE_PE
19113 if (exp->X_op == O_secrel)
19114 {
19115 exp->X_op = O_symbol;
19116 type = BFD_RELOC_32_SECREL;
19117 }
19118 #endif
19119
19120 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
19121 }
19122
19123 #if defined OBJ_COFF || defined OBJ_ELF
19124 void
19125 arm_validate_fix (fixS * fixP)
19126 {
19127 /* If the destination of the branch is a defined symbol which does not have
19128 the THUMB_FUNC attribute, then we must be calling a function which has
19129 the (interfacearm) attribute. We look for the Thumb entry point to that
19130 function and change the branch to refer to that function instead. */
19131 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
19132 && fixP->fx_addsy != NULL
19133 && S_IS_DEFINED (fixP->fx_addsy)
19134 && ! THUMB_IS_FUNC (fixP->fx_addsy))
19135 {
19136 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
19137 }
19138 }
19139 #endif
19140
19141 int
19142 arm_force_relocation (struct fix * fixp)
19143 {
19144 #if defined (OBJ_COFF) && defined (TE_PE)
19145 if (fixp->fx_r_type == BFD_RELOC_RVA)
19146 return 1;
19147 #endif
19148
19149 /* Resolve these relocations even if the symbol is extern or weak. */
19150 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
19151 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
19152 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
19153 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
19154 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19155 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
19156 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
19157 return 0;
19158
19159 /* Always leave these relocations for the linker. */
19160 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19161 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19162 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19163 return 1;
19164
19165 /* Always generate relocations against function symbols. */
19166 if (fixp->fx_r_type == BFD_RELOC_32
19167 && fixp->fx_addsy
19168 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
19169 return 1;
19170
19171 return generic_force_reloc (fixp);
19172 }
19173
19174 #if defined (OBJ_ELF) || defined (OBJ_COFF)
19175 /* Relocations against function names must be left unadjusted,
19176 so that the linker can use this information to generate interworking
19177 stubs. The MIPS version of this function
19178 also prevents relocations that are mips-16 specific, but I do not
19179 know why it does this.
19180
19181 FIXME:
19182 There is one other problem that ought to be addressed here, but
19183 which currently is not: Taking the address of a label (rather
19184 than a function) and then later jumping to that address. Such
19185 addresses also ought to have their bottom bit set (assuming that
19186 they reside in Thumb code), but at the moment they will not. */
19187
19188 bfd_boolean
19189 arm_fix_adjustable (fixS * fixP)
19190 {
19191 if (fixP->fx_addsy == NULL)
19192 return 1;
19193
19194 /* Preserve relocations against symbols with function type. */
19195 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
19196 return 0;
19197
19198 if (THUMB_IS_FUNC (fixP->fx_addsy)
19199 && fixP->fx_subsy == NULL)
19200 return 0;
19201
19202 /* We need the symbol name for the VTABLE entries. */
19203 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
19204 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19205 return 0;
19206
19207 /* Don't allow symbols to be discarded on GOT related relocs. */
19208 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
19209 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
19210 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
19211 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
19212 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
19213 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
19214 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
19215 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
19216 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
19217 return 0;
19218
19219 /* Similarly for group relocations. */
19220 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19221 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19222 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19223 return 0;
19224
19225 return 1;
19226 }
19227 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
19228
19229 #ifdef OBJ_ELF
19230
19231 const char *
19232 elf32_arm_target_format (void)
19233 {
19234 #ifdef TE_SYMBIAN
19235 return (target_big_endian
19236 ? "elf32-bigarm-symbian"
19237 : "elf32-littlearm-symbian");
19238 #elif defined (TE_VXWORKS)
19239 return (target_big_endian
19240 ? "elf32-bigarm-vxworks"
19241 : "elf32-littlearm-vxworks");
19242 #else
19243 if (target_big_endian)
19244 return "elf32-bigarm";
19245 else
19246 return "elf32-littlearm";
19247 #endif
19248 }
19249
19250 void
19251 armelf_frob_symbol (symbolS * symp,
19252 int * puntp)
19253 {
19254 elf_frob_symbol (symp, puntp);
19255 }
19256 #endif
19257
19258 /* MD interface: Finalization. */
19259
19260 /* A good place to do this, although this was probably not intended
19261 for this kind of use. We need to dump the literal pool before
19262 references are made to a null symbol pointer. */
19263
19264 void
19265 arm_cleanup (void)
19266 {
19267 literal_pool * pool;
19268
19269 for (pool = list_of_pools; pool; pool = pool->next)
19270 {
19271 /* Put it at the end of the relevant section. */
19272 subseg_set (pool->section, pool->sub_section);
19273 #ifdef OBJ_ELF
19274 arm_elf_change_section ();
19275 #endif
19276 s_ltorg (0);
19277 }
19278 }
19279
19280 /* Adjust the symbol table. This marks Thumb symbols as distinct from
19281 ARM ones. */
19282
19283 void
19284 arm_adjust_symtab (void)
19285 {
19286 #ifdef OBJ_COFF
19287 symbolS * sym;
19288
19289 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19290 {
19291 if (ARM_IS_THUMB (sym))
19292 {
19293 if (THUMB_IS_FUNC (sym))
19294 {
19295 /* Mark the symbol as a Thumb function. */
19296 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
19297 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
19298 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
19299
19300 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
19301 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
19302 else
19303 as_bad (_("%s: unexpected function type: %d"),
19304 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
19305 }
19306 else switch (S_GET_STORAGE_CLASS (sym))
19307 {
19308 case C_EXT:
19309 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
19310 break;
19311 case C_STAT:
19312 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
19313 break;
19314 case C_LABEL:
19315 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
19316 break;
19317 default:
19318 /* Do nothing. */
19319 break;
19320 }
19321 }
19322
19323 if (ARM_IS_INTERWORK (sym))
19324 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
19325 }
19326 #endif
19327 #ifdef OBJ_ELF
19328 symbolS * sym;
19329 char bind;
19330
19331 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19332 {
19333 if (ARM_IS_THUMB (sym))
19334 {
19335 elf_symbol_type * elf_sym;
19336
19337 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
19338 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
19339
19340 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
19341 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
19342 {
19343 /* If it's a .thumb_func, declare it as such;
19344 otherwise tag the label as .code 16. */
19345 if (THUMB_IS_FUNC (sym))
19346 elf_sym->internal_elf_sym.st_info =
19347 ELF_ST_INFO (bind, STT_ARM_TFUNC);
19348 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
19349 elf_sym->internal_elf_sym.st_info =
19350 ELF_ST_INFO (bind, STT_ARM_16BIT);
19351 }
19352 }
19353 }
19354 #endif
19355 }
19356
19357 /* MD interface: Initialization. */
19358
19359 static void
19360 set_constant_flonums (void)
19361 {
19362 int i;
19363
19364 for (i = 0; i < NUM_FLOAT_VALS; i++)
19365 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
19366 abort ();
19367 }
19368
19369 /* Auto-select Thumb mode if it's the only available instruction set for the
19370 given architecture. */
19371
19372 static void
19373 autoselect_thumb_from_cpu_variant (void)
19374 {
19375 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
19376 opcode_select (16);
19377 }
19378
19379 void
19380 md_begin (void)
19381 {
19382 unsigned mach;
19383 unsigned int i;
19384
19385 if ( (arm_ops_hsh = hash_new ()) == NULL
19386 || (arm_cond_hsh = hash_new ()) == NULL
19387 || (arm_shift_hsh = hash_new ()) == NULL
19388 || (arm_psr_hsh = hash_new ()) == NULL
19389 || (arm_v7m_psr_hsh = hash_new ()) == NULL
19390 || (arm_reg_hsh = hash_new ()) == NULL
19391 || (arm_reloc_hsh = hash_new ()) == NULL
19392 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
19393 as_fatal (_("virtual memory exhausted"));
19394
19395 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
19396 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
19397 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
19398 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
19399 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
19400 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
19401 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
19402 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
19403 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
19404 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
19405 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
19406 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
19407 for (i = 0;
19408 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
19409 i++)
19410 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
19411 (PTR) (barrier_opt_names + i));
19412 #ifdef OBJ_ELF
19413 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
19414 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
19415 #endif
19416
19417 set_constant_flonums ();
19418
19419 /* Set the cpu variant based on the command-line options. We prefer
19420 -mcpu= over -march= if both are set (as for GCC); and we prefer
19421 -mfpu= over any other way of setting the floating point unit.
19422 Use of legacy options together with new options is faulted. */
19423 if (legacy_cpu)
19424 {
19425 if (mcpu_cpu_opt || march_cpu_opt)
19426 as_bad (_("use of old and new-style options to set CPU type"));
19427
19428 mcpu_cpu_opt = legacy_cpu;
19429 }
19430 else if (!mcpu_cpu_opt)
19431 mcpu_cpu_opt = march_cpu_opt;
19432
19433 if (legacy_fpu)
19434 {
19435 if (mfpu_opt)
19436 as_bad (_("use of old and new-style options to set FPU type"));
19437
19438 mfpu_opt = legacy_fpu;
19439 }
19440 else if (!mfpu_opt)
19441 {
19442 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
19443 /* Some environments specify a default FPU. If they don't, infer it
19444 from the processor. */
19445 if (mcpu_fpu_opt)
19446 mfpu_opt = mcpu_fpu_opt;
19447 else
19448 mfpu_opt = march_fpu_opt;
19449 #else
19450 mfpu_opt = &fpu_default;
19451 #endif
19452 }
19453
19454 if (!mfpu_opt)
19455 {
19456 if (mcpu_cpu_opt != NULL)
19457 mfpu_opt = &fpu_default;
19458 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
19459 mfpu_opt = &fpu_arch_vfp_v2;
19460 else
19461 mfpu_opt = &fpu_arch_fpa;
19462 }
19463
19464 #ifdef CPU_DEFAULT
19465 if (!mcpu_cpu_opt)
19466 {
19467 mcpu_cpu_opt = &cpu_default;
19468 selected_cpu = cpu_default;
19469 }
19470 #else
19471 if (mcpu_cpu_opt)
19472 selected_cpu = *mcpu_cpu_opt;
19473 else
19474 mcpu_cpu_opt = &arm_arch_any;
19475 #endif
19476
19477 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19478
19479 autoselect_thumb_from_cpu_variant ();
19480
19481 arm_arch_used = thumb_arch_used = arm_arch_none;
19482
19483 #if defined OBJ_COFF || defined OBJ_ELF
19484 {
19485 unsigned int flags = 0;
19486
19487 #if defined OBJ_ELF
19488 flags = meabi_flags;
19489
19490 switch (meabi_flags)
19491 {
19492 case EF_ARM_EABI_UNKNOWN:
19493 #endif
19494 /* Set the flags in the private structure. */
19495 if (uses_apcs_26) flags |= F_APCS26;
19496 if (support_interwork) flags |= F_INTERWORK;
19497 if (uses_apcs_float) flags |= F_APCS_FLOAT;
19498 if (pic_code) flags |= F_PIC;
19499 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
19500 flags |= F_SOFT_FLOAT;
19501
19502 switch (mfloat_abi_opt)
19503 {
19504 case ARM_FLOAT_ABI_SOFT:
19505 case ARM_FLOAT_ABI_SOFTFP:
19506 flags |= F_SOFT_FLOAT;
19507 break;
19508
19509 case ARM_FLOAT_ABI_HARD:
19510 if (flags & F_SOFT_FLOAT)
19511 as_bad (_("hard-float conflicts with specified fpu"));
19512 break;
19513 }
19514
19515 /* Using pure-endian doubles (even if soft-float). */
19516 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
19517 flags |= F_VFP_FLOAT;
19518
19519 #if defined OBJ_ELF
19520 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
19521 flags |= EF_ARM_MAVERICK_FLOAT;
19522 break;
19523
19524 case EF_ARM_EABI_VER4:
19525 case EF_ARM_EABI_VER5:
19526 /* No additional flags to set. */
19527 break;
19528
19529 default:
19530 abort ();
19531 }
19532 #endif
19533 bfd_set_private_flags (stdoutput, flags);
19534
19535 /* We have run out of flags in the COFF header to encode the
19536 status of ATPCS support, so instead we create a dummy,
19537 empty, debug section called .arm.atpcs. */
19538 if (atpcs)
19539 {
19540 asection * sec;
19541
19542 sec = bfd_make_section (stdoutput, ".arm.atpcs");
19543
19544 if (sec != NULL)
19545 {
19546 bfd_set_section_flags
19547 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
19548 bfd_set_section_size (stdoutput, sec, 0);
19549 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
19550 }
19551 }
19552 }
19553 #endif
19554
19555 /* Record the CPU type as well. */
19556 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
19557 mach = bfd_mach_arm_iWMMXt2;
19558 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
19559 mach = bfd_mach_arm_iWMMXt;
19560 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
19561 mach = bfd_mach_arm_XScale;
19562 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
19563 mach = bfd_mach_arm_ep9312;
19564 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
19565 mach = bfd_mach_arm_5TE;
19566 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
19567 {
19568 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19569 mach = bfd_mach_arm_5T;
19570 else
19571 mach = bfd_mach_arm_5;
19572 }
19573 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
19574 {
19575 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19576 mach = bfd_mach_arm_4T;
19577 else
19578 mach = bfd_mach_arm_4;
19579 }
19580 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
19581 mach = bfd_mach_arm_3M;
19582 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
19583 mach = bfd_mach_arm_3;
19584 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
19585 mach = bfd_mach_arm_2a;
19586 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
19587 mach = bfd_mach_arm_2;
19588 else
19589 mach = bfd_mach_arm_unknown;
19590
19591 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
19592 }
19593
19594 /* Command line processing. */
19595
19596 /* md_parse_option
19597 Invocation line includes a switch not recognized by the base assembler.
19598 See if it's a processor-specific option.
19599
19600 This routine is somewhat complicated by the need for backwards
19601 compatibility (since older releases of gcc can't be changed).
19602 The new options try to make the interface as compatible as
19603 possible with GCC.
19604
19605 New options (supported) are:
19606
19607 -mcpu=<cpu name> Assemble for selected processor
19608 -march=<architecture name> Assemble for selected architecture
19609 -mfpu=<fpu architecture> Assemble for selected FPU.
19610 -EB/-mbig-endian Big-endian
19611 -EL/-mlittle-endian Little-endian
19612 -k Generate PIC code
19613 -mthumb Start in Thumb mode
19614 -mthumb-interwork Code supports ARM/Thumb interworking
19615
19616 For now we will also provide support for:
19617
19618 -mapcs-32 32-bit Program counter
19619 -mapcs-26 26-bit Program counter
19620 -mapcs-float Floats passed in FP registers
19621 -mapcs-reentrant Reentrant code
19622 -matpcs
19623 (at some point these will probably be replaced with -mapcs=<list of options>
19624 and -matpcs=<list of options>)
19625
19626 The remaining options are only supported for backwards compatibility.
19627 CPU variants (the arm part is optional):
19628 -m[arm]1 Currently not supported.
19629 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
19630 -m[arm]3 Arm 3 processor
19631 -m[arm]6[xx], Arm 6 processors
19632 -m[arm]7[xx][t][[d]m] Arm 7 processors
19633 -m[arm]8[10] Arm 8 processors
19634 -m[arm]9[20][tdmi] Arm 9 processors
19635 -mstrongarm[110[0]] StrongARM processors
19636 -mxscale XScale processors
19637 -m[arm]v[2345[t[e]]] Arm architectures
19638 -mall All (except the ARM1)
19639 FP variants:
19640 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
19641 -mfpe-old (No float load/store multiples)
19642 -mvfpxd VFP Single precision
19643 -mvfp All VFP
19644 -mno-fpu Disable all floating point instructions
19645
19646 The following CPU names are recognized:
19647 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
19648 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
19649 arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
19650 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
19651 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
19652 arm10t, arm10e, arm1020t, arm1020e, arm10200e,
19653 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
19654
19655 */
19656
19657 const char * md_shortopts = "m:k";
19658
19659 #ifdef ARM_BI_ENDIAN
19660 #define OPTION_EB (OPTION_MD_BASE + 0)
19661 #define OPTION_EL (OPTION_MD_BASE + 1)
19662 #else
19663 #if TARGET_BYTES_BIG_ENDIAN
19664 #define OPTION_EB (OPTION_MD_BASE + 0)
19665 #else
19666 #define OPTION_EL (OPTION_MD_BASE + 1)
19667 #endif
19668 #endif
19669
19670 struct option md_longopts[] =
19671 {
19672 #ifdef OPTION_EB
19673 {"EB", no_argument, NULL, OPTION_EB},
19674 #endif
19675 #ifdef OPTION_EL
19676 {"EL", no_argument, NULL, OPTION_EL},
19677 #endif
19678 {NULL, no_argument, NULL, 0}
19679 };
19680
19681 size_t md_longopts_size = sizeof (md_longopts);
19682
19683 struct arm_option_table
19684 {
19685 char *option; /* Option name to match. */
19686 char *help; /* Help information. */
19687 int *var; /* Variable to change. */
19688 int value; /* What to change it to. */
19689 char *deprecated; /* If non-null, print this message. */
19690 };
19691
19692 struct arm_option_table arm_opts[] =
19693 {
19694 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
19695 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
19696 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
19697 &support_interwork, 1, NULL},
19698 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
19699 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
19700 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
19701 1, NULL},
19702 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
19703 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
19704 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
19705 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
19706 NULL},
19707
19708 /* These are recognized by the assembler, but have no effect on code. */
19709 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
19710 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
19711 {NULL, NULL, NULL, 0, NULL}
19712 };
19713
19714 struct arm_legacy_option_table
19715 {
19716 char *option; /* Option name to match. */
19717 const arm_feature_set **var; /* Variable to change. */
19718 const arm_feature_set value; /* What to change it to. */
19719 char *deprecated; /* If non-null, print this message. */
19720 };
19721
19722 const struct arm_legacy_option_table arm_legacy_opts[] =
19723 {
19724 /* DON'T add any new processors to this list -- we want the whole list
19725 to go away... Add them to the processors table instead. */
19726 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19727 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19728 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19729 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19730 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19731 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19732 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19733 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19734 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19735 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19736 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19737 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19738 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19739 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19740 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19741 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19742 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19743 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19744 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19745 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19746 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19747 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19748 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19749 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19750 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19751 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19752 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19753 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19754 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19755 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19756 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19757 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19758 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19759 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19760 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19761 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19762 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19763 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19764 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19765 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19766 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19767 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19768 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19769 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19770 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19771 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19772 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19773 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19774 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19775 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19776 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19777 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19778 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19779 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19780 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19781 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19782 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19783 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19784 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19785 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19786 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19787 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19788 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19789 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19790 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19791 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19792 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19793 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19794 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
19795 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
19796 N_("use -mcpu=strongarm110")},
19797 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
19798 N_("use -mcpu=strongarm1100")},
19799 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
19800 N_("use -mcpu=strongarm1110")},
19801 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
19802 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
19803 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
19804
19805 /* Architecture variants -- don't add any more to this list either. */
19806 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19807 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19808 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19809 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19810 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19811 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19812 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19813 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19814 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19815 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19816 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19817 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19818 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19819 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19820 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19821 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19822 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19823 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19824
19825 /* Floating point variants -- don't add any more to this list either. */
19826 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
19827 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
19828 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
19829 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
19830 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
19831
19832 {NULL, NULL, ARM_ARCH_NONE, NULL}
19833 };
19834
19835 struct arm_cpu_option_table
19836 {
19837 char *name;
19838 const arm_feature_set value;
19839 /* For some CPUs we assume an FPU unless the user explicitly sets
19840 -mfpu=... */
19841 const arm_feature_set default_fpu;
19842 /* The canonical name of the CPU, or NULL to use NAME converted to upper
19843 case. */
19844 const char *canonical_name;
19845 };
19846
19847 /* This list should, at a minimum, contain all the cpu names
19848 recognized by GCC. */
19849 static const struct arm_cpu_option_table arm_cpus[] =
19850 {
19851 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
19852 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
19853 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
19854 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19855 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19856 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19857 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19858 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19859 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19860 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19861 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19862 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19863 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19864 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19865 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19866 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19867 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19868 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19869 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19870 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19871 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19872 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19873 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19874 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19875 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19876 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19877 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19878 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19879 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19880 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19881 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19882 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19883 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19884 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19885 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19886 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19887 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19888 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19889 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19890 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
19891 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19892 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19893 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19894 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19895 /* For V5 or later processors we default to using VFP; but the user
19896 should really set the FPU type explicitly. */
19897 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19898 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19899 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19900 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19901 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19902 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19903 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
19904 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19905 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19906 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
19907 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19908 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19909 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19910 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19911 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19912 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
19913 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19914 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19915 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19916 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
19917 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19918 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
19919 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
19920 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
19921 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
19922 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
19923 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
19924 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
19925 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
19926 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
19927 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
19928 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
19929 | FPU_NEON_EXT_V1),
19930 NULL},
19931 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
19932 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
19933 /* ??? XSCALE is really an architecture. */
19934 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19935 /* ??? iwmmxt is not a processor. */
19936 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
19937 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
19938 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19939 /* Maverick */
19940 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
19941 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
19942 };
19943
19944 struct arm_arch_option_table
19945 {
19946 char *name;
19947 const arm_feature_set value;
19948 const arm_feature_set default_fpu;
19949 };
19950
19951 /* This list should, at a minimum, contain all the architecture names
19952 recognized by GCC. */
19953 static const struct arm_arch_option_table arm_archs[] =
19954 {
19955 {"all", ARM_ANY, FPU_ARCH_FPA},
19956 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
19957 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
19958 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
19959 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
19960 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
19961 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
19962 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
19963 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
19964 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
19965 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
19966 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
19967 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
19968 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
19969 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
19970 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
19971 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
19972 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
19973 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
19974 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
19975 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
19976 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
19977 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
19978 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
19979 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
19980 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
19981 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
19982 /* The official spelling of the ARMv7 profile variants is the dashed form.
19983 Accept the non-dashed form for compatibility with old toolchains. */
19984 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
19985 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
19986 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
19987 {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
19988 {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
19989 {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
19990 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
19991 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
19992 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
19993 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
19994 };
19995
19996 /* ISA extensions in the co-processor space. */
19997 struct arm_option_cpu_value_table
19998 {
19999 char *name;
20000 const arm_feature_set value;
20001 };
20002
20003 static const struct arm_option_cpu_value_table arm_extensions[] =
20004 {
20005 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
20006 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
20007 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
20008 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
20009 {NULL, ARM_ARCH_NONE}
20010 };
20011
20012 /* This list should, at a minimum, contain all the fpu names
20013 recognized by GCC. */
20014 static const struct arm_option_cpu_value_table arm_fpus[] =
20015 {
20016 {"softfpa", FPU_NONE},
20017 {"fpe", FPU_ARCH_FPE},
20018 {"fpe2", FPU_ARCH_FPE},
20019 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
20020 {"fpa", FPU_ARCH_FPA},
20021 {"fpa10", FPU_ARCH_FPA},
20022 {"fpa11", FPU_ARCH_FPA},
20023 {"arm7500fe", FPU_ARCH_FPA},
20024 {"softvfp", FPU_ARCH_VFP},
20025 {"softvfp+vfp", FPU_ARCH_VFP_V2},
20026 {"vfp", FPU_ARCH_VFP_V2},
20027 {"vfp9", FPU_ARCH_VFP_V2},
20028 {"vfp3", FPU_ARCH_VFP_V3},
20029 {"vfp10", FPU_ARCH_VFP_V2},
20030 {"vfp10-r0", FPU_ARCH_VFP_V1},
20031 {"vfpxd", FPU_ARCH_VFP_V1xD},
20032 {"arm1020t", FPU_ARCH_VFP_V1},
20033 {"arm1020e", FPU_ARCH_VFP_V2},
20034 {"arm1136jfs", FPU_ARCH_VFP_V2},
20035 {"arm1136jf-s", FPU_ARCH_VFP_V2},
20036 {"maverick", FPU_ARCH_MAVERICK},
20037 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
20038 {NULL, ARM_ARCH_NONE}
20039 };
20040
20041 struct arm_option_value_table
20042 {
20043 char *name;
20044 long value;
20045 };
20046
20047 static const struct arm_option_value_table arm_float_abis[] =
20048 {
20049 {"hard", ARM_FLOAT_ABI_HARD},
20050 {"softfp", ARM_FLOAT_ABI_SOFTFP},
20051 {"soft", ARM_FLOAT_ABI_SOFT},
20052 {NULL, 0}
20053 };
20054
20055 #ifdef OBJ_ELF
20056 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
20057 static const struct arm_option_value_table arm_eabis[] =
20058 {
20059 {"gnu", EF_ARM_EABI_UNKNOWN},
20060 {"4", EF_ARM_EABI_VER4},
20061 {"5", EF_ARM_EABI_VER5},
20062 {NULL, 0}
20063 };
20064 #endif
20065
20066 struct arm_long_option_table
20067 {
20068 char * option; /* Substring to match. */
20069 char * help; /* Help information. */
20070 int (* func) (char * subopt); /* Function to decode sub-option. */
20071 char * deprecated; /* If non-null, print this message. */
20072 };
20073
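/* Parse a '+'-separated list of extension names that follows a CPU or
   architecture name, e.g. the "+iwmmxt" part of an (illustrative)
   "-mcpu=xscale+iwmmxt".  The named extensions are merged into a fresh
   copy of *OPT_P; returns zero and emits a diagnostic on malformed or
   unknown input.  */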
20074 static int
20075 arm_parse_extension (char * str, const arm_feature_set **opt_p)
20076 {
20077 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
20078
20079 /* Copy the feature set, so that we can modify it. */
20080 *ext_set = **opt_p;
20081 *opt_p = ext_set;
20082
20083 while (str != NULL && *str != 0)
20084 {
20085 const struct arm_option_cpu_value_table * opt;
20086 char * ext;
20087 int optlen;
20088
20089 if (*str != '+')
20090 {
20091 as_bad (_("invalid architectural extension"));
20092 return 0;
20093 }
20094
20095 str++;
20096 ext = strchr (str, '+');
20097
20098 if (ext != NULL)
20099 optlen = ext - str;
20100 else
20101 optlen = strlen (str);
20102
20103 if (optlen == 0)
20104 {
20105 as_bad (_("missing architectural extension"));
20106 return 0;
20107 }
20108
20109 for (opt = arm_extensions; opt->name != NULL; opt++)
20110 if (strncmp (opt->name, str, optlen) == 0)
20111 {
20112 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
20113 break;
20114 }
20115
20116 if (opt->name == NULL)
20117 {
20118 as_bad (_("unknown architectural extension `%s'"), str);
20119 return 0;
20120 }
20121
20122 str = ext;
20123 }
20124
20125 return 1;
20126 }
20127
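/* Handle the -mcpu= option, e.g. "-mcpu=arm926ej-s".  Look the name up
   in arm_cpus, record the CPU's feature set and default FPU, and hand
   any trailing "+extension" list to arm_parse_extension.  */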
20128 static int
20129 arm_parse_cpu (char * str)
20130 {
20131 const struct arm_cpu_option_table * opt;
20132 char * ext = strchr (str, '+');
20133 int optlen;
20134
20135 if (ext != NULL)
20136 optlen = ext - str;
20137 else
20138 optlen = strlen (str);
20139
20140 if (optlen == 0)
20141 {
20142 as_bad (_("missing cpu name `%s'"), str);
20143 return 0;
20144 }
20145
20146 for (opt = arm_cpus; opt->name != NULL; opt++)
20147 if (strncmp (opt->name, str, optlen) == 0)
20148 {
20149 mcpu_cpu_opt = &opt->value;
20150 mcpu_fpu_opt = &opt->default_fpu;
20151 if (opt->canonical_name)
20152 strcpy (selected_cpu_name, opt->canonical_name);
20153 else
20154 {
20155 int i;
20156 for (i = 0; i < optlen; i++)
20157 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20158 selected_cpu_name[i] = 0;
20159 }
20160
20161 if (ext != NULL)
20162 return arm_parse_extension (ext, &mcpu_cpu_opt);
20163
20164 return 1;
20165 }
20166
20167 as_bad (_("unknown cpu `%s'"), str);
20168 return 0;
20169 }
20170
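/* Handle the -march= option, e.g. "-march=armv5te".  Look the name up
   in arm_archs and hand any trailing "+extension" list to
   arm_parse_extension.  */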
20171 static int
20172 arm_parse_arch (char * str)
20173 {
20174 const struct arm_arch_option_table *opt;
20175 char *ext = strchr (str, '+');
20176 int optlen;
20177
20178 if (ext != NULL)
20179 optlen = ext - str;
20180 else
20181 optlen = strlen (str);
20182
20183 if (optlen == 0)
20184 {
20185 as_bad (_("missing architecture name `%s'"), str);
20186 return 0;
20187 }
20188
20189 for (opt = arm_archs; opt->name != NULL; opt++)
20190 if (strncmp (opt->name, str, optlen) == 0 && opt->name[optlen] == '\0')
20191 {
20192 march_cpu_opt = &opt->value;
20193 march_fpu_opt = &opt->default_fpu;
20194 strcpy (selected_cpu_name, opt->name);
20195
20196 if (ext != NULL)
20197 return arm_parse_extension (ext, &march_cpu_opt);
20198
20199 return 1;
20200 }
20201
20202 as_bad (_("unknown architecture `%s'\n"), str);
20203 return 0;
20204 }
20205
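/* Handle the -mfpu= option; the name must match an arm_fpus entry
   exactly, e.g. "-mfpu=vfp" or "-mfpu=neon".  */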
20206 static int
20207 arm_parse_fpu (char * str)
20208 {
20209 const struct arm_option_cpu_value_table * opt;
20210
20211 for (opt = arm_fpus; opt->name != NULL; opt++)
20212 if (streq (opt->name, str))
20213 {
20214 mfpu_opt = &opt->value;
20215 return 1;
20216 }
20217
20218 as_bad (_("unknown floating point format `%s'\n"), str);
20219 return 0;
20220 }
20221
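/* Handle the -mfloat-abi= option; accepted values are those listed in
   arm_float_abis above (hard, softfp, soft).  */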
20222 static int
20223 arm_parse_float_abi (char * str)
20224 {
20225 const struct arm_option_value_table * opt;
20226
20227 for (opt = arm_float_abis; opt->name != NULL; opt++)
20228 if (streq (opt->name, str))
20229 {
20230 mfloat_abi_opt = opt->value;
20231 return 1;
20232 }
20233
20234 as_bad (_("unknown floating point abi `%s'\n"), str);
20235 return 0;
20236 }
20237
20238 #ifdef OBJ_ELF
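/* Handle the -meabi= option; accepted values are those listed in
   arm_eabis above ("gnu", "4" or "5").  */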
20239 static int
20240 arm_parse_eabi (char * str)
20241 {
20242 const struct arm_option_value_table *opt;
20243
20244 for (opt = arm_eabis; opt->name != NULL; opt++)
20245 if (streq (opt->name, str))
20246 {
20247 meabi_flags = opt->value;
20248 return 1;
20249 }
20250 as_bad (_("unknown EABI `%s'\n"), str);
20251 return 0;
20252 }
20253 #endif
20254
20255 struct arm_long_option_table arm_long_opts[] =
20256 {
20257 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
20258 arm_parse_cpu, NULL},
20259 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
20260 arm_parse_arch, NULL},
20261 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
20262 arm_parse_fpu, NULL},
20263 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
20264 arm_parse_float_abi, NULL},
20265 #ifdef OBJ_ELF
20266 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
20267 arm_parse_eabi, NULL},
20268 #endif
20269 {NULL, NULL, 0, NULL}
20270 };
20271
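/* Target-specific command-line option hook.  GAS calls this for each
   option character/argument pair; we check the short options, then the
   deprecated legacy options, then the long "-m..." options, and return
   nonzero once one of them is recognized.  */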
20272 int
20273 md_parse_option (int c, char * arg)
20274 {
20275 struct arm_option_table *opt;
20276 const struct arm_legacy_option_table *fopt;
20277 struct arm_long_option_table *lopt;
20278
20279 switch (c)
20280 {
20281 #ifdef OPTION_EB
20282 case OPTION_EB:
20283 target_big_endian = 1;
20284 break;
20285 #endif
20286
20287 #ifdef OPTION_EL
20288 case OPTION_EL:
20289 target_big_endian = 0;
20290 break;
20291 #endif
20292
20293 case 'a':
20294 /* Listing option.  Just ignore these; we don't support additional
20295 ones.  */
20296 return 0;
20297
20298 default:
20299 for (opt = arm_opts; opt->option != NULL; opt++)
20300 {
20301 if (c == opt->option[0]
20302 && ((arg == NULL && opt->option[1] == 0)
20303 || streq (arg, opt->option + 1)))
20304 {
20305 #if WARN_DEPRECATED
20306 /* If the option is deprecated, tell the user. */
20307 if (opt->deprecated != NULL)
20308 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20309 arg ? arg : "", _(opt->deprecated));
20310 #endif
20311
20312 if (opt->var != NULL)
20313 *opt->var = opt->value;
20314
20315 return 1;
20316 }
20317 }
20318
20319 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
20320 {
20321 if (c == fopt->option[0]
20322 && ((arg == NULL && fopt->option[1] == 0)
20323 || streq (arg, fopt->option + 1)))
20324 {
20325 #if WARN_DEPRECATED
20326 /* If the option is deprecated, tell the user. */
20327 if (fopt->deprecated != NULL)
20328 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20329 arg ? arg : "", _(fopt->deprecated));
20330 #endif
20331
20332 if (fopt->var != NULL)
20333 *fopt->var = &fopt->value;
20334
20335 return 1;
20336 }
20337 }
20338
20339 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20340 {
20341 /* These options are expected to have an argument. */
20342 if (c == lopt->option[0]
20343 && arg != NULL
20344 && strncmp (arg, lopt->option + 1,
20345 strlen (lopt->option + 1)) == 0)
20346 {
20347 #if WARN_DEPRECATED
20348 /* If the option is deprecated, tell the user. */
20349 if (lopt->deprecated != NULL)
20350 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
20351 _(lopt->deprecated));
20352 #endif
20353
20354 /* Call the sub-option parser. */
20355 return lopt->func (arg + strlen (lopt->option) - 1);
20356 }
20357 }
20358
20359 return 0;
20360 }
20361
20362 return 1;
20363 }
20364
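/* Print the ARM-specific option summary used by the generic --help
   output.  */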
20365 void
20366 md_show_usage (FILE * fp)
20367 {
20368 struct arm_option_table *opt;
20369 struct arm_long_option_table *lopt;
20370
20371 fprintf (fp, _(" ARM-specific assembler options:\n"));
20372
20373 for (opt = arm_opts; opt->option != NULL; opt++)
20374 if (opt->help != NULL)
20375 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
20376
20377 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20378 if (lopt->help != NULL)
20379 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
20380
20381 #ifdef OPTION_EB
20382 fprintf (fp, _("\
20383 -EB assemble code for a big-endian cpu\n"));
20384 #endif
20385
20386 #ifdef OPTION_EL
20387 fprintf (fp, _("\
20388 -EL assemble code for a little-endian cpu\n"));
20389 #endif
20390 }
20391
20392
20393 #ifdef OBJ_ELF
20394 typedef struct
20395 {
20396 int val;
20397 arm_feature_set flags;
20398 } cpu_arch_ver_table;
20399
20400 /* Mapping from CPU features to EABI CPU arch values.  The table must be
20401 sorted with the least capable architectures first.  */
20402 static const cpu_arch_ver_table cpu_arch_ver[] =
20403 {
20404 {1, ARM_ARCH_V4},
20405 {2, ARM_ARCH_V4T},
20406 {3, ARM_ARCH_V5},
20407 {4, ARM_ARCH_V5TE},
20408 {5, ARM_ARCH_V5TEJ},
20409 {6, ARM_ARCH_V6},
20410 {7, ARM_ARCH_V6Z},
20411 {8, ARM_ARCH_V6K},
20412 {9, ARM_ARCH_V6T2},
20413 {10, ARM_ARCH_V7A},
20414 {10, ARM_ARCH_V7R},
20415 {10, ARM_ARCH_V7M},
20416 {0, ARM_ARCH_NONE}
20417 };
20418
20419 /* Set the public EABI object attributes. */
20420 static void
20421 aeabi_set_public_attributes (void)
20422 {
20423 int arch;
20424 arm_feature_set flags;
20425 arm_feature_set tmp;
20426 const cpu_arch_ver_table *p;
20427
20428 /* Choose the architecture based on the capabilities of the requested cpu
20429 (if any) and/or the instructions actually used. */
20430 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
20431 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
20432 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
20433 /* Allow the user to override the reported architecture.  */
20434 if (object_arch)
20435 {
20436 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
20437 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
20438 }
20439
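/* Walk cpu_arch_ver from least to most capable.  Each entry that still
   overlaps the remaining feature bits records its EABI value and claims
   those bits, so the last match gives the most capable architecture
   actually required.  */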
20440 tmp = flags;
20441 arch = 0;
20442 for (p = cpu_arch_ver; p->val; p++)
20443 {
20444 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
20445 {
20446 arch = p->val;
20447 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
20448 }
20449 }
20450
20451 /* Tag_CPU_name. */
20452 if (selected_cpu_name[0])
20453 {
20454 char *p;
20455
20456 p = selected_cpu_name;
20457 if (strncmp (p, "armv", 4) == 0)
20458 {
20459 int i;
20460
20461 p += 4;
20462 for (i = 0; p[i]; i++)
20463 p[i] = TOUPPER (p[i]);
20464 }
20465 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
20466 }
20467 /* Tag_CPU_arch. */
20468 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
20469 /* Tag_CPU_arch_profile. */
20470 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
20471 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
20472 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
20473 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
20474 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
20475 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
20476 /* Tag_ARM_ISA_use. */
20477 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
20478 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
20479 /* Tag_THUMB_ISA_use. */
20480 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
20481 elf32_arm_add_eabi_attr_int (stdoutput, 9,
20482 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
20483 /* Tag_VFP_arch. */
20484 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
20485 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
20486 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
20487 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
20488 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
20489 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
20490 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
20491 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
20492 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
20493 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
20494 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
20495 /* Tag_WMMX_arch. */
20496 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
20497 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
20498 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
20499 /* Tag_NEON_arch. */
20500 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
20501 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
20502 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
20503 }
20504
20505 /* Add the .ARM.attributes section. */
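/* Called at the end of assembly; nothing is emitted for objects using an
   EABI version older than 4 (including the "gnu" flavour).  */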
20506 void
20507 arm_md_end (void)
20508 {
20509 segT s;
20510 char *p;
20511 addressT addr;
20512 offsetT size;
20513
20514 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
20515 return;
20516
20517 aeabi_set_public_attributes ();
20518 size = elf32_arm_eabi_attr_size (stdoutput);
20519 s = subseg_new (".ARM.attributes", 0);
20520 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
20521 addr = frag_now_fix ();
20522 p = frag_more (size);
20523 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
20524 }
20525 #endif /* OBJ_ELF */
20526
20527
20528 /* Parse a .cpu directive. */
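/* For example, ".cpu arm926ej-s" selects that processor's feature set for
   the rest of the file, mirroring -mcpu= on the command line (but without
   any "+extension" handling).  */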
20529
20530 static void
20531 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
20532 {
20533 const struct arm_cpu_option_table *opt;
20534 char *name;
20535 char saved_char;
20536
20537 name = input_line_pointer;
20538 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20539 input_line_pointer++;
20540 saved_char = *input_line_pointer;
20541 *input_line_pointer = 0;
20542
20543 /* Skip the first "all" entry. */
20544 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
20545 if (streq (opt->name, name))
20546 {
20547 mcpu_cpu_opt = &opt->value;
20548 selected_cpu = opt->value;
20549 if (opt->canonical_name)
20550 strcpy (selected_cpu_name, opt->canonical_name);
20551 else
20552 {
20553 int i;
20554 for (i = 0; opt->name[i]; i++)
20555 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20556 selected_cpu_name[i] = 0;
20557 }
20558 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20559 *input_line_pointer = saved_char;
20560 demand_empty_rest_of_line ();
20561 return;
20562 }
20563 as_bad (_("unknown cpu `%s'"), name);
20564 *input_line_pointer = saved_char;
20565 ignore_rest_of_line ();
20566 }
20567
20568
20569 /* Parse a .arch directive. */
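/* For example, ".arch armv5te"; like -march= this selects an architecture
   rather than a specific processor.  */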
20570
20571 static void
20572 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
20573 {
20574 const struct arm_arch_option_table *opt;
20575 char saved_char;
20576 char *name;
20577
20578 name = input_line_pointer;
20579 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20580 input_line_pointer++;
20581 saved_char = *input_line_pointer;
20582 *input_line_pointer = 0;
20583
20584 /* Skip the first "all" entry. */
20585 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20586 if (streq (opt->name, name))
20587 {
20588 mcpu_cpu_opt = &opt->value;
20589 selected_cpu = opt->value;
20590 strcpy (selected_cpu_name, opt->name);
20591 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20592 *input_line_pointer = saved_char;
20593 demand_empty_rest_of_line ();
20594 return;
20595 }
20596
20597 as_bad (_("unknown architecture `%s'\n"), name);
20598 *input_line_pointer = saved_char;
20599 ignore_rest_of_line ();
20600 }
20601
20602
20603 /* Parse a .object_arch directive. */
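/* ".object_arch" overrides the architecture recorded in the object's EABI
   attributes (see aeabi_set_public_attributes) without changing which
   instructions the assembler accepts, e.g. ".object_arch armv4t".  */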
20604
20605 static void
20606 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
20607 {
20608 const struct arm_arch_option_table *opt;
20609 char saved_char;
20610 char *name;
20611
20612 name = input_line_pointer;
20613 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20614 input_line_pointer++;
20615 saved_char = *input_line_pointer;
20616 *input_line_pointer = 0;
20617
20618 /* Skip the first "all" entry. */
20619 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20620 if (streq (opt->name, name))
20621 {
20622 object_arch = &opt->value;
20623 *input_line_pointer = saved_char;
20624 demand_empty_rest_of_line ();
20625 return;
20626 }
20627
20628 as_bad (_("unknown architecture `%s'\n"), name);
20629 *input_line_pointer = saved_char;
20630 ignore_rest_of_line ();
20631 }
20632
20633
20634 /* Parse a .fpu directive. */
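/* For example, ".fpu vfp" or ".fpu neon"; the accepted names are the same
   ones listed in arm_fpus for the -mfpu= option.  */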
20635
20636 static void
20637 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
20638 {
20639 const struct arm_option_cpu_value_table *opt;
20640 char saved_char;
20641 char *name;
20642
20643 name = input_line_pointer;
20644 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
20645 input_line_pointer++;
20646 saved_char = *input_line_pointer;
20647 *input_line_pointer = 0;
20648
20649 for (opt = arm_fpus; opt->name != NULL; opt++)
20650 if (streq (opt->name, name))
20651 {
20652 mfpu_opt = &opt->value;
20653 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20654 *input_line_pointer = saved_char;
20655 demand_empty_rest_of_line ();
20656 return;
20657 }
20658
20659 as_bad (_("unknown floating point format `%s'\n"), name);
20660 *input_line_pointer = saved_char;
20661 ignore_rest_of_line ();
20662 }
20663
20664 /* Copy symbol information. */
20665 void
20666 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
20667 {
20668 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
20669 }