2010-01-03 Daniel Gutson <dgutson@codesourcery.com>
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
10
11 This file is part of GAS, the GNU Assembler.
12
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
16 any later version.
17
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
26 02110-1301, USA. */
27
28 #include "as.h"
29 #include <limits.h>
30 #include <stdarg.h>
31 #define NO_RELOC 0
32 #include "safe-ctype.h"
33 #include "subsegs.h"
34 #include "obstack.h"
35
36 #include "opcode/arm.h"
37
38 #ifdef OBJ_ELF
39 #include "elf/arm.h"
40 #include "dw2gencfi.h"
41 #endif
42
43 #include "dwarf2dbg.h"
44
45 #ifdef OBJ_ELF
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
48
49 /* This structure holds the unwinding state. */
50
static struct
{
  /* Symbol at the start of the function being unwound (presumably set
     by the .fnstart handler -- that code is outside this chunk).  */
  symbolS * proc_start;
  /* Symbol for this function's entry in the unwind table, if created.  */
  symbolS * table_entry;
  /* Explicit personality routine symbol, when one was named.  */
  symbolS * personality_routine;
  /* Index of the predefined personality routine in use, if any.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Number of opcode bytes used and allocated in OPCODES.  */
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
79
80 #endif /* OBJ_ELF */
81
82 /* Results from operand parsing worker functions. */
83
84 typedef enum
85 {
86 PARSE_OPERAND_SUCCESS,
87 PARSE_OPERAND_FAIL,
88 PARSE_OPERAND_FAIL_NO_BACKTRACK
89 } parse_operand_result;
90
91 enum arm_float_abi
92 {
93 ARM_FLOAT_ABI_HARD,
94 ARM_FLOAT_ABI_SOFTFP,
95 ARM_FLOAT_ABI_SOFT
96 };
97
98 /* Types of processor to assemble for. */
99 #ifndef CPU_DEFAULT
100 #if defined __XSCALE__
101 #define CPU_DEFAULT ARM_ARCH_XSCALE
102 #else
103 #if defined __thumb__
104 #define CPU_DEFAULT ARM_ARCH_V5T
105 #endif
106 #endif
107 #endif
108
109 #ifndef FPU_DEFAULT
110 # ifdef TE_LINUX
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
113 # ifdef OBJ_ELF
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 # else
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # endif
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 # else
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
124 # endif
125 #endif /* ifndef FPU_DEFAULT */
126
127 #define streq(a, b) (strcmp (a, b) == 0)
128
129 static arm_feature_set cpu_variant;
130 static arm_feature_set arm_arch_used;
131 static arm_feature_set thumb_arch_used;
132
133 /* Flags stored in private area of BFD structure. */
134 static int uses_apcs_26 = FALSE;
135 static int atpcs = FALSE;
136 static int support_interwork = FALSE;
137 static int uses_apcs_float = FALSE;
138 static int pic_code = FALSE;
139 static int fix_v4bx = FALSE;
140 /* Warn on using deprecated features. */
141 static int warn_on_deprecated = TRUE;
142
143
144 /* Variables that we set while parsing command-line options. Once all
145 options have been read we re-process these values to set the real
146 assembly flags. */
147 static const arm_feature_set *legacy_cpu = NULL;
148 static const arm_feature_set *legacy_fpu = NULL;
149
150 static const arm_feature_set *mcpu_cpu_opt = NULL;
151 static const arm_feature_set *mcpu_fpu_opt = NULL;
152 static const arm_feature_set *march_cpu_opt = NULL;
153 static const arm_feature_set *march_fpu_opt = NULL;
154 static const arm_feature_set *mfpu_opt = NULL;
155 static const arm_feature_set *object_arch = NULL;
156
157 /* Constants for known architecture features. */
158 static const arm_feature_set fpu_default = FPU_DEFAULT;
159 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
160 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
161 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
162 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
163 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
164 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
165 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
166 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
167
168 #ifdef CPU_DEFAULT
169 static const arm_feature_set cpu_default = CPU_DEFAULT;
170 #endif
171
172 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
173 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
174 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
175 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
176 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
177 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
178 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
179 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
180 static const arm_feature_set arm_ext_v4t_5 =
181 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
182 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
183 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
184 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
185 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
186 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
187 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
188 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
189 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
190 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
191 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
192 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
193 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
194 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
195 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
196 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
197 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
198 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
199 static const arm_feature_set arm_ext_m =
200 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);
201
202 static const arm_feature_set arm_arch_any = ARM_ANY;
203 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
204 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
205 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
206
207 static const arm_feature_set arm_cext_iwmmxt2 =
208 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
209 static const arm_feature_set arm_cext_iwmmxt =
210 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
211 static const arm_feature_set arm_cext_xscale =
212 ARM_FEATURE (0, ARM_CEXT_XSCALE);
213 static const arm_feature_set arm_cext_maverick =
214 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
215 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
216 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
217 static const arm_feature_set fpu_vfp_ext_v1xd =
218 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
219 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
220 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
221 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
222 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
223 static const arm_feature_set fpu_vfp_ext_d32 =
224 ARM_FEATURE (0, FPU_VFP_EXT_D32);
225 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
226 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
227 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
228 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
229 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
230 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
231
232 static int mfloat_abi_opt = -1;
233 /* Record user cpu selection for object attributes. */
234 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
235 /* Must be long enough to hold any of the names in arm_cpus. */
236 static char selected_cpu_name[16];
237 #ifdef OBJ_ELF
238 # ifdef EABI_DEFAULT
239 static int meabi_flags = EABI_DEFAULT;
240 # else
241 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
242 # endif
243
244 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
245
246 bfd_boolean
247 arm_is_eabi (void)
248 {
249 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
250 }
251 #endif
252
253 #ifdef OBJ_ELF
254 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
255 symbolS * GOT_symbol;
256 #endif
257
258 /* 0: assemble for ARM,
259 1: assemble for Thumb,
260 2: assemble for Thumb even though target CPU does not support thumb
261 instructions. */
262 static int thumb_mode = 0;
263 /* A value distinct from the possible values for thumb_mode that we
264 can use to record whether thumb_mode has been copied into the
265 tc_frag_data field of a frag. */
266 #define MODE_RECORDED (1 << 4)
267
268 /* Specifies the intrinsic IT insn behavior mode. */
269 enum implicit_it_mode
270 {
271 IMPLICIT_IT_MODE_NEVER = 0x00,
272 IMPLICIT_IT_MODE_ARM = 0x01,
273 IMPLICIT_IT_MODE_THUMB = 0x02,
274 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
275 };
276 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
277
278 /* If unified_syntax is true, we are processing the new unified
279 ARM/Thumb syntax. Important differences from the old ARM mode:
280
281 - Immediate operands do not require a # prefix.
282 - Conditional affixes always appear at the end of the
283 instruction. (For backward compatibility, those instructions
284 that formerly had them in the middle, continue to accept them
285 there.)
286 - The IT instruction may appear, and if it does is validated
287 against subsequent conditional affixes. It does not generate
288 machine code.
289
290 Important differences from the old Thumb mode:
291
292 - Immediate operands do not require a # prefix.
293 - Most of the V6T2 instructions are only available in unified mode.
294 - The .N and .W suffixes are recognized and honored (it is an error
295 if they cannot be honored).
296 - All instructions set the flags if and only if they have an 's' affix.
297 - Conditional affixes may be used. They are validated against
298 preceding IT instructions. Unlike ARM mode, you cannot use a
299 conditional affix except in the scope of an IT instruction. */
300
301 static bfd_boolean unified_syntax = FALSE;
302
303 enum neon_el_type
304 {
305 NT_invtype,
306 NT_untyped,
307 NT_integer,
308 NT_float,
309 NT_poly,
310 NT_signed,
311 NT_unsigned
312 };
313
314 struct neon_type_el
315 {
316 enum neon_el_type type;
317 unsigned size;
318 };
319
320 #define NEON_MAX_TYPE_ELS 4
321
322 struct neon_type
323 {
324 struct neon_type_el el[NEON_MAX_TYPE_ELS];
325 unsigned elems;
326 };
327
328 enum it_instruction_type
329 {
330 OUTSIDE_IT_INSN,
331 INSIDE_IT_INSN,
332 INSIDE_IT_LAST_INSN,
333 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
334 if inside, should be the last one. */
335 NEUTRAL_IT_INSN, /* This could be either inside or outside,
336 i.e. BKPT and NOP. */
337 IT_INSN /* The IT insn has been parsed. */
338 };
339
/* Everything known about the instruction currently being assembled:
   filled in by the parsing phase, consumed by the encoding phase.  */
struct arm_it
{
  /* Diagnostic to report when parsing/encoding fails; NULL otherwise.  */
  const char * error;
  /* Binary encoding of the instruction built so far.  */
  unsigned long instruction;
  /* Encoded size in bytes.  */
  int size;
  /* Size forced by a width suffix, or 0 when unconstrained.  */
  int size_req;
  /* Condition code (COND_ALWAYS when unconditional).  */
  int cond;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  int uncond_value;
  /* Neon-style element type suffixes parsed from the mnemonic.  */
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  int is_neon;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  unsigned long relax;
  /* Relocation to emit for the immediate/address operand, if any.  */
  struct
  {
    bfd_reloc_code_real_type type;
    expressionS exp;
    int pc_rel;
  } reloc;

  /* How this instruction relates to a surrounding IT block.  */
  enum it_instruction_type it_insn_type;

  /* The parsed operands; at most 6 per instruction.  */
  struct
  {
    unsigned reg;
    signed int imm;
    struct neon_type_el vectype;
    unsigned present : 1;	/* Operand present.  */
    unsigned isreg : 1;		/* Operand was a register.  */
    unsigned immisreg : 1;	/* .imm field is a second register.  */
    unsigned isscalar : 1;	/* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;	/* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;	/* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm : 1;	/* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec : 1;		/* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad : 1;	/* Operand is Neon quad-precision register.  */
    unsigned issingle : 1;	/* Operand is VFP single-precision register.  */
    unsigned hasreloc : 1;	/* Operand has relocation suffix.  */
    unsigned writeback : 1;	/* Operand has trailing !  */
    unsigned preind : 1;	/* Preindexed address.  */
    unsigned postind : 1;	/* Postindexed address.  */
    unsigned negative : 1;	/* Index register was negated.  */
    unsigned shifted : 1;	/* Shift applied to operation.  */
    unsigned shift_kind : 3;	/* Shift operation (enum shift_kind).  */
  } operands[6];
};
393
394 static struct arm_it inst;
395
396 #define NUM_FLOAT_VALS 8
397
398 const char * fp_const[] =
399 {
400 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
401 };
402
403 /* Number of littlenums required to hold an extended precision number. */
404 #define MAX_LITTLENUMS 6
405
406 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
407
408 #define FAIL (-1)
409 #define SUCCESS (0)
410
411 #define SUFF_S 1
412 #define SUFF_D 2
413 #define SUFF_E 3
414 #define SUFF_P 4
415
416 #define CP_T_X 0x00008000
417 #define CP_T_Y 0x00400000
418
419 #define CONDS_BIT 0x00100000
420 #define LOAD_BIT 0x00100000
421
422 #define DOUBLE_LOAD_FLAG 0x00000001
423
424 struct asm_cond
425 {
426 const char * template_name;
427 unsigned long value;
428 };
429
430 #define COND_ALWAYS 0xE
431
432 struct asm_psr
433 {
434 const char * template_name;
435 unsigned long field;
436 };
437
438 struct asm_barrier_opt
439 {
440 const char * template_name;
441 unsigned long value;
442 };
443
444 /* The bit that distinguishes CPSR and SPSR. */
445 #define SPSR_BIT (1 << 22)
446
447 /* The individual PSR flag bits. */
448 #define PSR_c (1 << 16)
449 #define PSR_x (1 << 17)
450 #define PSR_s (1 << 18)
451 #define PSR_f (1 << 19)
452
453 struct reloc_entry
454 {
455 char * name;
456 bfd_reloc_code_real_type reloc;
457 };
458
459 enum vfp_reg_pos
460 {
461 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
462 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
463 };
464
465 enum vfp_ldstm_type
466 {
467 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
468 };
469
470 /* Bits for DEFINED field in neon_typed_alias. */
471 #define NTA_HASTYPE 1
472 #define NTA_HASINDEX 2
473
474 struct neon_typed_alias
475 {
476 unsigned char defined;
477 unsigned char index;
478 struct neon_type_el eltype;
479 };
480
481 /* ARM register categories. This includes coprocessor numbers and various
482 architecture extensions' registers. */
483 enum arm_reg_type
484 {
485 REG_TYPE_RN,
486 REG_TYPE_CP,
487 REG_TYPE_CN,
488 REG_TYPE_FN,
489 REG_TYPE_VFS,
490 REG_TYPE_VFD,
491 REG_TYPE_NQ,
492 REG_TYPE_VFSD,
493 REG_TYPE_NDQ,
494 REG_TYPE_NSDQ,
495 REG_TYPE_VFC,
496 REG_TYPE_MVF,
497 REG_TYPE_MVD,
498 REG_TYPE_MVFX,
499 REG_TYPE_MVDX,
500 REG_TYPE_MVAX,
501 REG_TYPE_DSPSC,
502 REG_TYPE_MMXWR,
503 REG_TYPE_MMXWC,
504 REG_TYPE_MMXWCG,
505 REG_TYPE_XSCALE,
506 };
507
508 /* Structure for a hash table entry for a register.
509 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
510 information which states whether a vector type or index is specified (for a
511 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
512 struct reg_entry
513 {
514 const char * name;
515 unsigned char number;
516 unsigned char type;
517 unsigned char builtin;
518 struct neon_typed_alias * neon;
519 };
520
521 /* Diagnostics used when we don't get a register of the expected type. */
522 const char * const reg_expected_msgs[] =
523 {
524 N_("ARM register expected"),
525 N_("bad or missing co-processor number"),
526 N_("co-processor register expected"),
527 N_("FPA register expected"),
528 N_("VFP single precision register expected"),
529 N_("VFP/Neon double precision register expected"),
530 N_("Neon quad precision register expected"),
531 N_("VFP single or double precision register expected"),
532 N_("Neon double or quad precision register expected"),
533 N_("VFP single, double or Neon quad precision register expected"),
534 N_("VFP system register expected"),
535 N_("Maverick MVF register expected"),
536 N_("Maverick MVD register expected"),
537 N_("Maverick MVFX register expected"),
538 N_("Maverick MVDX register expected"),
539 N_("Maverick MVAX register expected"),
540 N_("Maverick DSPSC register expected"),
541 N_("iWMMXt data register expected"),
542 N_("iWMMXt control register expected"),
543 N_("iWMMXt scalar register expected"),
544 N_("XScale accumulator register expected"),
545 };
546
547 /* Some well known registers that we refer to directly elsewhere. */
548 #define REG_SP 13
549 #define REG_LR 14
550 #define REG_PC 15
551
552 /* ARM instructions take 4bytes in the object file, Thumb instructions
553 take 2: */
554 #define INSN_SIZE 4
555
556 struct asm_opcode
557 {
558 /* Basic string to match. */
559 const char * template_name;
560
561 /* Parameters to instruction. */
562 unsigned char operands[8];
563
564 /* Conditional tag - see opcode_lookup. */
565 unsigned int tag : 4;
566
567 /* Basic instruction code. */
568 unsigned int avalue : 28;
569
570 /* Thumb-format instruction code. */
571 unsigned int tvalue;
572
573 /* Which architecture variant provides this instruction. */
574 const arm_feature_set * avariant;
575 const arm_feature_set * tvariant;
576
577 /* Function to call to encode instruction in ARM format. */
578 void (* aencode) (void);
579
580 /* Function to call to encode instruction in Thumb format. */
581 void (* tencode) (void);
582 };
583
584 /* Defines for various bits that we will want to toggle. */
585 #define INST_IMMEDIATE 0x02000000
586 #define OFFSET_REG 0x02000000
587 #define HWOFFSET_IMM 0x00400000
588 #define SHIFT_BY_REG 0x00000010
589 #define PRE_INDEX 0x01000000
590 #define INDEX_UP 0x00800000
591 #define WRITE_BACK 0x00200000
592 #define LDM_TYPE_2_OR_3 0x00400000
593 #define CPSI_MMOD 0x00020000
594
595 #define LITERAL_MASK 0xf000f000
596 #define OPCODE_MASK 0xfe1fffff
597 #define V4_STR_BIT 0x00000020
598
599 #define T2_SUBS_PC_LR 0xf3de8f00
600
601 #define DATA_OP_SHIFT 21
602
603 #define T2_OPCODE_MASK 0xfe1fffff
604 #define T2_DATA_OP_SHIFT 21
605
606 /* Codes to distinguish the arithmetic instructions. */
607 #define OPCODE_AND 0
608 #define OPCODE_EOR 1
609 #define OPCODE_SUB 2
610 #define OPCODE_RSB 3
611 #define OPCODE_ADD 4
612 #define OPCODE_ADC 5
613 #define OPCODE_SBC 6
614 #define OPCODE_RSC 7
615 #define OPCODE_TST 8
616 #define OPCODE_TEQ 9
617 #define OPCODE_CMP 10
618 #define OPCODE_CMN 11
619 #define OPCODE_ORR 12
620 #define OPCODE_MOV 13
621 #define OPCODE_BIC 14
622 #define OPCODE_MVN 15
623
624 #define T2_OPCODE_AND 0
625 #define T2_OPCODE_BIC 1
626 #define T2_OPCODE_ORR 2
627 #define T2_OPCODE_ORN 3
628 #define T2_OPCODE_EOR 4
629 #define T2_OPCODE_ADD 8
630 #define T2_OPCODE_ADC 10
631 #define T2_OPCODE_SBC 11
632 #define T2_OPCODE_SUB 13
633 #define T2_OPCODE_RSB 14
634
635 #define T_OPCODE_MUL 0x4340
636 #define T_OPCODE_TST 0x4200
637 #define T_OPCODE_CMN 0x42c0
638 #define T_OPCODE_NEG 0x4240
639 #define T_OPCODE_MVN 0x43c0
640
641 #define T_OPCODE_ADD_R3 0x1800
642 #define T_OPCODE_SUB_R3 0x1a00
643 #define T_OPCODE_ADD_HI 0x4400
644 #define T_OPCODE_ADD_ST 0xb000
645 #define T_OPCODE_SUB_ST 0xb080
646 #define T_OPCODE_ADD_SP 0xa800
647 #define T_OPCODE_ADD_PC 0xa000
648 #define T_OPCODE_ADD_I8 0x3000
649 #define T_OPCODE_SUB_I8 0x3800
650 #define T_OPCODE_ADD_I3 0x1c00
651 #define T_OPCODE_SUB_I3 0x1e00
652
653 #define T_OPCODE_ASR_R 0x4100
654 #define T_OPCODE_LSL_R 0x4080
655 #define T_OPCODE_LSR_R 0x40c0
656 #define T_OPCODE_ROR_R 0x41c0
657 #define T_OPCODE_ASR_I 0x1000
658 #define T_OPCODE_LSL_I 0x0000
659 #define T_OPCODE_LSR_I 0x0800
660
661 #define T_OPCODE_MOV_I8 0x2000
662 #define T_OPCODE_CMP_I8 0x2800
663 #define T_OPCODE_CMP_LR 0x4280
664 #define T_OPCODE_MOV_HR 0x4600
665 #define T_OPCODE_CMP_HR 0x4500
666
667 #define T_OPCODE_LDR_PC 0x4800
668 #define T_OPCODE_LDR_SP 0x9800
669 #define T_OPCODE_STR_SP 0x9000
670 #define T_OPCODE_LDR_IW 0x6800
671 #define T_OPCODE_STR_IW 0x6000
672 #define T_OPCODE_LDR_IH 0x8800
673 #define T_OPCODE_STR_IH 0x8000
674 #define T_OPCODE_LDR_IB 0x7800
675 #define T_OPCODE_STR_IB 0x7000
676 #define T_OPCODE_LDR_RW 0x5800
677 #define T_OPCODE_STR_RW 0x5000
678 #define T_OPCODE_LDR_RH 0x5a00
679 #define T_OPCODE_STR_RH 0x5200
680 #define T_OPCODE_LDR_RB 0x5c00
681 #define T_OPCODE_STR_RB 0x5400
682
683 #define T_OPCODE_PUSH 0xb400
684 #define T_OPCODE_POP 0xbc00
685
686 #define T_OPCODE_BRANCH 0xe000
687
688 #define THUMB_SIZE 2 /* Size of thumb instruction. */
689 #define THUMB_PP_PC_LR 0x0100
690 #define THUMB_LOAD_BIT 0x0800
691 #define THUMB2_LOAD_BIT 0x00100000
692
/* Diagnostic strings for common encoding errors.  Each macro expands to
   an expression (no trailing semicolon!) so it can be assigned to
   inst.error or embedded in larger expressions.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Bug fix: this macro had a stray trailing semicolon, which would be
   pasted into any expression using it (e.g. a ternary or comma
   expression) and cause a syntax error there.  */
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
708
709 static struct hash_control * arm_ops_hsh;
710 static struct hash_control * arm_cond_hsh;
711 static struct hash_control * arm_shift_hsh;
712 static struct hash_control * arm_psr_hsh;
713 static struct hash_control * arm_v7m_psr_hsh;
714 static struct hash_control * arm_reg_hsh;
715 static struct hash_control * arm_reloc_hsh;
716 static struct hash_control * arm_barrier_opt_hsh;
717
718 /* Stuff needed to resolve the label ambiguity
719 As:
720 ...
721 label: <insn>
722 may differ from:
723 ...
724 label:
725 <insn> */
726
727 symbolS * last_label_seen;
728 static int label_is_thumb_function_name = FALSE;
729
730 /* Literal pool structure. Held on a per-section
731 and per-sub-section basis. */
732
733 #define MAX_LITERAL_POOL_SIZE 1024
734 typedef struct literal_pool
735 {
736 expressionS literals [MAX_LITERAL_POOL_SIZE];
737 unsigned int next_free_entry;
738 unsigned int id;
739 symbolS * symbol;
740 segT section;
741 subsegT sub_section;
742 struct literal_pool * next;
743 } literal_pool;
744
745 /* Pointer to a linked list of literal pools. */
746 literal_pool * list_of_pools = NULL;
747
748 #ifdef OBJ_ELF
749 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
750 #else
751 static struct current_it now_it;
752 #endif
753
754 static inline int
755 now_it_compatible (int cond)
756 {
757 return (cond & ~1) == (now_it.cc & ~1);
758 }
759
760 static inline int
761 conditional_insn (void)
762 {
763 return inst.cond != COND_ALWAYS;
764 }
765
766 static int in_it_block (void);
767
768 static int handle_it_state (void);
769
770 static void force_automatic_it_block_close (void);
771
772 static void it_fsm_post_encode (void);
773
774 #define set_it_insn_type(type) \
775 do \
776 { \
777 inst.it_insn_type = type; \
778 if (handle_it_state () == FAIL) \
779 return; \
780 } \
781 while (0)
782
783 #define set_it_insn_type_nonvoid(type, failret) \
784 do \
785 { \
786 inst.it_insn_type = type; \
787 if (handle_it_state () == FAIL) \
788 return failret; \
789 } \
790 while(0)
791
792 #define set_it_insn_type_last() \
793 do \
794 { \
795 if (inst.cond == COND_ALWAYS) \
796 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
797 else \
798 set_it_insn_type (INSIDE_IT_LAST_INSN); \
799 } \
800 while (0)
801
802 /* Pure syntax. */
803
804 /* This array holds the chars that always start a comment. If the
805 pre-processor is disabled, these aren't very useful. */
806 const char comment_chars[] = "@";
807
808 /* This array holds the chars that only start a comment at the beginning of
809 a line. If the line seems to have the form '# 123 filename'
810 .line and .file directives will appear in the pre-processed output. */
811 /* Note that input_file.c hand checks for '#' at the beginning of the
812 first line of the input file. This is because the compiler outputs
813 #NO_APP at the beginning of its output. */
814 /* Also note that comments like this one will always work. */
815 const char line_comment_chars[] = "#";
816
817 const char line_separator_chars[] = ";";
818
819 /* Chars that can be used to separate mant
820 from exp in floating point numbers. */
821 const char EXP_CHARS[] = "eE";
822
823 /* Chars that mean this number is a floating point constant. */
824 /* As in 0f12.456 */
825 /* or 0d1.2345e12 */
826
827 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
828
829 /* Prefix characters that indicate the start of an immediate
830 value. */
831 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
832
833 /* Separator character handling. */
834
835 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
836
837 static inline int
838 skip_past_char (char ** str, char c)
839 {
840 if (**str == c)
841 {
842 (*str)++;
843 return SUCCESS;
844 }
845 else
846 return FAIL;
847 }
848
849 #define skip_past_comma(str) skip_past_char (str, ',')
850
851 /* Arithmetic expressions (possibly involving symbols). */
852
853 /* Return TRUE if anything in the expression is a bignum. */
854
855 static int
856 walk_no_bignums (symbolS * sp)
857 {
858 if (symbol_get_value_expression (sp)->X_op == O_big)
859 return 1;
860
861 if (symbol_get_value_expression (sp)->X_add_symbol)
862 {
863 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
864 || (symbol_get_value_expression (sp)->X_op_symbol
865 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
866 }
867
868 return 0;
869 }
870
871 static int in_my_get_expression = 0;
872
873 /* Third argument to my_get_expression. */
874 #define GE_NO_PREFIX 0
875 #define GE_IMM_PREFIX 1
876 #define GE_OPT_PREFIX 2
877 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
878 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
879 #define GE_OPT_PREFIX_BIG 3
880
/* Parse an arbitrary expression from *STR into EP.  PREFIX_MODE is one
   of the GE_* values above and controls whether an immediate prefix
   ('#' or '$') is required, optional, or disallowed.  On success,
   return 0 and advance *STR past the expression.  On failure, set
   inst.error (unless already set) and return nonzero -- note that the
   failure paths return either FAIL (-1) or 1, so callers must test for
   nonzero rather than for FAIL specifically.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic parser works on input_line_pointer, so temporarily swap
     *STR in and restore the original afterwards.  in_my_get_expression
     tells md_operand to flag unparsable operands as O_illegal.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  /* a.out cannot represent relocations against arbitrary sections.  */
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
964
965 /* Turn a string in input_line_pointer into a floating point constant
966 of type TYPE, and store the appropriate bytes in *LITP. The number
967 of LITTLENUMS emitted is stored in *SIZEP. An error message is
968 returned, or NULL on OK.
969
970 Note that fp constants aren't represent in the normal way on the ARM.
971 In big endian mode, things are as expected. However, in little endian
972 mode fp constants are big-endian word-wise, and little-endian byte-wise
973 within the words. For example, (double) 1.1 in big endian mode is
974 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
975 the byte sequence 99 99 f1 3f 9a 99 99 99.
976
977 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
978
979 char *
980 md_atof (int type, char * litP, int * sizeP)
981 {
982 int prec;
983 LITTLENUM_TYPE words[MAX_LITTLENUMS];
984 char *t;
985 int i;
986
987 switch (type)
988 {
989 case 'f':
990 case 'F':
991 case 's':
992 case 'S':
993 prec = 2;
994 break;
995
996 case 'd':
997 case 'D':
998 case 'r':
999 case 'R':
1000 prec = 4;
1001 break;
1002
1003 case 'x':
1004 case 'X':
1005 prec = 5;
1006 break;
1007
1008 case 'p':
1009 case 'P':
1010 prec = 5;
1011 break;
1012
1013 default:
1014 *sizeP = 0;
1015 return _("Unrecognized or unsupported floating point constant");
1016 }
1017
1018 t = atof_ieee (input_line_pointer, type, words);
1019 if (t)
1020 input_line_pointer = t;
1021 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1022
1023 if (target_big_endian)
1024 {
1025 for (i = 0; i < prec; i++)
1026 {
1027 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1028 litP += sizeof (LITTLENUM_TYPE);
1029 }
1030 }
1031 else
1032 {
1033 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1034 for (i = prec - 1; i >= 0; i--)
1035 {
1036 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1037 litP += sizeof (LITTLENUM_TYPE);
1038 }
1039 else
1040 /* For a 4 byte float the order of elements in `words' is 1 0.
1041 For an 8 byte float the order is 1 0 3 2. */
1042 for (i = 0; i < prec; i += 2)
1043 {
1044 md_number_to_chars (litP, (valueT) words[i + 1],
1045 sizeof (LITTLENUM_TYPE));
1046 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1047 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1048 litP += 2 * sizeof (LITTLENUM_TYPE);
1049 }
1050 }
1051
1052 return NULL;
1053 }
1054
1055 /* We handle all bad expressions here, so that we can report the faulty
1056 instruction in the error message. */
/* Standard GAS hook, called when the generic expression parser hits
   something it cannot handle.  If we are inside my_get_expression (),
   mark the expression illegal so the caller reports the whole faulty
   instruction rather than a bare expression error.  */
void
md_operand (expressionS * exp)
{
  /* in_my_get_expression is a file-scope flag; presumably set for the
     duration of my_get_expression () (set/clear not visible here).  */
  if (in_my_get_expression)
    exp->X_op = O_illegal;
}
1063
1064 /* Immediate values. */
1065
1066 /* Generic immediate-value read function for use in directives.
1067 Accepts anything that 'expression' can fold to a constant.
1068 *val receives the number. */
1069 #ifdef OBJ_ELF
1070 static int
1071 immediate_for_directive (int *val)
1072 {
1073 expressionS exp;
1074 exp.X_op = O_illegal;
1075
1076 if (is_immediate_prefix (*input_line_pointer))
1077 {
1078 input_line_pointer++;
1079 expression (&exp);
1080 }
1081
1082 if (exp.X_op != O_constant)
1083 {
1084 as_bad (_("expected #constant"));
1085 ignore_rest_of_line ();
1086 return FAIL;
1087 }
1088 *val = exp.X_add_number;
1089 return SUCCESS;
1090 }
1091 #endif
1092
1093 /* Register parsing. */
1094
1095 /* Generic register parser. CCP points to what should be the
1096 beginning of a register name. If it is indeed a valid register
1097 name, advance CCP over it and return the reg_entry structure;
1098 otherwise return NULL. Does not issue diagnostics. */
1099
1100 static struct reg_entry *
1101 arm_reg_parse_multi (char **ccp)
1102 {
1103 char *start = *ccp;
1104 char *p;
1105 struct reg_entry *reg;
1106
1107 #ifdef REGISTER_PREFIX
1108 if (*start != REGISTER_PREFIX)
1109 return NULL;
1110 start++;
1111 #endif
1112 #ifdef OPTIONAL_REGISTER_PREFIX
1113 if (*start == OPTIONAL_REGISTER_PREFIX)
1114 start++;
1115 #endif
1116
1117 p = start;
1118 if (!ISALPHA (*p) || !is_name_beginner (*p))
1119 return NULL;
1120
1121 do
1122 p++;
1123 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1124
1125 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1126
1127 if (!reg)
1128 return NULL;
1129
1130 *ccp = p;
1131 return reg;
1132 }
1133
/* Handle the alternative spellings accepted for a few register
   classes.  START is the unconsumed input, REG the (possibly NULL)
   entry already found by the generic parser, TYPE the class wanted.
   Returns a register number, or FAIL if no alternative applies.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* NOTE(review): there is no `break' here, so when the bare-number
	 parse fails control falls through into the REG_TYPE_MMXWC case.
	 This looks intentional (a failed strtoul leaves *ccp == start),
	 but confirm; if deliberate it deserves an explicit
	 "Fall through" marker.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1171
1172 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1173 return value is the register number or FAIL. */
1174
1175 static int
1176 arm_reg_parse (char **ccp, enum arm_reg_type type)
1177 {
1178 char *start = *ccp;
1179 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1180 int ret;
1181
1182 /* Do not allow a scalar (reg+index) to parse as a register. */
1183 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1184 return FAIL;
1185
1186 if (reg && reg->type == type)
1187 return reg->number;
1188
1189 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1190 return ret;
1191
1192 *ccp = start;
1193 return FAIL;
1194 }
1195
1196 /* Parse a Neon type specifier. *STR should point at the leading '.'
1197 character. Does no verification at this stage that the type fits the opcode
1198 properly. E.g.,
1199
1200 .i32.i32.s16
1201 .s32.f32
1202 .u16
1203
1204 Can all be legally parsed by this function.
1205
1206 Fills in neon_type struct pointer with parsed information, and updates STR
1207 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1208 type, FAIL if not. */
1209
1210 static int
1211 parse_neon_type (struct neon_type *type, char **str)
1212 {
1213 char *ptr = *str;
1214
1215 if (type)
1216 type->elems = 0;
1217
1218 while (type->elems < NEON_MAX_TYPE_ELS)
1219 {
1220 enum neon_el_type thistype = NT_untyped;
1221 unsigned thissize = -1u;
1222
1223 if (*ptr != '.')
1224 break;
1225
1226 ptr++;
1227
1228 /* Just a size without an explicit type. */
1229 if (ISDIGIT (*ptr))
1230 goto parsesize;
1231
1232 switch (TOLOWER (*ptr))
1233 {
1234 case 'i': thistype = NT_integer; break;
1235 case 'f': thistype = NT_float; break;
1236 case 'p': thistype = NT_poly; break;
1237 case 's': thistype = NT_signed; break;
1238 case 'u': thistype = NT_unsigned; break;
1239 case 'd':
1240 thistype = NT_float;
1241 thissize = 64;
1242 ptr++;
1243 goto done;
1244 default:
1245 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1246 return FAIL;
1247 }
1248
1249 ptr++;
1250
1251 /* .f is an abbreviation for .f32. */
1252 if (thistype == NT_float && !ISDIGIT (*ptr))
1253 thissize = 32;
1254 else
1255 {
1256 parsesize:
1257 thissize = strtoul (ptr, &ptr, 10);
1258
1259 if (thissize != 8 && thissize != 16 && thissize != 32
1260 && thissize != 64)
1261 {
1262 as_bad (_("bad size %d in type specifier"), thissize);
1263 return FAIL;
1264 }
1265 }
1266
1267 done:
1268 if (type)
1269 {
1270 type->el[type->elems].type = thistype;
1271 type->el[type->elems].size = thissize;
1272 type->elems++;
1273 }
1274 }
1275
1276 /* Empty/missing type is not a successful parse. */
1277 if (type->elems == 0)
1278 return FAIL;
1279
1280 *str = ptr;
1281
1282 return SUCCESS;
1283 }
1284
1285 /* Errors may be set multiple times during parsing or bit encoding
1286 (particularly in the Neon bits), but usually the earliest error which is set
1287 will be the most meaningful. Avoid overwriting it with later (cascading)
1288 errors by calling this function. */
1289
1290 static void
1291 first_error (const char *err)
1292 {
1293 if (!inst.error)
1294 inst.error = err;
1295 }
1296
1297 /* Parse a single type, e.g. ".s32", leading period included. */
1298 static int
1299 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1300 {
1301 char *str = *ccp;
1302 struct neon_type optype;
1303
1304 if (*str == '.')
1305 {
1306 if (parse_neon_type (&optype, &str) == SUCCESS)
1307 {
1308 if (optype.elems == 1)
1309 *vectype = optype.el[0];
1310 else
1311 {
1312 first_error (_("only one type should be specified for operand"));
1313 return FAIL;
1314 }
1315 }
1316 else
1317 {
1318 first_error (_("vector type expected"));
1319 return FAIL;
1320 }
1321 }
1322 else
1323 return FAIL;
1324
1325 *ccp = str;
1326
1327 return SUCCESS;
1328 }
1329
/* Special marker values for lane indices (whose normal range is 0-7);
   together with the markers, an index still fits into a 4-bit integer.  */
1332
#define NEON_ALL_LANES 15	/* "dN[]": operate on all lanes.  */
#define NEON_INTERLEAVE_LANES 14	/* No lane given: interleaving.  */
1335
1336 /* Parse either a register or a scalar, with an optional type. Return the
1337 register number, and optionally fill in the actual type of the register
1338 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1339 type/index information in *TYPEINFO. */
1340
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with an empty type/index record.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  /* After narrowing, the register must match the requested class.  */
  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index attached to the register by .dn/.qn.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides nothing: a pre-attached type
     may not be redefined.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" means all lanes; otherwise a constant index is required.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1444
/* Like arm_reg_parse, but allow the following extra features:
1446 - If RTYPE is non-zero, return the (possibly restricted) type of the
1447 register (e.g. Neon double or quad reg when either has been requested).
1448 - If this is a Neon vector type with additional type information, fill
1449 in the struct pointed to by VECTYPE (if non-NULL).
1450 This function will fault on encountering a scalar. */
1451
1452 static int
1453 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1454 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1455 {
1456 struct neon_typed_alias atype;
1457 char *str = *ccp;
1458 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1459
1460 if (reg == FAIL)
1461 return FAIL;
1462
1463 /* Do not allow a scalar (reg+index) to parse as a register. */
1464 if ((atype.defined & NTA_HASINDEX) != 0)
1465 {
1466 first_error (_("register operand expected, but got scalar"));
1467 return FAIL;
1468 }
1469
1470 if (vectype)
1471 *vectype = atype.eltype;
1472
1473 *ccp = str;
1474
1475 return reg;
1476 }
1477
/* Decode the value returned by parse_scalar: high bits are the D
   register number, low four bits the lane index.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1480
1481 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1482 have enough information to be able to do a good job bounds-checking. So, we
1483 just do easy checks here, and do further checks later. */
1484
1485 static int
1486 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1487 {
1488 int reg;
1489 char *str = *ccp;
1490 struct neon_typed_alias atype;
1491
1492 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1493
1494 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1495 return FAIL;
1496
1497 if (atype.index == NEON_ALL_LANES)
1498 {
1499 first_error (_("scalar must have an index"));
1500 return FAIL;
1501 }
1502 else if (atype.index >= 64 / elsize)
1503 {
1504 first_error (_("scalar index out of range"));
1505 return FAIL;
1506 }
1507
1508 if (type)
1509 *type = atype.eltype;
1510
1511 *ccp = str;
1512
1513 return reg * 16 + atype.index;
1514 }
1515
1516 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1517
1518 static long
1519 parse_reg_list (char ** strp)
1520 {
1521 char * str = * strp;
1522 long range = 0;
1523 int another_range;
1524
1525 /* We come back here if we get ranges concatenated by '+' or '|'. */
1526 do
1527 {
1528 another_range = 0;
1529
1530 if (*str == '{')
1531 {
1532 int in_range = 0;
1533 int cur_reg = -1;
1534
1535 str++;
1536 do
1537 {
1538 int reg;
1539
1540 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1541 {
1542 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1543 return FAIL;
1544 }
1545
1546 if (in_range)
1547 {
1548 int i;
1549
1550 if (reg <= cur_reg)
1551 {
1552 first_error (_("bad range in register list"));
1553 return FAIL;
1554 }
1555
1556 for (i = cur_reg + 1; i < reg; i++)
1557 {
1558 if (range & (1 << i))
1559 as_tsktsk
1560 (_("Warning: duplicated register (r%d) in register list"),
1561 i);
1562 else
1563 range |= 1 << i;
1564 }
1565 in_range = 0;
1566 }
1567
1568 if (range & (1 << reg))
1569 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1570 reg);
1571 else if (reg <= cur_reg)
1572 as_tsktsk (_("Warning: register range not in ascending order"));
1573
1574 range |= 1 << reg;
1575 cur_reg = reg;
1576 }
1577 while (skip_past_comma (&str) != FAIL
1578 || (in_range = 1, *str++ == '-'));
1579 str--;
1580
1581 if (*str++ != '}')
1582 {
1583 first_error (_("missing `}'"));
1584 return FAIL;
1585 }
1586 }
1587 else
1588 {
1589 expressionS exp;
1590
1591 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1592 return FAIL;
1593
1594 if (exp.X_op == O_constant)
1595 {
1596 if (exp.X_add_number
1597 != (exp.X_add_number & 0x0000ffff))
1598 {
1599 inst.error = _("invalid register mask");
1600 return FAIL;
1601 }
1602
1603 if ((range & exp.X_add_number) != 0)
1604 {
1605 int regno = range & exp.X_add_number;
1606
1607 regno &= -regno;
1608 regno = (1 << regno) - 1;
1609 as_tsktsk
1610 (_("Warning: duplicated register (r%d) in register list"),
1611 regno);
1612 }
1613
1614 range |= exp.X_add_number;
1615 }
1616 else
1617 {
1618 if (inst.reloc.type != 0)
1619 {
1620 inst.error = _("expression too complex");
1621 return FAIL;
1622 }
1623
1624 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1625 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1626 inst.reloc.pc_rel = 0;
1627 }
1628 }
1629
1630 if (*str == '|' || *str == '+')
1631 {
1632 str++;
1633 another_range = 1;
1634 }
1635 }
1636 while (another_range);
1637
1638 *strp = str;
1639 return range;
1640 }
1641
1642 /* Types of registers in a list. */
1643
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision VFP registers (32 available).  */
  REGLIST_VFP_D,	/* Double-precision VFP registers (16 or 32).  */
  REGLIST_NEON_D	/* Neon D registers, with list syntax extensions.  */
};
1650
1651 /* Parse a VFP register list. If the string is invalid return FAIL.
1652 Otherwise return the number of registers, and set PBASE to the first
1653 register. Parses registers of type ETYPE.
1654 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1655 - Q registers can be used to specify pairs of D registers
1656 - { } can be omitted from around a singleton register list
1657 FIXME: This is not implemented, as it would require backtracking in
1658 some cases, e.g.:
1659 vtbl.8 d3,d4,d5
1660 This could be done (the meaning isn't really ambiguous), but doesn't
1661 fit in well with the current parsing framework.
1662 - 32 D registers may be used (also true for VFPv3).
1663 FIXME: Types are ignored in these register lists, which is probably a
1664 bug. */
1665
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;	/* One bit per D (or S) register.  */
  int i;

  if (*str != '{')
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  str++;

  /* Pick the register class; the D-register limit is set below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start above the valid range; lowered as registers are parsed.  */
  base_reg = max_regs;

  do
    {
      /* setmask/addregs describe how many D-register slots one parsed
	 register occupies (two for a Q register).  */
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      /* A register may appear only once in the list.  */
      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  /* A Q register covers a pair of D slots; include the second.  */
	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every remaining register of the range.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): the closing '}' is skipped but never verified here.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1831
1832 /* True if two alias types are the same. */
1833
1834 static bfd_boolean
1835 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1836 {
1837 if (!a && !b)
1838 return TRUE;
1839
1840 if (!a || !b)
1841 return FALSE;
1842
1843 if (a->defined != b->defined)
1844 return FALSE;
1845
1846 if ((a->defined & NTA_HASTYPE) != 0
1847 && (a->eltype.type != b->eltype.type
1848 || a->eltype.size != b->eltype.size))
1849 return FALSE;
1850
1851 if ((a->defined & NTA_HASINDEX) != 0
1852 && (a->index != b->index))
1853 return FALSE;
1854
1855 return TRUE;
1856 }
1857
1858 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1859 The base register is put in *PBASE.
1860 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1861 the return value.
1862 The register stride (minus one) is put in bit 4 of the return value.
1863 Bits [6:5] encode the list length (minus one).
1864 The type of the list elements is put in *ELTYPE, if non-NULL. */
1865
#define NEON_LANE(X)		((X) & 0xf)		/* Lane, or NEON_*_LANES.  */
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)	/* 1 or 2.  */
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)	/* 1 to 4.  */
1869
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  int addregs = 1;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  /* The braces may be omitted around a singleton list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: fixes the base, the register width (D vs Q)
	     and the type/index every later element must match.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	      addregs = 2;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Later registers must continue the arithmetic sequence.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register covered by the range (a Q register
	     counts for two).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* All elements must agree on the lane index.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described by NEON_LANE,
     NEON_REG_STRIDE and NEON_REGLIST_LENGTH.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2024
2025 /* Parse an explicit relocation suffix on an expression. This is
2026 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2027 arm_reloc_hsh contains no entries, so this function can only
2028 succeed if there is no () after the word. Returns -1 on error,
2029 BFD_RELOC_UNUSED if there wasn't any suffix. */
2030 static int
2031 parse_reloc (char **str)
2032 {
2033 struct reloc_entry *r;
2034 char *p, *q;
2035
2036 if (**str != '(')
2037 return BFD_RELOC_UNUSED;
2038
2039 p = *str + 1;
2040 q = p;
2041
2042 while (*q && *q != ')' && *q != ',')
2043 q++;
2044 if (*q != ')')
2045 return -1;
2046
2047 if ((r = (struct reloc_entry *)
2048 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2049 return -1;
2050
2051 *str = q + 1;
2052 return r->reloc;
2053 }
2054
2055 /* Directives: register aliases. */
2056
2057 static struct reg_entry *
2058 insert_reg_alias (char *str, int number, int type)
2059 {
2060 struct reg_entry *new_reg;
2061 const char *name;
2062
2063 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2064 {
2065 if (new_reg->builtin)
2066 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2067
2068 /* Only warn about a redefinition if it's not defined as the
2069 same register. */
2070 else if (new_reg->number != number || new_reg->type != type)
2071 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2072
2073 return NULL;
2074 }
2075
2076 name = xstrdup (str);
2077 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2078
2079 new_reg->name = name;
2080 new_reg->number = number;
2081 new_reg->type = type;
2082 new_reg->builtin = FALSE;
2083 new_reg->neon = NULL;
2084
2085 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2086 abort ();
2087
2088 return new_reg;
2089 }
2090
2091 static void
2092 insert_neon_reg_alias (char *str, int number, int type,
2093 struct neon_typed_alias *atype)
2094 {
2095 struct reg_entry *reg = insert_reg_alias (str, number, type);
2096
2097 if (!reg)
2098 {
2099 first_error (_("attempt to redefine typed alias"));
2100 return;
2101 }
2102
2103 if (atype)
2104 {
2105 reg->neon = (struct neon_typed_alias *)
2106 xmalloc (sizeof (struct neon_typed_alias));
2107 *reg->neon = *atype;
2108 }
2109 }
2110
2111 /* Look for the .req directive. This is of the form:
2112
2113 new_register_name .req existing_register_name
2114
2115 If we find one, or if it looks sufficiently like one that we want to
2116 handle any error here, return TRUE. Otherwise return FALSE. */
2117
2118 static bfd_boolean
2119 create_register_alias (char * newname, char *p)
2120 {
2121 struct reg_entry *old;
2122 char *oldname, *nbuf;
2123 size_t nlen;
2124
2125 /* The input scrubber ensures that whitespace after the mnemonic is
2126 collapsed to single spaces. */
2127 oldname = p;
2128 if (strncmp (oldname, " .req ", 6) != 0)
2129 return FALSE;
2130
2131 oldname += 6;
2132 if (*oldname == '\0')
2133 return FALSE;
2134
2135 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2136 if (!old)
2137 {
2138 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2139 return TRUE;
2140 }
2141
2142 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2143 the desired alias name, and p points to its end. If not, then
2144 the desired alias name is in the global original_case_string. */
2145 #ifdef TC_CASE_SENSITIVE
2146 nlen = p - newname;
2147 #else
2148 newname = original_case_string;
2149 nlen = strlen (newname);
2150 #endif
2151
2152 nbuf = (char *) alloca (nlen + 1);
2153 memcpy (nbuf, newname, nlen);
2154 nbuf[nlen] = '\0';
2155
2156 /* Create aliases under the new name as stated; an all-lowercase
2157 version of the new name; and an all-uppercase version of the new
2158 name. */
2159 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2160 {
2161 for (p = nbuf; *p; p++)
2162 *p = TOUPPER (*p);
2163
2164 if (strncmp (nbuf, newname, nlen))
2165 {
2166 /* If this attempt to create an additional alias fails, do not bother
2167 trying to create the all-lower case alias. We will fail and issue
2168 a second, duplicate error message. This situation arises when the
2169 programmer does something like:
2170 foo .req r0
2171 Foo .req r1
2172 The second .req creates the "Foo" alias but then fails to create
2173 the artificial FOO alias because it has already been created by the
2174 first .req. */
2175 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2176 return TRUE;
2177 }
2178
2179 for (p = nbuf; *p; p++)
2180 *p = TOLOWER (*p);
2181
2182 if (strncmp (nbuf, newname, nlen))
2183 insert_reg_alias (nbuf, old->number, old->type);
2184 }
2185
2186 return TRUE;
2187 }
2188
/* Create a Neon typed/indexed register alias using directives, e.g.:
    X .dn d5.s32[1]
    Y .qn 6.s16
    Z .dn d7
    T .dn Z[0]
   These typed registers can be used instead of the types specified after the
   Neon mnemonic, so long as all operands given have types. Types can also be
   specified directly, e.g.:
    vadd d0.s32, d1.s32, d2.s32
   NEWNAME points to the start of the alias name; P points just past it, at
   the " .dn "/" .qn " directive text.  Returns TRUE if an alias was created,
   FALSE if the line is not a well-formed .dn/.qn directive.  */

static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;	/* Used when the base is a bare number.  */
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;
  int namelen;

  /* No type and no scalar index seen yet.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn defines a D-register (64-bit) alias, .qn a Q-register
     (128-bit) alias.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* A bare number names a register of the requested type; Q
	 registers are recorded in units of two D registers.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* If the base register is itself a typed alias, start from its
     type/index info before applying any new qualifiers.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* Copy the alias name into a NUL-terminated buffer.  */
  namelen = nameend - newname;
  namebuf = (char *) alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  return TRUE;
}
2329
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  If this handler
   runs, the directive appeared in the wrong position.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2338
/* Like s_req: .dn must follow the alias name, so reaching this
   handler means the directive was used at the start of a line.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2344
/* Like s_req: .qn must follow the alias name, so reaching this
   handler means the directive was used at the start of a line.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2350
2351 /* The .unreq directive deletes an alias which was previously defined
2352 by .req. For example:
2353
2354 my_alias .req r11
2355 .unreq my_alias */
2356
2357 static void
2358 s_unreq (int a ATTRIBUTE_UNUSED)
2359 {
2360 char * name;
2361 char saved_char;
2362
2363 name = input_line_pointer;
2364
2365 while (*input_line_pointer != 0
2366 && *input_line_pointer != ' '
2367 && *input_line_pointer != '\n')
2368 ++input_line_pointer;
2369
2370 saved_char = *input_line_pointer;
2371 *input_line_pointer = 0;
2372
2373 if (!*name)
2374 as_bad (_("invalid syntax for .unreq directive"));
2375 else
2376 {
2377 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2378 name);
2379
2380 if (!reg)
2381 as_bad (_("unknown register alias '%s'"), name);
2382 else if (reg->builtin)
2383 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2384 name);
2385 else
2386 {
2387 char * p;
2388 char * nbuf;
2389
2390 hash_delete (arm_reg_hsh, name, FALSE);
2391 free ((char *) reg->name);
2392 if (reg->neon)
2393 free (reg->neon);
2394 free (reg);
2395
2396 /* Also locate the all upper case and all lower case versions.
2397 Do not complain if we cannot find one or the other as it
2398 was probably deleted above. */
2399
2400 nbuf = strdup (name);
2401 for (p = nbuf; *p; p++)
2402 *p = TOUPPER (*p);
2403 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2404 if (reg)
2405 {
2406 hash_delete (arm_reg_hsh, nbuf, FALSE);
2407 free ((char *) reg->name);
2408 if (reg->neon)
2409 free (reg->neon);
2410 free (reg);
2411 }
2412
2413 for (p = nbuf; *p; p++)
2414 *p = TOLOWER (*p);
2415 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2416 if (reg)
2417 {
2418 hash_delete (arm_reg_hsh, nbuf, FALSE);
2419 free ((char *) reg->name);
2420 if (reg->neon)
2421 free (reg->neon);
2422 free (reg);
2423 }
2424
2425 free (nbuf);
2426 }
2427 }
2428
2429 *input_line_pointer = saved_char;
2430 demand_empty_rest_of_line ();
2431 }
2432
2433 /* Directives: Instruction set selection. */
2434
2435 #ifdef OBJ_ELF
2436 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2437 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2438 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2439 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2440
/* Create a new mapping symbol for the transition to STATE, at offset
   VALUE within FRAG, and record it in the frag's first/last map
   fields for later consistency checking.  */

static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping symbol name for this state:
     $a = ARM code, $t = Thumb code, $d = data.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark the new symbol's ARM/Thumb-ness for interworking.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.  */
  if (value == 0)
    {
      know (frag->tc_frag_data.first_map == NULL);
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    know (S_GET_VALUE (frag->tc_frag_data.last_map) < S_GET_VALUE (symbolP));
  frag->tc_frag_data.last_map = symbolP;
}
2503
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  Emits a $d
   symbol at VALUE within FRAG and a STATE symbol BYTES later.  */

static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it so the new $d
     symbol does not collide with it at the same offset.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2530
2531 static void mapping_state_2 (enum mstate state, int max_chars);
2532
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Convenience predicate for testing one specific state transition.  */
#define TRANSITION(from, to) (mapstate == (from) && state == (to))

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later in the next else.  */
    return;
  else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
	   || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check it's size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  /* Record the new state and emit its mapping symbol.  */
  mapping_state_2 (state, 0);
#undef TRANSITION
}
2567
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Mapping symbols are only emitted into normal sections.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Back up over the MAX_CHARS bytes already reserved in the frag.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2587 #else
2588 #define mapping_state(x) ((void)0)
2589 #define mapping_state_2(x, y) ((void)0)
2590 #endif
2591
/* Find the real, Thumb encoded start of a Thumb function.  */

#ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  const char * name = S_GET_NAME (symbolP);

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  {
    char * stub_name = ACONCAT ((STUB_NAME, name, NULL));
    symbolS * stub_sym = symbol_find (stub_name);

    if (stub_sym != NULL)
      return stub_sym;
  }

  /* No stub symbol exists; fall back to the original symbol.  */
  as_warn (_("Failed to find real start of function: %s\n"), name);
  return symbolP;
}
#endif
2628
2629 static void
2630 opcode_select (int width)
2631 {
2632 switch (width)
2633 {
2634 case 16:
2635 if (! thumb_mode)
2636 {
2637 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2638 as_bad (_("selected processor does not support THUMB opcodes"));
2639
2640 thumb_mode = 1;
2641 /* No need to force the alignment, since we will have been
2642 coming from ARM mode, which is word-aligned. */
2643 record_alignment (now_seg, 1);
2644 }
2645 break;
2646
2647 case 32:
2648 if (thumb_mode)
2649 {
2650 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2651 as_bad (_("selected processor does not support ARM opcodes"));
2652
2653 thumb_mode = 0;
2654
2655 if (!need_pass_2)
2656 frag_align (2, 0, 0);
2657
2658 record_alignment (now_seg, 1);
2659 }
2660 break;
2661
2662 default:
2663 as_bad (_("invalid instruction size selected (%d)"), width);
2664 }
2665 }
2666
/* Implement the .arm directive: select 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2673
/* Implement the .thumb directive: select 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2680
2681 static void
2682 s_code (int unused ATTRIBUTE_UNUSED)
2683 {
2684 int temp;
2685
2686 temp = get_absolute_expression ();
2687 switch (temp)
2688 {
2689 case 16:
2690 case 32:
2691 opcode_select (temp);
2692 break;
2693
2694 default:
2695 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2696 }
2697 }
2698
/* Implement the .force_thumb directive: unconditionally switch to
   Thumb mode, bypassing the CPU feature check in opcode_select.  */

static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* thumb_mode == 2 marks "forced" Thumb, as opposed to the
	 normal value of 1 set by opcode_select.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2715
/* Implement the .thumb_func directive: switch to Thumb mode and mark
   the next label as the start of a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2725
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  EQUIV is non-zero for the .thumb_equ
   variant, in which case redefining an already-defined symbol is
   an error.  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;
  /* Restore the delimiter clobbered by get_symbol_end so the line can
     be scanned for the comma.  */
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      /* NUL-terminate the name just for the error message, then put
	 the delimiter back.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Assign the value from the expression after the comma.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2815
2816 /* Directives: Mode selection. */
2817
2818 /* .syntax [unified|divided] - choose the new unified syntax
2819 (same for Arm and Thumb encoding, modulo slight differences in what
2820 can be represented) or the old divergent syntax for each mode. */
2821 static void
2822 s_syntax (int unused ATTRIBUTE_UNUSED)
2823 {
2824 char *name, delim;
2825
2826 name = input_line_pointer;
2827 delim = get_symbol_end ();
2828
2829 if (!strcasecmp (name, "unified"))
2830 unified_syntax = TRUE;
2831 else if (!strcasecmp (name, "divided"))
2832 unified_syntax = FALSE;
2833 else
2834 {
2835 as_bad (_("unrecognized syntax mode \"%s\""), name);
2836 return;
2837 }
2838 *input_line_pointer = delim;
2839 demand_empty_rest_of_line ();
2840 }
2841
2842 /* Directives: sectioning and alignment. */
2843
2844 /* Same as s_align_ptwo but align 0 => align 2. */
2845
2846 static void
2847 s_align (int unused ATTRIBUTE_UNUSED)
2848 {
2849 int temp;
2850 bfd_boolean fill_p;
2851 long temp_fill;
2852 long max_alignment = 15;
2853
2854 temp = get_absolute_expression ();
2855 if (temp > max_alignment)
2856 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2857 else if (temp < 0)
2858 {
2859 as_bad (_("alignment negative. 0 assumed."));
2860 temp = 0;
2861 }
2862
2863 if (*input_line_pointer == ',')
2864 {
2865 input_line_pointer++;
2866 temp_fill = get_absolute_expression ();
2867 fill_p = TRUE;
2868 }
2869 else
2870 {
2871 fill_p = FALSE;
2872 temp_fill = 0;
2873 }
2874
2875 if (!temp)
2876 temp = 2;
2877
2878 /* Only make a frag if we HAVE to. */
2879 if (temp && !need_pass_2)
2880 {
2881 if (!fill_p && subseg_text_p (now_seg))
2882 frag_align_code (temp, 0);
2883 else
2884 frag_align (temp, (int) temp_fill, 0);
2885 }
2886 demand_empty_rest_of_line ();
2887
2888 record_alignment (now_seg, temp);
2889 }
2890
/* Implement the .bss directive: switch to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
2903
/* Implement the .even directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
2915
2916 /* Directives: Literal pools. */
2917
2918 static literal_pool *
2919 find_literal_pool (void)
2920 {
2921 literal_pool * pool;
2922
2923 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2924 {
2925 if (pool->section == now_seg
2926 && pool->sub_section == now_subseg)
2927 break;
2928 }
2929
2930 return pool;
2931 }
2932
/* Return the literal pool for the current (section, subsection),
   creating one (and giving it a fresh symbol and id) if necessary.  */

static literal_pool *
find_or_make_literal_pool (void)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool * pool;

  pool = find_literal_pool ();

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = (literal_pool *) xmalloc (sizeof (* pool));
      /* NOTE(review): xmalloc conventionally aborts on failure, so
	 this check looks unreachable; kept for safety.  */
      if (! pool)
	return NULL;

      pool->next_free_entry = 0;
      pool->section = now_seg;
      pool->sub_section = now_subseg;
      pool->next = list_of_pools;
      pool->symbol = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
    }

  /* Done.  */
  return pool;
}
2970
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  On success, rewrites
   inst.reloc.exp to reference the pool symbol plus the entry's byte
   offset.  Returns SUCCESS, or FAIL if the pool is full.  */

static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      /* Constants match on value and signedness.  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))
	break;

      /* Symbolic expressions match on symbol, op symbol and addend.  */
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;
    }

  /* Point the instruction's relocation at the pool entry: the pool
     symbol plus a 4-byte-per-entry offset.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3023
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  /* Copy the name into the notes obstack so it outlives the caller's
     buffer.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3074
3075
/* Implement the .ltorg directive: emit the current literal pool (if
   any) at this point, word-aligned, then mark the pool empty.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* The pool contents are data, not instructions.  */
  mapping_state (MAP_DATA);

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  /* The embedded \002 control character keeps the pool label from
     colliding with any user-written symbol.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3118
3119 #ifdef OBJ_ELF
3120 /* Forward declarations for functions below, in the MD interface
3121 section. */
3122 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3123 static valueT create_unwind_entry (int);
3124 static void start_unwind_section (const segT, int);
3125 static void add_unwind_opcode (valueT, int);
3126 static void flush_pending_unwind (void);
3127
3128 /* Directives: Data. */
3129
/* Implement data directives (.word etc) of NBYTES per value,
   additionally accepting a relocation suffix such as "(got)" after a
   symbolic operand.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  /* Everything emitted here is data.  */
  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* A symbolic operand may carry a relocation suffix.  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = (char *) alloca (input_line_pointer - base);
		  memcpy (save_buf, base, input_line_pointer - base);
		  /* Shift the expression text over the reloc suffix so
		     the whole operand can be re-parsed as one
		     expression, then restore the original text.  */
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the fixup at the end of the field if the
		     relocation is narrower than the field.  */
		  offset = nbytes - size;
		  p = frag_more ((int) nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3219
3220 /* Emit an expression containing a 32-bit thumb instruction.
3221 Implementation based on put_thumb32_insn. */
3222
3223 static void
3224 emit_thumb32_expr (expressionS * exp)
3225 {
3226 expressionS exp_high = *exp;
3227
3228 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3229 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3230 exp->X_add_number &= 0xffff;
3231 emit_expr (exp, (unsigned int) THUMB_SIZE);
3232 }
3233
/* Guess the instruction size based on the opcode.
   Returns 2 for a value that can only be a 16-bit Thumb instruction,
   4 for a full 32-bit Thumb-2 pattern, and 0 when the width cannot
   be determined from the value alone.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int value = (unsigned int) opcode;

  if (value < 0xe800u)
    return 2;

  if (value >= 0xe8000000u)
    return 4;

  /* Ambiguous: too big for a halfword opcode, too small for a full
     32-bit encoding.  */
  return 0;
}
3246
/* Emit one .inst operand EXP as an instruction of NBYTES (0 means
   deduce the size from the opcode value).  Updates the IT-block
   state machine accordingly.  Returns TRUE if something was
   emitted.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width given: guess it from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block tracking consistent with a
		 hand-written instruction at this point.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3291
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  Implements the
   .inst/.inst.n/.inst.w directives; NBYTES is 0, 2 or 4.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* ARM instructions are always 4 bytes, so .inst.n/.inst.w make
	 no sense outside Thumb mode.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3341
/* Parse a .rel31 directive: ".rel31 HIGHBIT, EXPR" emits a 32-bit
   word whose top bit is HIGHBIT (0 or 1) and whose low 31 bits get a
   self-relative R_ARM_PREL31 relocation against EXPR.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* The emitted word is data.  */
  mapping_state (MAP_DATA);

  expression (&exp);

  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3381
3382 /* Directives: AEABI stack-unwind tables. */
3383
/* Parse an unwind_fnstart directive.  Simply records the current location
   and resets the per-function unwind state; .fnstart may not nest.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3410
3411
/* Parse a handlerdata directive.  Creates the exception handling table entry
   for the function, so that language-specific handler data can follow
   in the current section.  Must appear between .fnstart and .fnend.  */

static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  create_unwind_entry (1);
}
3427
/* Parse an unwind_fnend directive.  Generates the index table entry:
   two words in the exception index section, the first pointing at the
   function start and the second either holding an inline unwind entry
   or pointing at the table entry created earlier.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fix records the reference without
	 emitting any bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  /* Allow a new .fnstart to begin.  */
  unwind.proc_start = NULL;
}
3496
3497
/* Parse an unwind_cantunwind directive.  Marks the current frame as not
   unwindable; incompatible with an explicit personality routine.  */

static void
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  /* -2 is the internal sentinel for "cannot unwind" (-1 means unset).  */
  unwind.personality_index = -2;
}
3512
3513
/* Parse a personalityindex directive.  Selects one of the predefined
   personality routines by number; mutually exclusive with .personality.  */

static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Only one personality may be specified per frame, by either form.  */
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  /* The index must be a constant in [0, 15].  */
  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
3541
3542
/* Parse a personality directive.  Records a user-supplied personality
   routine by symbol name; mutually exclusive with .personalityindex.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_end temporarily NUL-terminates the name inside the input
     buffer; the clobbered character C is restored after the lookup.  */
  name = input_line_pointer;
  c = get_symbol_end ();
  p = input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
3563
3564
/* Parse a directive saving core registers.  RANGE is a bitmask with
   bit N set for register rN.  Emits pop opcodes (short form where
   possible) and accounts 4 bytes of frame per saved register.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Bit 12 is ip, bit 13 is sp: drop the earlier movsp opcode and
	 substitute sp for ip in the saved set.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  0xbff0 excludes r14, which
	 the short form can encode separately.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3640
3641
/* Parse a directive saving FPA registers.  REG is the first register
   saved; a ", <count>" operand (1..4) gives the number of consecutive
   registers.  Each FPA register occupies 12 bytes of frame.  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  Only available when the block starts at f4.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  unwind.frame_size += num_regs * 12;
}
3689
3690
/* Parse a directive saving VFP registers for ARMv6 and above.  The list
   may span D0-D31; registers D16+ (VFPv3) need a separate opcode from
   those below D16.  Each D register occupies 8 bytes of frame.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;		/* List straddles D16.  */

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  unwind.frame_size += count * 8;
}
3739
3740
/* Parse a directive saving VFP registers for pre-ARMv6 (FSTMX-style:
   8 bytes per D register plus one extra word of frame).  */

static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;
  valueT op;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  Only available when the block starts at d8.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  /* FSTMX pushes an extra word in addition to the registers.  */
  unwind.frame_size += count * 8 + 4;
}
3774
3775
3776 /* Parse a directive saving iWMMXt data registers. */
3777
3778 static void
3779 s_arm_unwind_save_mmxwr (void)
3780 {
3781 int reg;
3782 int hi_reg;
3783 int i;
3784 unsigned mask = 0;
3785 valueT op;
3786
3787 if (*input_line_pointer == '{')
3788 input_line_pointer++;
3789
3790 do
3791 {
3792 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3793
3794 if (reg == FAIL)
3795 {
3796 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3797 goto error;
3798 }
3799
3800 if (mask >> reg)
3801 as_tsktsk (_("register list not in ascending order"));
3802 mask |= 1 << reg;
3803
3804 if (*input_line_pointer == '-')
3805 {
3806 input_line_pointer++;
3807 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3808 if (hi_reg == FAIL)
3809 {
3810 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3811 goto error;
3812 }
3813 else if (reg >= hi_reg)
3814 {
3815 as_bad (_("bad register range"));
3816 goto error;
3817 }
3818 for (; reg < hi_reg; reg++)
3819 mask |= 1 << reg;
3820 }
3821 }
3822 while (skip_past_comma (&input_line_pointer) != FAIL);
3823
3824 if (*input_line_pointer == '}')
3825 input_line_pointer++;
3826
3827 demand_empty_rest_of_line ();
3828
3829 /* Generate any deferred opcodes because we're going to be looking at
3830 the list. */
3831 flush_pending_unwind ();
3832
3833 for (i = 0; i < 16; i++)
3834 {
3835 if (mask & (1 << i))
3836 unwind.frame_size += 8;
3837 }
3838
3839 /* Attempt to combine with a previous opcode. We do this because gcc
3840 likes to output separate unwind directives for a single block of
3841 registers. */
3842 if (unwind.opcode_count > 0)
3843 {
3844 i = unwind.opcodes[unwind.opcode_count - 1];
3845 if ((i & 0xf8) == 0xc0)
3846 {
3847 i &= 7;
3848 /* Only merge if the blocks are contiguous. */
3849 if (i < 6)
3850 {
3851 if ((mask & 0xfe00) == (1 << 9))
3852 {
3853 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3854 unwind.opcode_count--;
3855 }
3856 }
3857 else if (i == 6 && unwind.opcode_count >= 2)
3858 {
3859 i = unwind.opcodes[unwind.opcode_count - 2];
3860 reg = i >> 4;
3861 i &= 0xf;
3862
3863 op = 0xffff << (reg - 1);
3864 if (reg > 0
3865 && ((mask & op) == (1u << (reg - 1))))
3866 {
3867 op = (1 << (reg + i + 1)) - 1;
3868 op &= ~((1 << reg) - 1);
3869 mask |= op;
3870 unwind.opcode_count -= 2;
3871 }
3872 }
3873 }
3874 }
3875
3876 hi_reg = 15;
3877 /* We want to generate opcodes in the order the registers have been
3878 saved, ie. descending order. */
3879 for (reg = 15; reg >= -1; reg--)
3880 {
3881 /* Save registers in blocks. */
3882 if (reg < 0
3883 || !(mask & (1 << reg)))
3884 {
3885 /* We found an unsaved reg. Generate opcodes to save the
3886 preceding block. */
3887 if (reg != hi_reg)
3888 {
3889 if (reg == 9)
3890 {
3891 /* Short form. */
3892 op = 0xc0 | (hi_reg - 10);
3893 add_unwind_opcode (op, 1);
3894 }
3895 else
3896 {
3897 /* Long form. */
3898 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3899 add_unwind_opcode (op, 2);
3900 }
3901 }
3902 hi_reg = reg - 1;
3903 }
3904 }
3905
3906 return;
3907 error:
3908 ignore_rest_of_line ();
3909 }
3910
/* Parse a directive saving iWMMXt control registers (wcgr0-wcgr3,
   numbered 8-11 by the register parser and rebased to 0 here).  Emits a
   single long-form opcode with the 4-bit mask; 4 bytes of frame per
   saved register.  */
static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* wcgr registers are numbered from 8; rebase to bit 0.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  /* Range syntax, e.g. wcgr0-wcgr3.  */
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  if (*input_line_pointer == '}')
    input_line_pointer++;

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
error:
  ignore_rest_of_line ();
}
3977
3978
/* Parse an unwind_save directive.
   If the argument is non-zero, this is a .vsave directive (ARMv6+ VFP
   FSTMD/FLDMD-style).  Peeks at the first register to decide which
   register class the list belongs to, then dispatches to the matching
   per-class parser.  */

static void
s_arm_unwind_save (int arch_v6)
{
  char *peek;
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Figure out what sort of save we have.  */
  peek = input_line_pointer;

  if (*peek == '{')
    {
      had_brace = TRUE;
      peek++;
    }

  /* Parse using a scratch pointer so input_line_pointer is untouched;
     the per-class parsers re-parse the full list themselves.  */
  reg = arm_reg_parse_multi (&peek);

  if (!reg)
    {
      as_bad (_("register expected"));
      ignore_rest_of_line ();
      return;
    }

  switch (reg->type)
    {
    case REG_TYPE_FN:
      if (had_brace)
	{
	  as_bad (_("FPA .unwind_save does not take a register list"));
	  ignore_rest_of_line ();
	  return;
	}
      /* FPA takes "fN, <count>", so consume the register name here.  */
      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);
      return;

    case REG_TYPE_RN:	  s_arm_unwind_save_core ();   return;
    case REG_TYPE_VFD:
      if (arch_v6)
	s_arm_unwind_save_vfp_armv6 ();
      else
	s_arm_unwind_save_vfp ();
      return;
    case REG_TYPE_MMXWR:  s_arm_unwind_save_mmxwr ();  return;
    case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;

    default:
      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
    }
}
4038
4039
/* Parse an unwind_movsp directive.  Syntax: .movsp <reg>[, #<offset>].
   Emits the "set sp from register" opcode and records the new virtual
   frame pointer for subsequent directives.  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* fp_reg should still be the default (sp) at this point; a prior
     .movsp or .setfp would have changed it.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4089
/* Parse an unwind_pad directive.  Accounts for a word-aligned stack
   adjustment that saved no registers; the adjustment is deferred so it
   can be merged with neighbouring adjustments.  */

static void
s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
{
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (immediate_for_directive (&offset) == FAIL)
    return;

  if (offset & 3)
    {
      as_bad (_("stack increment must be multiple of 4"));
      ignore_rest_of_line ();
      return;
    }

  /* Don't generate any opcodes, just record the details for later.  */
  unwind.frame_size += offset;
  unwind.pending_offset += offset;

  demand_empty_rest_of_line ();
}
4116
4117 /* Parse an unwind_setfp directive. */
4118
4119 static void
4120 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4121 {
4122 int sp_reg;
4123 int fp_reg;
4124 int offset;
4125
4126 if (!unwind.proc_start)
4127 as_bad (MISSING_FNSTART);
4128
4129 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4130 if (skip_past_comma (&input_line_pointer) == FAIL)
4131 sp_reg = FAIL;
4132 else
4133 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4134
4135 if (fp_reg == FAIL || sp_reg == FAIL)
4136 {
4137 as_bad (_("expected <reg>, <reg>"));
4138 ignore_rest_of_line ();
4139 return;
4140 }
4141
4142 /* Optional constant. */
4143 if (skip_past_comma (&input_line_pointer) != FAIL)
4144 {
4145 if (immediate_for_directive (&offset) == FAIL)
4146 return;
4147 }
4148 else
4149 offset = 0;
4150
4151 demand_empty_rest_of_line ();
4152
4153 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4154 {
4155 as_bad (_("register must be either sp or set by a previous"
4156 "unwind_movsp directive"));
4157 return;
4158 }
4159
4160 /* Don't generate any opcodes, just record the information for later. */
4161 unwind.fp_reg = fp_reg;
4162 unwind.fp_used = 1;
4163 if (sp_reg == REG_SP)
4164 unwind.fp_offset = unwind.frame_size - offset;
4165 else
4166 unwind.fp_offset -= offset;
4167 }
4168
4169 /* Parse an unwind_raw directive. */
4170
4171 static void
4172 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4173 {
4174 expressionS exp;
4175 /* This is an arbitrary limit. */
4176 unsigned char op[16];
4177 int count;
4178
4179 if (!unwind.proc_start)
4180 as_bad (MISSING_FNSTART);
4181
4182 expression (&exp);
4183 if (exp.X_op == O_constant
4184 && skip_past_comma (&input_line_pointer) != FAIL)
4185 {
4186 unwind.frame_size += exp.X_add_number;
4187 expression (&exp);
4188 }
4189 else
4190 exp.X_op = O_illegal;
4191
4192 if (exp.X_op != O_constant)
4193 {
4194 as_bad (_("expected <offset>, <opcode>"));
4195 ignore_rest_of_line ();
4196 return;
4197 }
4198
4199 count = 0;
4200
4201 /* Parse the opcode. */
4202 for (;;)
4203 {
4204 if (count >= 16)
4205 {
4206 as_bad (_("unwind opcode too long"));
4207 ignore_rest_of_line ();
4208 }
4209 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4210 {
4211 as_bad (_("invalid unwind opcode"));
4212 ignore_rest_of_line ();
4213 return;
4214 }
4215 op[count++] = exp.X_add_number;
4216
4217 /* Parse the next byte. */
4218 if (skip_past_comma (&input_line_pointer) == FAIL)
4219 break;
4220
4221 expression (&exp);
4222 }
4223
4224 /* Add the opcode bytes in reverse order. */
4225 while (count--)
4226 add_unwind_opcode (op[count], 1);
4227
4228 demand_empty_rest_of_line ();
4229 }
4230
4231
/* Parse a .eabi_attribute directive.  Delegates the actual parsing to
   s_vendor_attribute and records that the tag was set explicitly so it
   is not later overridden by inferred defaults.  */

static void
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
{
  int tag = s_vendor_attribute (OBJ_ATTR_PROC);

  /* NOTE(review): assumes s_vendor_attribute never returns a negative
     tag on parse failure — confirm, since TAG indexes the array.  */
  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
    attributes_set_explicitly[tag] = 1;
}
4242 #endif /* OBJ_ELF */
4243
4244 static void s_arm_arch (int);
4245 static void s_arm_object_arch (int);
4246 static void s_arm_cpu (int);
4247 static void s_arm_fpu (int);
4248
4249 #ifdef TE_PE
4250
/* Handle the PE ".secrel32" directive: emit one or more comma-separated
   32-bit section-relative values.  Symbol operands are converted to
   O_secrel so emit_expr generates section-relative relocations.  */
static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* The loop consumed one character past the last expression; back up
     so demand_empty_rest_of_line sees it.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
4269 #endif /* TE_PE */
4270
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,	  0 },
  { "qn",	   s_qn,	  0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
#ifdef OBJ_ELF
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	0 },
  /* Frame unwinding directives (ELF only).  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  /* .vsave shares the parser; arg 1 selects the ARMv6+ VFP encoding.  */
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  { 0, 0, 0 }
};
4341 \f
4342 /* Parser functions used exclusively in instruction operands. */
4343
4344 /* Generic immediate-value read function for use in insn parsing.
4345 STR points to the beginning of the immediate (the leading #);
4346 VAL receives the value; if the value is outside [MIN, MAX]
4347 issue an error. PREFIX_OPT is true if the immediate prefix is
4348 optional. */
4349
4350 static int
4351 parse_immediate (char **str, int *val, int min, int max,
4352 bfd_boolean prefix_opt)
4353 {
4354 expressionS exp;
4355 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4356 if (exp.X_op != O_constant)
4357 {
4358 inst.error = _("constant expression required");
4359 return FAIL;
4360 }
4361
4362 if (exp.X_add_number < min || exp.X_add_number > max)
4363 {
4364 inst.error = _("immediate value out of range");
4365 return FAIL;
4366 }
4367
4368 *val = exp.X_add_number;
4369 return SUCCESS;
4370 }
4371
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm, and for 64-bit values the high 32 bits in .reg with
   .regisimm set.  Returns SUCCESS (advancing *STR) or FAIL.  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~0xffffffffl) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
    {
      /* For O_big, X_add_number is the littlenum count (33..64 bits).  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4423
/* Returns the pseudo-register number of an FPA immediate constant
   (index into fp_values, offset by 8), or FAIL if there isn't a valid
   constant here.  Three strategies are tried in order: exact string
   match, raw IEEE parse, then a full expression parse.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  /* Not followed by end-of-line: undo and keep trying.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare against each of the supported constant values.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Restore the swapped input pointer before reporting failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4514
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. the IEEE single IMM has
   19 trailing zero bits and exponent bits 30-25 equal to either
   0b100000 or 0b011111.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected_top;

  /* Bits 0-18 must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 30-25 must be 011111 when bit 29 is set, 100000 otherwise.  */
  expected_top = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected_top;
}
4524
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  On SUCCESS, *IMMED receives the 32-bit
   single-precision bit pattern and *CCP is advanced.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept either a representable quarter-float or (minus) zero.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
4588
/* Shift operands.  Kinds of shift accepted in shifter operands; ASL is
   folded into SHIFT_LSL by the name table.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Entry in the shift-name hash table (arm_shift_hsh): maps a mnemonic
   such as "lsl" to its shift kind.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
4610
/* Parse a <shift> specifier on an ARM data processing instruction.
   This has three forms:

   (LSL|LSR|ASL|ASR|ROR) Rs
   (LSL|LSR|ASL|ASR|ROR) #imm
   RRX

   Note that ASL is assimilated to LSL in the instruction encoding, and
   RRX to ROR #0 (which cannot be written as such).

   I is the operand index to fill in; MODE restricts which shift kinds
   and operand forms are accepted.  Returns SUCCESS (advancing *STR and
   setting inst.operands[i]/inst.reloc.exp) or FAIL with inst.error
   set.  */

static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* Scan the alphabetic shift mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
							    p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the restrictions requested by MODE.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:   break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  /* RRX takes no operand; everything else takes a register or an
     immediate amount.  */
  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.	*/
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
4701
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  Returns SUCCESS or FAIL (inst.error
   set).  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      /* Register form, optionally followed by a shift.  */
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.
	 This is a right-rotate of the 8-bit constant by VALUE bits,
	 masked to 32 bits.  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
4772
4773 /* Group relocation information. Each entry in the table contains the
4774 textual name of the relocation as may appear in assembler source
4775 and must end with a colon.
4776 Along with this textual name are the relocation codes to be used if
4777 the corresponding instruction is an ALU instruction (ADD or SUB only),
4778 an LDR, an LDRS, or an LDC. */
4779
struct group_reloc_table_entry
{
  /* Relocation name as written in assembler source, without the
     trailing colon.  */
  const char *name;
  /* BFD relocation codes to use when the instruction is an ALU
     operation (ADD/SUB), an LDR, an LDRS, or an LDC respectively.
     A value of 0 means the group relocation is not permitted for
     that instruction class.  */
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};
4788
/* Selects which column of group_reloc_table to use for a non-ALU
   instruction.  */
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,	/* Single word load/store.  */
  GROUP_LDRS,	/* Halfword / signed byte load/store.  */
  GROUP_LDC	/* Coprocessor load/store.  */
} group_reloc_type;
4797
/* Table of group relocation names and the ALU/LDR/LDRS/LDC relocation
   codes for each; a 0 entry means that combination is disallowed and
   is diagnosed by the callers that consult this table.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 } };	/* LDC */
4851
4852 /* Given the address of a pointer pointing to the textual name of a group
4853 relocation as may appear in assembler source, attempt to find its details
4854 in group_reloc_table. The pointer will be updated to the character after
4855 the trailing colon. On failure, FAIL will be returned; SUCCESS
4856 otherwise. On success, *entry will be updated to point at the relevant
4857 group_reloc_table entry. */
4858
4859 static int
4860 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4861 {
4862 unsigned int i;
4863 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4864 {
4865 int length = strlen (group_reloc_table[i].name);
4866
4867 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
4868 && (*str)[length] == ':')
4869 {
4870 *out = &group_reloc_table[i];
4871 *str += (length + 1);
4872 return SUCCESS;
4873 }
4874 }
4875
4876 return FAIL;
4877 }
4878
4879 /* Parse a <shifter_operand> for an ARM data processing instruction
4880 (as for parse_shifter_operand) where group relocations are allowed:
4881
4882 #<immediate>
4883 #<immediate>, <rotate>
4884 #:<group_reloc>:<expression>
4885 <Rm>
4886 <Rm>, <shift>
4887
4888 where <group_reloc> is one of the strings defined in group_reloc_table.
4889 The hashes are optional.
4890
4891 Everything else is as for parse_shifter_operand. */
4892
4893 static parse_operand_result
4894 parse_shifter_operand_group_reloc (char **str, int i)
4895 {
4896 /* Determine if we have the sequence of characters #: or just :
4897 coming next. If we do, then we check for a group relocation.
4898 If we don't, punt the whole lot to parse_shifter_operand. */
4899
4900 if (((*str)[0] == '#' && (*str)[1] == ':')
4901 || (*str)[0] == ':')
4902 {
4903 struct group_reloc_table_entry *entry;
4904
4905 if ((*str)[0] == '#')
4906 (*str) += 2;
4907 else
4908 (*str)++;
4909
4910 /* Try to parse a group relocation. Anything else is an error. */
4911 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4912 {
4913 inst.error = _("unknown group relocation");
4914 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4915 }
4916
4917 /* We now have the group relocation table entry corresponding to
4918 the name in the assembler source. Next, we parse the expression. */
4919 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4920 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4921
4922 /* Record the relocation type (always the ALU variant here). */
4923 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
4924 gas_assert (inst.reloc.type != 0);
4925
4926 return PARSE_OPERAND_SUCCESS;
4927 }
4928 else
4929 return parse_shifter_operand (str, i) == SUCCESS
4930 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4931
4932 /* Never reached. */
4933 }
4934
4935 /* Parse all forms of an ARM address expression. Information is written
4936 to inst.operands[i] and/or inst.reloc.
4937
4938 Preindexed addressing (.preind=1):
4939
4940 [Rn, #offset] .reg=Rn .reloc.exp=offset
4941 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4942 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4943 .shift_kind=shift .reloc.exp=shift_imm
4944
4945 These three may have a trailing ! which causes .writeback to be set also.
4946
4947 Postindexed addressing (.postind=1, .writeback=1):
4948
4949 [Rn], #offset .reg=Rn .reloc.exp=offset
4950 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4951 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4952 .shift_kind=shift .reloc.exp=shift_imm
4953
4954 Unindexed addressing (.preind=0, .postind=0):
4955
4956 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4957
4958 Other:
4959
4960 [Rn]{!} shorthand for [Rn,#0]{!}
4961 =immediate .isreg=0 .reloc.exp=immediate
4962 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4963
4964 It is the caller's responsibility to check for addressing modes not
4965 supported by the instruction, and to set inst.reloc.type. */
4966
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
                    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[' means either a bare label or an =immediate pseudo-load.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* Otherwise a load-constant pseudo op, no special treatment needed here.  */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* '[' seen: a base register is mandatory.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* Anything after "[Rn," and before "]" is a pre-index (or an
     alignment specifier).  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      /* Optional sign; '-' sets the .negative flag.  */
      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Register offset, with an optional shift.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  expressionS exp;
	  my_get_expression (&exp, &p, GE_NO_PREFIX);
	  if (exp.X_op != O_constant)
	    {
	      inst.error = _("alignment must be constant");
	      return PARSE_OPERAND_FAIL;
	    }
	  inst.operands[i].imm = exp.X_add_number << 8;
	  inst.operands[i].immisalign = 1;
	  /* Alignments are not pre-indexes.  */
	  inst.operands[i].preind = 0;
	}
      else
	{
	  /* Immediate offset.  Undo any sign we consumed above so the
	     expression parser sees it as part of the expression.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type, picking the column that
		 matches the caller's instruction class.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		  break;

		default:
		  gas_assert (0);
		}

	      /* A zero in the table marks a disallowed combination.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	      return PARSE_OPERAND_FAIL;
	}
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Trailing '!' requests writeback on the pre-indexed forms.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  /* Post-indexed addressing; writeback is implicit.  */
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      /* Immediate post-index; undo any consumed sign as above.  */
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5181
5182 static int
5183 parse_address (char **str, int i)
5184 {
5185 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5186 ? SUCCESS : FAIL;
5187 }
5188
/* As parse_address, but group relocations of the given TYPE are
   accepted and the finer-grained parse_operand_result is returned.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
5194
5195 /* Parse an operand for a MOVW or MOVT instruction. */
5196 static int
5197 parse_half (char **str)
5198 {
5199 char * p;
5200
5201 p = *str;
5202 skip_past_char (&p, '#');
5203 if (strncasecmp (p, ":lower16:", 9) == 0)
5204 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5205 else if (strncasecmp (p, ":upper16:", 9) == 0)
5206 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5207
5208 if (inst.reloc.type != BFD_RELOC_UNUSED)
5209 {
5210 p += 9;
5211 skip_whitespace (p);
5212 }
5213
5214 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5215 return FAIL;
5216
5217 if (inst.reloc.type == BFD_RELOC_UNUSED)
5218 {
5219 if (inst.reloc.exp.X_op != O_constant)
5220 {
5221 inst.error = _("constant expression expected");
5222 return FAIL;
5223 }
5224 if (inst.reloc.exp.X_add_number < 0
5225 || inst.reloc.exp.X_add_number > 0xffff)
5226 {
5227 inst.error = _("immediate value out of range");
5228 return FAIL;
5229 }
5230 }
5231 *str = p;
5232 return SUCCESS;
5233 }
5234
5235 /* Miscellaneous. */
5236
5237 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5238 or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
parse_psr (char **str)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0)
    psr_field = 0;
  else
    {
      /* Neither CPSR nor SPSR: collect an identifier and look it up in
	 the arm_v7m_psr_hsh table (built elsewhere in this file).  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);
      if (!psr)
	return FAIL;

      *str = p;
      return psr->field;
    }

  /* Step over "CPSR"/"SPSR" and look for an optional _<flags> suffix.  */
  p += 4;
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						  p - start);
      if (!psr)
	goto error;

      psr_field |= psr->field;
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* No suffix: default to the control and flags fields.  */
      psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5302
5303 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5304 value suitable for splatting into the AIF field of the instruction. */
5305
5306 static int
5307 parse_cps_flags (char **str)
5308 {
5309 int val = 0;
5310 int saw_a_flag = 0;
5311 char *s = *str;
5312
5313 for (;;)
5314 switch (*s++)
5315 {
5316 case '\0': case ',':
5317 goto done;
5318
5319 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5320 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5321 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5322
5323 default:
5324 inst.error = _("unrecognized CPS flag");
5325 return FAIL;
5326 }
5327
5328 done:
5329 if (saw_a_flag == 0)
5330 {
5331 inst.error = _("missing CPS flags");
5332 return FAIL;
5333 }
5334
5335 *str = s - 1;
5336 return val;
5337 }
5338
5339 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5340 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5341
5342 static int
5343 parse_endian_specifier (char **str)
5344 {
5345 int little_endian;
5346 char *s = *str;
5347
5348 if (strncasecmp (s, "BE", 2))
5349 little_endian = 0;
5350 else if (strncasecmp (s, "LE", 2))
5351 little_endian = 1;
5352 else
5353 {
5354 inst.error = _("valid endian specifiers are be or le");
5355 return FAIL;
5356 }
5357
5358 if (ISALNUM (s[2]) || s[2] == '_')
5359 {
5360 inst.error = _("valid endian specifiers are be or le");
5361 return FAIL;
5362 }
5363
5364 *str = s + 2;
5365 return little_endian;
5366 }
5367
/* Parse a rotation specifier: ROR #0, #8, #16, #24.  Returns a value
   suitable for poking into the rotate field of an sxt or sxta
   instruction, or FAIL on error.  (The result is returned directly;
   there is no out-parameter.)  */

static int
parse_ror (char **str)
{
  int rot;
  char *s = *str;

  if (strncasecmp (s, "ROR", 3) == 0)
    s += 3;
  else
    {
      inst.error = _("missing rotation field after comma");
      return FAIL;
    }

  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
    return FAIL;

  /* Map the byte rotation amount to the 2-bit field encoding.  */
  switch (rot)
    {
    case 0: *str = s; return 0x0;
    case 8: *str = s; return 0x1;
    case 16: *str = s; return 0x2;
    case 24: *str = s; return 0x3;

    default:
      inst.error = _("rotation can only be 0, 8, 16, or 24");
      return FAIL;
    }
}
5401
5402 /* Parse a conditional code (from conds[] below). The value returned is in the
5403 range 0 .. 14, or FAIL. */
5404 static int
5405 parse_cond (char **str)
5406 {
5407 char *q;
5408 const struct asm_cond *c;
5409 int n;
5410 /* Condition codes are always 2 characters, so matching up to
5411 3 characters is sufficient. */
5412 char cond[3];
5413
5414 q = *str;
5415 n = 0;
5416 while (ISALPHA (*q) && n < 3)
5417 {
5418 cond[n] = TOLOWER (*q);
5419 q++;
5420 n++;
5421 }
5422
5423 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5424 if (!c)
5425 {
5426 inst.error = _("condition required");
5427 return FAIL;
5428 }
5429
5430 *str = q;
5431 return c->value;
5432 }
5433
5434 /* Parse an option for a barrier instruction. Returns the encoding for the
5435 option, or FAIL. */
5436 static int
5437 parse_barrier (char **str)
5438 {
5439 char *p, *q;
5440 const struct asm_barrier_opt *o;
5441
5442 p = q = *str;
5443 while (ISALPHA (*q))
5444 q++;
5445
5446 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5447 q - p);
5448 if (!o)
5449 return FAIL;
5450
5451 *str = q;
5452 return o->value;
5453 }
5454
5455 /* Parse the operands of a table branch instruction. Similar to a memory
5456 operand. */
static int
parse_tb (char **str)
{
  char * p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      inst.error = _("'[' expected");
      return FAIL;
    }

  /* Base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].reg = reg;

  if (skip_past_comma (&p) == FAIL)
    {
      inst.error = _("',' expected");
      return FAIL;
    }

  /* Index register, stored in the .imm field.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].imm = reg;

  /* An optional shift must be exactly "LSL #1".  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
	return FAIL;
      if (inst.reloc.exp.X_add_number != 1)
	{
	  inst.error = _("invalid shift");
	  return FAIL;
	}
      inst.operands[0].shifted = 1;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return FAIL;
    }
  *str = p;
  return SUCCESS;
}
5509
5510 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5511 information on the types the operands can take and how they are encoded.
5512 Up to four operands may be read; this function handles setting the
5513 ".present" field for each read operand itself.
5514 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5515 else returns FAIL. */
5516
static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      /* A quad destination cannot take ARM-register sources.  */
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D destination (case 5): a second ARM register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two ARM registers follow the vector pair.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
5731
5732 /* Matcher codes for parse_operands. */
/* Matcher codes for parse_operands.  Each code names one operand
   grammar; parse_operands walks a pattern of these.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_NILO,      /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER,	 /* Option argument for a barrier instruction.  */

  /* Codes at or above this value denote operands that may be omitted.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
5852
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */
static int
parse_operands (char *str, const unsigned char *pattern)
{
  unsigned const char *upat = pattern;
  /* Where to resume parsing if a pending optional operand turns out
     not to match; zero while no optional operand is pending.  */
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;

  /* Consume the literal character CHR, or bail out with BAD_ARGS.  */
#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

  /* Parse a register of type REGTYPE (or a vector type it admits) into
     operand I, or fail with a "<regtype> expected" diagnostic.  */
#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			       || rtype == REG_TYPE_VFD		\
			       || rtype == REG_TYPE_NQ);	\
    }								\
  while (0)

  /* As po_reg_or_fail, but on mismatch jump to LABEL so the caller
     can try an alternative interpretation of the operand text.  */
#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			       || rtype == REG_TYPE_VFD		\
			       || rtype == REG_TYPE_NQ);	\
    }								\
  while (0)

  /* Parse an immediate in [MIN, MAX] into operand I; POPT selects
     whether the '#' prefix is optional.  */
#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

  /* Parse a Neon scalar (Dn[x]) with element size ELSZ into operand I,
     or jump to LABEL to try something else.  */
#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

  /* Fail if EXPR (a parse call) reports an error.  */
#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

  /* As po_misc_or_fail, but a PARSE_OPERAND_FAIL_NO_BACKTRACK result
     also cancels any pending optional-operand retry.  */
#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

  skip_whitespace (str);

  /* Walk the operand pattern, parsing one operand per iteration.  */
  for (i = 0; upat[i] != OP_stop; i++)
    {
      if (upat[i] >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (upat[i])
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_oRR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;

	/* Neon scalar. Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);    break;

	/* WARNING: We can expand to two operands here. This has the potential
	   to totally confuse the backtracking mechanism! It will be OK at
	   least as long as we don't try to use optional args as well,
	   though.  */
	case OP_NILO:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm);
	    inst.operands[i].present = 1;
	    i++;
	    skip_past_comma (&str);
	    po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
	    break;
	    one_reg_only:
	    /* Optional register operand was omitted. Unfortunately, it's in
	       operands[i-1] and we need it to be in inst.operands[i]. Fix that
	       here (this is a bit grotty).  */
	    inst.operands[i] = inst.operands[i-1];
	    inst.operands[i-1].present = 0;
	    break;
	    try_imm:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  /* Shares the try_imm0 label defined in the OP_RNDQ_I0 case.  */
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_IMVNb:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
	    break;
	    try_mvnimm:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    /* NOTE(review): if the operand text were empty, s == str and
	       s[-1] would read before the operand — presumably callers
	       guarantee non-empty text at this point; confirm.  */
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  if (inst.reloc.exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_PSR:	 val = parse_psr (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER:val = parse_barrier (&str);		break;

	case OP_RVC_PSR:
	  po_reg_or_goto (REG_TYPE_VFC, try_psr);
	  inst.operands[i].isvec = 1;  /* Mark VFP control reg as vector.  */
	  break;
	  try_psr:
	  val = parse_psr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      unsigned found = 0;
	      str += 5;
	      /* Each flag letter sets its bit in FOUND; a repeated or
		 unknown letter forces FOUND to 16 (invalid).  All four
		 flags must appear exactly once.  */
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str);
	  if (*str == '^')
	    {
	      /* NOTE(review): index 1 is hard-coded — this assumes the
		 register list is always operand 1 of the instruction;
		 operands[i] would be the obvious spelling.  Confirm
		 against the LDM/STM operand patterns before changing.  */
	      inst.operands[1].writeback = 1;
	      str++;
	    }
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), upat[i]);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (upat[i])
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	  /* The cases below parsed a value into VAL rather than directly
	     into the operand; commit it (or fail) here.  */
	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_PSR:
	case OP_RVC_PSR:
	case OP_COND:
	case OP_oBARRIER:
	case OP_REGLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      /* No optional operand pending: this is a hard failure.  */
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}
6488
/* Tear down the parse_operands helper macros.  Note: the previously
   undefined name here was po_scalar_or_fail, which is never defined;
   undef the macros that actually exist so none leak past this point.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
#undef po_misc_or_fail_no_backtrack
6494
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   calling (void) encoder function, abandoning the encode.  EXPR is
   evaluated exactly once.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
6506
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   On a match this sets inst.error and returns from the calling
   encoder.  REG is evaluated more than once, so pass a plain
   variable, not an expression with side effects.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
6518
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is only emitted when deprecation warnings
   are enabled (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_warn (_("use of r13 is deprecated"));	\
  while (0)
6526
6527 /* Functions for operand encoding. ARM, then Thumb. */
6528
/* Rotate the 32-bit value V left by N bits (N in 0..31).  The shift
   counts are masked so that N == 0 stays well defined: the previous
   form evaluated "v >> (32 - n)", and a shift by 32 is undefined
   behavior in C (encode_arm_immediate starts its loop at n == 0).
   The arguments are also fully parenthesized.  */
#define rotate_left(v, n) ((v) << ((n) & 31) | (v) >> ((32 - (n)) & 31))
6530
6531 /* If VAL can be encoded in the immediate field of an ARM instruction,
6532 return the encoded form. Otherwise, return FAIL. */
6533
6534 static unsigned int
6535 encode_arm_immediate (unsigned int val)
6536 {
6537 unsigned int a, i;
6538
6539 for (i = 0; i < 32; i += 2)
6540 if ((a = rotate_left (val, i)) <= 0xff)
6541 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6542
6543 return FAIL;
6544 }
6545
6546 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6547 return the encoded form. Otherwise, return FAIL. */
6548 static unsigned int
6549 encode_thumb32_immediate (unsigned int val)
6550 {
6551 unsigned int a, i;
6552
6553 if (val <= 0xff)
6554 return val;
6555
6556 for (i = 1; i <= 24; i++)
6557 {
6558 a = val >> i;
6559 if ((val & ~(0xff << i)) == 0)
6560 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6561 }
6562
6563 a = val & 0xff;
6564 if (val == ((a << 16) | a))
6565 return 0x100 | a;
6566 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6567 return 0x300 | a;
6568
6569 a = val & 0xff00;
6570 if (val == ((a << 16) | a))
6571 return 0x200 | (a >> 8);
6572
6573 return FAIL;
6574 }
6575 /* Encode a VFP SP or DP register number into inst.instruction. */
6576
6577 static void
6578 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6579 {
6580 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6581 && reg > 15)
6582 {
6583 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
6584 {
6585 if (thumb_mode)
6586 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6587 fpu_vfp_ext_d32);
6588 else
6589 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6590 fpu_vfp_ext_d32);
6591 }
6592 else
6593 {
6594 first_error (_("D register out of range for selected VFP version"));
6595 return;
6596 }
6597 }
6598
6599 switch (pos)
6600 {
6601 case VFP_REG_Sd:
6602 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6603 break;
6604
6605 case VFP_REG_Sn:
6606 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6607 break;
6608
6609 case VFP_REG_Sm:
6610 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6611 break;
6612
6613 case VFP_REG_Dd:
6614 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6615 break;
6616
6617 case VFP_REG_Dn:
6618 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6619 break;
6620
6621 case VFP_REG_Dm:
6622 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6623 break;
6624
6625 default:
6626 abort ();
6627 }
6628 }
6629
6630 /* Encode a <shift> in an ARM-format instruction. The immediate,
6631 if any, is handled by md_apply_fix. */
6632 static void
6633 encode_arm_shift (int i)
6634 {
6635 if (inst.operands[i].shift_kind == SHIFT_RRX)
6636 inst.instruction |= SHIFT_ROR << 5;
6637 else
6638 {
6639 inst.instruction |= inst.operands[i].shift_kind << 5;
6640 if (inst.operands[i].immisreg)
6641 {
6642 inst.instruction |= SHIFT_BY_REG;
6643 inst.instruction |= inst.operands[i].imm << 8;
6644 }
6645 else
6646 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6647 }
6648 }
6649
6650 static void
6651 encode_arm_shifter_operand (int i)
6652 {
6653 if (inst.operands[i].isreg)
6654 {
6655 inst.instruction |= inst.operands[i].reg;
6656 encode_arm_shift (i);
6657 }
6658 else
6659 inst.instruction |= INST_IMMEDIATE;
6660 }
6661
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register and the P (pre-index) / W (write-back)
   bits for operand I, and rejects addressing forms the instruction
   cannot accept.  IS_T is true for the user-mode 'T' variants, which
   only permit post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* The operand must already have been parsed with a base register.  */
  gas_assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* The 'T' forms encode their forced user-mode access in the W bit.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base register will be written back (explicit W bit,
     or post-indexed addressing) and it equals the transfer register
     (bits 16-19 vs bits 12-15 of the word built so far).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
6700
6701 /* inst.operands[i] was set up by parse_address. Encode it into an
6702 ARM-format mode 2 load or store instruction. If is_t is true,
6703 reject forms that cannot be used with a T instruction (i.e. not
6704 post-indexed). */
6705 static void
6706 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6707 {
6708 encode_arm_addr_mode_common (i, is_t);
6709
6710 if (inst.operands[i].immisreg)
6711 {
6712 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
6713 inst.instruction |= inst.operands[i].imm;
6714 if (!inst.operands[i].negative)
6715 inst.instruction |= INDEX_UP;
6716 if (inst.operands[i].shifted)
6717 {
6718 if (inst.operands[i].shift_kind == SHIFT_RRX)
6719 inst.instruction |= SHIFT_ROR << 5;
6720 else
6721 {
6722 inst.instruction |= inst.operands[i].shift_kind << 5;
6723 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6724 }
6725 }
6726 }
6727 else /* immediate offset in inst.reloc */
6728 {
6729 if (inst.reloc.type == BFD_RELOC_UNUSED)
6730 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6731 }
6732 }
6733
6734 /* inst.operands[i] was set up by parse_address. Encode it into an
6735 ARM-format mode 3 load or store instruction. Reject forms that
6736 cannot be used with such instructions. If is_t is true, reject
6737 forms that cannot be used with a T instruction (i.e. not
6738 post-indexed). */
6739 static void
6740 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6741 {
6742 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6743 {
6744 inst.error = _("instruction does not accept scaled register index");
6745 return;
6746 }
6747
6748 encode_arm_addr_mode_common (i, is_t);
6749
6750 if (inst.operands[i].immisreg)
6751 {
6752 inst.instruction |= inst.operands[i].imm;
6753 if (!inst.operands[i].negative)
6754 inst.instruction |= INDEX_UP;
6755 }
6756 else /* immediate offset in inst.reloc */
6757 {
6758 inst.instruction |= HWOFFSET_IMM;
6759 if (inst.reloc.type == BFD_RELOC_UNUSED)
6760 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6761 }
6762 }
6763
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  Returns SUCCESS or FAIL.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;

  /* parse_address never produces both at once.  */
  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form: the 8-bit field holds an option value, and
	 the U bit is set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  /* Group relocations (ALU_PC_G0 .. LDC_SB_G2) and LDR_PC_G0 chosen by
     the parser are preserved; anything else gets the default coprocessor
     offset relocation for the current instruction set.  */
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  return SUCCESS;
}
6824
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  /* Pick the load bit for the instruction set and encoding width.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* "=expr" only makes sense on load instructions.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  /* Pre-unified syntax only; unified-syntax Thumb goes through
	     the literal pool (or later relaxation) instead.  */
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction	= T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  /* Otherwise try the bitwise complement with a mvn.  */
	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
    }

  /* Fall back to a pc-relative load from the literal pool.  */
  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return TRUE;
    }
  /* NOTE(review): operand 1 is assumed to be the address operand of
     the load being rewritten; it becomes [pc, #offset].  */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
6906
6907 /* Functions for instruction encoding, sorted by sub-architecture.
6908 First some generics; their names are taken from the conventional
6909 bit positions for register arguments in ARM format instructions. */
6910
static void
do_noargs (void)
{
  /* Nothing beyond the base opcode needs encoding.  */
}
6915
static void
do_rd (void)
{
  /* Rd in bits 12-15.  */
  inst.instruction |= inst.operands[0].reg << 12;
}
6921
static void
do_rd_rm (void)
{
  /* Rd in bits 12-15, Rm in bits 0-3.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}
6928
static void
do_rd_rn (void)
{
  /* Rd in bits 12-15, Rn in bits 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
6935
static void
do_rn_rd (void)
{
  /* First operand is Rn (bits 16-19), second is Rd (bits 12-15).  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}
6942
6943 static void
6944 do_rd_rm_rn (void)
6945 {
6946 unsigned Rn = inst.operands[2].reg;
6947 /* Enforce restrictions on SWP instruction. */
6948 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6949 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6950 _("Rn must not overlap other operands"));
6951 inst.instruction |= inst.operands[0].reg << 12;
6952 inst.instruction |= inst.operands[1].reg;
6953 inst.instruction |= Rn << 16;
6954 }
6955
6956 static void
6957 do_rd_rn_rm (void)
6958 {
6959 inst.instruction |= inst.operands[0].reg << 12;
6960 inst.instruction |= inst.operands[1].reg << 16;
6961 inst.instruction |= inst.operands[2].reg;
6962 }
6963
6964 static void
6965 do_rm_rd_rn (void)
6966 {
6967 inst.instruction |= inst.operands[0].reg;
6968 inst.instruction |= inst.operands[1].reg << 12;
6969 inst.instruction |= inst.operands[2].reg << 16;
6970 }
6971
6972 static void
6973 do_imm0 (void)
6974 {
6975 inst.instruction |= inst.operands[0].imm;
6976 }
6977
6978 static void
6979 do_rd_cpaddr (void)
6980 {
6981 inst.instruction |= inst.operands[0].reg << 12;
6982 encode_arm_cp_address (1, TRUE, TRUE, 0);
6983 }
6984
6985 /* ARM instructions, in alphabetical order by function name (except
6986 that wrapper functions appear immediately after the function they
6987 wrap). */
6988
6989 /* This is a pseudo-op of the form "adr rd, label" to be converted
6990 into a relative address of the form "add rd, pc, #label-.-8". */
6991
6992 static void
6993 do_adr (void)
6994 {
6995 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6996
6997 /* Frag hacking will turn this into a sub instruction if the offset turns
6998 out to be negative. */
6999 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7000 inst.reloc.pc_rel = 1;
7001 inst.reloc.exp.X_add_number -= 8;
7002 }
7003
7004 /* This is a pseudo-op of the form "adrl rd, label" to be converted
7005 into a relative address of the form:
7006 add rd, pc, #low(label-.-8)"
7007 add rd, rd, #high(label-.-8)" */
7008
7009 static void
7010 do_adrl (void)
7011 {
7012 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
7013
7014 /* Frag hacking will turn this into a sub instruction if the offset turns
7015 out to be negative. */
7016 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
7017 inst.reloc.pc_rel = 1;
7018 inst.size = INSN_SIZE * 2;
7019 inst.reloc.exp.X_add_number -= 8;
7020 }
7021
7022 static void
7023 do_arit (void)
7024 {
7025 if (!inst.operands[1].present)
7026 inst.operands[1].reg = inst.operands[0].reg;
7027 inst.instruction |= inst.operands[0].reg << 12;
7028 inst.instruction |= inst.operands[1].reg << 16;
7029 encode_arm_shifter_operand (2);
7030 }
7031
7032 static void
7033 do_barrier (void)
7034 {
7035 if (inst.operands[0].present)
7036 {
7037 constraint ((inst.instruction & 0xf0) != 0x40
7038 && inst.operands[0].imm != 0xf,
7039 _("bad barrier type"));
7040 inst.instruction |= inst.operands[0].imm;
7041 }
7042 else
7043 inst.instruction |= 0xf;
7044 }
7045
7046 static void
7047 do_bfc (void)
7048 {
7049 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7050 constraint (msb > 32, _("bit-field extends past end of register"));
7051 /* The instruction encoding stores the LSB and MSB,
7052 not the LSB and width. */
7053 inst.instruction |= inst.operands[0].reg << 12;
7054 inst.instruction |= inst.operands[1].imm << 7;
7055 inst.instruction |= (msb - 1) << 16;
7056 }
7057
7058 static void
7059 do_bfi (void)
7060 {
7061 unsigned int msb;
7062
7063 /* #0 in second position is alternative syntax for bfc, which is
7064 the same instruction but with REG_PC in the Rm field. */
7065 if (!inst.operands[1].isreg)
7066 inst.operands[1].reg = REG_PC;
7067
7068 msb = inst.operands[2].imm + inst.operands[3].imm;
7069 constraint (msb > 32, _("bit-field extends past end of register"));
7070 /* The instruction encoding stores the LSB and MSB,
7071 not the LSB and width. */
7072 inst.instruction |= inst.operands[0].reg << 12;
7073 inst.instruction |= inst.operands[1].reg;
7074 inst.instruction |= inst.operands[2].imm << 7;
7075 inst.instruction |= (msb - 1) << 16;
7076 }
7077
7078 static void
7079 do_bfx (void)
7080 {
7081 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7082 _("bit-field extends past end of register"));
7083 inst.instruction |= inst.operands[0].reg << 12;
7084 inst.instruction |= inst.operands[1].reg;
7085 inst.instruction |= inst.operands[2].imm << 7;
7086 inst.instruction |= (inst.operands[3].imm - 1) << 16;
7087 }
7088
7089 /* ARM V5 breakpoint instruction (argument parse)
7090 BKPT <16 bit unsigned immediate>
7091 Instruction is not conditional.
7092 The bit pattern given in insns[] has the COND_ALWAYS condition,
7093 and it is an error if the caller tried to override that. */
7094
7095 static void
7096 do_bkpt (void)
7097 {
7098 /* Top 12 of 16 bits to bits 19:8. */
7099 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7100
7101 /* Bottom 4 of 16 bits to bits 3:0. */
7102 inst.instruction |= inst.operands[0].imm & 0xf;
7103 }
7104
7105 static void
7106 encode_branch (int default_reloc)
7107 {
7108 if (inst.operands[0].hasreloc)
7109 {
7110 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
7111 _("the only suffix valid here is '(plt)'"));
7112 inst.reloc.type = BFD_RELOC_ARM_PLT32;
7113 }
7114 else
7115 {
7116 inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
7117 }
7118 inst.reloc.pc_rel = 1;
7119 }
7120
/* Encode B{cond}.  Note the #ifdef layout: on ELF/EABI >= v4 targets
   a JUMP reloc is used, and the dangling "else" makes the generic
   BRANCH reloc the fallback both for older EABI versions and (with
   the #ifdef compiled out) for non-ELF targets.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
7131
/* Encode BL{cond}.  On ELF/EABI >= v4, an unconditional BL gets a
   CALL reloc and a conditional one a JUMP reloc; otherwise (older
   EABI, or non-ELF with the #ifdef compiled out) the generic BRANCH
   reloc is used — note the dangling "else" across the #endif.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
7147
7148 /* ARM V5 branch-link-exchange instruction (argument parse)
7149 BLX <target_addr> ie BLX(1)
7150 BLX{<condition>} <Rm> ie BLX(2)
7151 Unfortunately, there are two different opcodes for this mnemonic.
7152 So, the insns[].value is not used, and the code here zaps values
7153 into inst.instruction.
7154 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
7155
7156 static void
7157 do_blx (void)
7158 {
7159 if (inst.operands[0].isreg)
7160 {
7161 /* Arg is a register; the opcode provided by insns[] is correct.
7162 It is not illegal to do "blx pc", just useless. */
7163 if (inst.operands[0].reg == REG_PC)
7164 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
7165
7166 inst.instruction |= inst.operands[0].reg;
7167 }
7168 else
7169 {
7170 /* Arg is an address; this instruction cannot be executed
7171 conditionally, and the opcode must be adjusted.
7172 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
7173 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
7174 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7175 inst.instruction = 0xfa000000;
7176 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
7177 }
7178 }
7179
/* Encode BX, deciding whether an R_ARM_V4BX marker reloc is wanted.  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* NOTE: when OBJ_ELF is not defined the #ifdef drops the condition,
     so the assignment below runs unconditionally and the reloc is
     suppressed on non-ELF targets.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
7203
7204
7205 /* ARM v5TEJ. Jump to Jazelle code. */
7206
7207 static void
7208 do_bxj (void)
7209 {
7210 if (inst.operands[0].reg == REG_PC)
7211 as_tsktsk (_("use of r15 in bxj is not really useful"));
7212
7213 inst.instruction |= inst.operands[0].reg;
7214 }
7215
7216 /* Co-processor data operation:
7217 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7218 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;   /* Coprocessor number.  */
  inst.instruction |= inst.operands[1].imm << 20;  /* opcode_1.  */
  inst.instruction |= inst.operands[2].reg << 12;  /* CRd.  */
  inst.instruction |= inst.operands[3].reg << 16;  /* CRn.  */
  inst.instruction |= inst.operands[4].reg;        /* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;   /* opcode_2.  */
}
7229
7230 static void
7231 do_cmp (void)
7232 {
7233 inst.instruction |= inst.operands[0].reg << 16;
7234 encode_arm_shifter_operand (1);
7235 }
7236
7237 /* Transfer between coprocessor and ARM registers.
7238 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7239 MRC2
7240 MCR{cond}
7241 MCR2
7242
7243 No special properties. */
7244
7245 static void
7246 do_co_reg (void)
7247 {
7248 unsigned Rd;
7249
7250 Rd = inst.operands[2].reg;
7251 if (thumb_mode)
7252 {
7253 if (inst.instruction == 0xee000010
7254 || inst.instruction == 0xfe000010)
7255 /* MCR, MCR2 */
7256 reject_bad_reg (Rd);
7257 else
7258 /* MRC, MRC2 */
7259 constraint (Rd == REG_SP, BAD_SP);
7260 }
7261 else
7262 {
7263 /* MCR */
7264 if (inst.instruction == 0xe000010)
7265 constraint (Rd == REG_PC, BAD_PC);
7266 }
7267
7268
7269 inst.instruction |= inst.operands[0].reg << 8;
7270 inst.instruction |= inst.operands[1].imm << 21;
7271 inst.instruction |= Rd << 12;
7272 inst.instruction |= inst.operands[3].reg << 16;
7273 inst.instruction |= inst.operands[4].reg;
7274 inst.instruction |= inst.operands[5].imm << 5;
7275 }
7276
7277 /* Transfer between coprocessor register and pair of ARM registers.
7278 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7279 MCRR2
7280 MRRC{cond}
7281 MRRC2
7282
7283 Two XScale instructions are special cases of these:
7284
7285 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7286 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7287
7288 Result unpredictable if Rd or Rn is R15. */
7289
7290 static void
7291 do_co_reg2c (void)
7292 {
7293 unsigned Rd, Rn;
7294
7295 Rd = inst.operands[2].reg;
7296 Rn = inst.operands[3].reg;
7297
7298 if (thumb_mode)
7299 {
7300 reject_bad_reg (Rd);
7301 reject_bad_reg (Rn);
7302 }
7303 else
7304 {
7305 constraint (Rd == REG_PC, BAD_PC);
7306 constraint (Rn == REG_PC, BAD_PC);
7307 }
7308
7309 inst.instruction |= inst.operands[0].reg << 8;
7310 inst.instruction |= inst.operands[1].imm << 4;
7311 inst.instruction |= Rd << 12;
7312 inst.instruction |= Rn << 16;
7313 inst.instruction |= inst.operands[4].reg;
7314 }
7315
7316 static void
7317 do_cpsi (void)
7318 {
7319 inst.instruction |= inst.operands[0].imm << 6;
7320 if (inst.operands[1].present)
7321 {
7322 inst.instruction |= CPSI_MMOD;
7323 inst.instruction |= inst.operands[1].imm;
7324 }
7325 }
7326
7327 static void
7328 do_dbg (void)
7329 {
7330 inst.instruction |= inst.operands[0].imm;
7331 }
7332
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit nothing for this pseudo-instruction.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the IT mask (low nibble of the opcode, with a sentinel
	 bit) and base condition so subsequent instructions can be
	 checked against the IT block.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
7349
/* Encode LDM/STM: base register, register-list bitmask, and the
   writeback/user-bank flags, warning about the combinations the
   ARM ARM declares UNPREDICTABLE.  */
static void
do_ldmstm (void)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;   /* Register list as a bitmask.  */

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* Writeback flag on the register list selects the type 2/3
     (user-bank / exception-return) forms.  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }
}
7388
7389 /* ARMv5TE load-consecutive (argument parse)
7390 Mode is like LDRH.
7391
7392 LDRccD R, mode
7393 STRccD R, mode. */
7394
/* Encode LDRD/STRD.  Operand 0 is the first register of the pair
   (must be even, not r14 since its partner would be the PC); operand 1
   is the optional second register; operand 2 is the address.  */
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register defaults to the successor of the first.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
7431
/* Encode LDREX.  The address operand must be a bare [Rn] — no index,
   writeback, shift, or non-zero offset — and Rn must not be the PC.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The offset expression was verified to be zero above; make sure no
     fixup is emitted for it.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
7461
/* Encode LDREXD: even first register, optional consecutive second
   register, base register in operand 2.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  /* r14 is rejected because the implied second register would be r15.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
7477
7478 static void
7479 do_ldst (void)
7480 {
7481 inst.instruction |= inst.operands[0].reg << 12;
7482 if (!inst.operands[1].isreg)
7483 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7484 return;
7485 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7486 }
7487
7488 static void
7489 do_ldstt (void)
7490 {
7491 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7492 reject [Rn,...]. */
7493 if (inst.operands[1].preind)
7494 {
7495 constraint (inst.reloc.exp.X_op != O_constant
7496 || inst.reloc.exp.X_add_number != 0,
7497 _("this instruction requires a post-indexed address"));
7498
7499 inst.operands[1].preind = 0;
7500 inst.operands[1].postind = 1;
7501 inst.operands[1].writeback = 1;
7502 }
7503 inst.instruction |= inst.operands[0].reg << 12;
7504 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
7505 }
7506
7507 /* Halfword and signed-byte load/store operations. */
7508
7509 static void
7510 do_ldstv4 (void)
7511 {
7512 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
7513 inst.instruction |= inst.operands[0].reg << 12;
7514 if (!inst.operands[1].isreg)
7515 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7516 return;
7517 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7518 }
7519
7520 static void
7521 do_ldsttv4 (void)
7522 {
7523 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7524 reject [Rn,...]. */
7525 if (inst.operands[1].preind)
7526 {
7527 constraint (inst.reloc.exp.X_op != O_constant
7528 || inst.reloc.exp.X_add_number != 0,
7529 _("this instruction requires a post-indexed address"));
7530
7531 inst.operands[1].preind = 0;
7532 inst.operands[1].postind = 1;
7533 inst.operands[1].writeback = 1;
7534 }
7535 inst.instruction |= inst.operands[0].reg << 12;
7536 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7537 }
7538
7539 /* Co-processor register load/store.
7540 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7541 static void
7542 do_lstc (void)
7543 {
7544 inst.instruction |= inst.operands[0].reg << 8;
7545 inst.instruction |= inst.operands[1].reg << 12;
7546 encode_arm_cp_address (2, TRUE, TRUE, 0);
7547 }
7548
7549 static void
7550 do_mlas (void)
7551 {
7552 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7553 if (inst.operands[0].reg == inst.operands[1].reg
7554 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
7555 && !(inst.instruction & 0x00400000))
7556 as_tsktsk (_("Rd and Rm should be different in mla"));
7557
7558 inst.instruction |= inst.operands[0].reg << 16;
7559 inst.instruction |= inst.operands[1].reg;
7560 inst.instruction |= inst.operands[2].reg << 8;
7561 inst.instruction |= inst.operands[3].reg << 12;
7562 }
7563
7564 static void
7565 do_mov (void)
7566 {
7567 inst.instruction |= inst.operands[0].reg << 12;
7568 encode_arm_shifter_operand (1);
7569 }
7570
7571 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7572 static void
7573 do_mov16 (void)
7574 {
7575 bfd_vma imm;
7576 bfd_boolean top;
7577
7578 top = (inst.instruction & 0x00400000) != 0;
7579 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7580 _(":lower16: not allowed this instruction"));
7581 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7582 _(":upper16: not allowed instruction"));
7583 inst.instruction |= inst.operands[0].reg << 12;
7584 if (inst.reloc.type == BFD_RELOC_UNUSED)
7585 {
7586 imm = inst.reloc.exp.X_add_number;
7587 /* The value is in two pieces: 0:11, 16:19. */
7588 inst.instruction |= (imm & 0x00000fff);
7589 inst.instruction |= (imm & 0x0000f000) << 4;
7590 }
7591 }
7592
7593 static void do_vfp_nsyn_opcode (const char *);
7594
7595 static int
7596 do_vfp_nsyn_mrs (void)
7597 {
7598 if (inst.operands[0].isvec)
7599 {
7600 if (inst.operands[1].reg != 1)
7601 first_error (_("operand 1 must be FPSCR"));
7602 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
7603 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
7604 do_vfp_nsyn_opcode ("fmstat");
7605 }
7606 else if (inst.operands[1].isvec)
7607 do_vfp_nsyn_opcode ("fmrx");
7608 else
7609 return FAIL;
7610
7611 return SUCCESS;
7612 }
7613
7614 static int
7615 do_vfp_nsyn_msr (void)
7616 {
7617 if (inst.operands[0].isvec)
7618 do_vfp_nsyn_opcode ("fmxr");
7619 else
7620 return FAIL;
7621
7622 return SUCCESS;
7623 }
7624
7625 static void
7626 do_vmrs (void)
7627 {
7628 unsigned Rt = inst.operands[0].reg;
7629
7630 if (thumb_mode && inst.operands[0].reg == REG_SP)
7631 {
7632 inst.error = BAD_SP;
7633 return;
7634 }
7635
7636 /* APSR_ sets isvec. All other refs to PC are illegal. */
7637 if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
7638 {
7639 inst.error = BAD_PC;
7640 return;
7641 }
7642
7643 if (inst.operands[1].reg != 1)
7644 first_error (_("operand 1 must be FPSCR"));
7645
7646 inst.instruction |= (Rt << 12);
7647 }
7648
7649 static void
7650 do_vmsr (void)
7651 {
7652 unsigned Rt = inst.operands[1].reg;
7653
7654 if (thumb_mode)
7655 reject_bad_reg (Rt);
7656 else if (Rt == REG_PC)
7657 {
7658 inst.error = BAD_PC;
7659 return;
7660 }
7661
7662 if (inst.operands[0].reg != 1)
7663 first_error (_("operand 0 must be FPSCR"));
7664
7665 inst.instruction |= (Rt << 12);
7666 }
7667
7668 static void
7669 do_mrs (void)
7670 {
7671 if (do_vfp_nsyn_mrs () == SUCCESS)
7672 return;
7673
7674 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7675 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
7676 != (PSR_c|PSR_f),
7677 _("'CPSR' or 'SPSR' expected"));
7678 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
7679 inst.instruction |= inst.operands[0].reg << 12;
7680 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
7681 }
7682
7683 /* Two possible forms:
7684 "{C|S}PSR_<field>, Rm",
7685 "{C|S}PSR_f, #expression". */
7686
7687 static void
7688 do_msr (void)
7689 {
7690 if (do_vfp_nsyn_msr () == SUCCESS)
7691 return;
7692
7693 inst.instruction |= inst.operands[0].imm;
7694 if (inst.operands[1].isreg)
7695 inst.instruction |= inst.operands[1].reg;
7696 else
7697 {
7698 inst.instruction |= INST_IMMEDIATE;
7699 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7700 inst.reloc.pc_rel = 0;
7701 }
7702 }
7703
7704 static void
7705 do_mul (void)
7706 {
7707 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
7708
7709 if (!inst.operands[2].present)
7710 inst.operands[2].reg = inst.operands[0].reg;
7711 inst.instruction |= inst.operands[0].reg << 16;
7712 inst.instruction |= inst.operands[1].reg;
7713 inst.instruction |= inst.operands[2].reg << 8;
7714
7715 if (inst.operands[0].reg == inst.operands[1].reg
7716 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7717 as_tsktsk (_("Rd and Rm should be different in mul"));
7718 }
7719
7720 /* Long Multiply Parser
7721 UMULL RdLo, RdHi, Rm, Rs
7722 SMULL RdLo, RdHi, Rm, Rs
7723 UMLAL RdLo, RdHi, Rm, Rs
7724 SMLAL RdLo, RdHi, Rm, Rs. */
7725
7726 static void
7727 do_mull (void)
7728 {
7729 inst.instruction |= inst.operands[0].reg << 12;
7730 inst.instruction |= inst.operands[1].reg << 16;
7731 inst.instruction |= inst.operands[2].reg;
7732 inst.instruction |= inst.operands[3].reg << 8;
7733
7734 /* rdhi and rdlo must be different. */
7735 if (inst.operands[0].reg == inst.operands[1].reg)
7736 as_tsktsk (_("rdhi and rdlo must be different"));
7737
7738 /* rdhi, rdlo and rm must all be different before armv6. */
7739 if ((inst.operands[0].reg == inst.operands[2].reg
7740 || inst.operands[1].reg == inst.operands[2].reg)
7741 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
7742 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7743 }
7744
7745 static void
7746 do_nop (void)
7747 {
7748 if (inst.operands[0].present
7749 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
7750 {
7751 /* Architectural NOP hints are CPSR sets with no bits selected. */
7752 inst.instruction &= 0xf0000000;
7753 inst.instruction |= 0x0320f000;
7754 if (inst.operands[0].present)
7755 inst.instruction |= inst.operands[0].imm;
7756 }
7757 }
7758
7759 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7760 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7761 Condition defaults to COND_ALWAYS.
7762 Error if Rd, Rn or Rm are R15. */
7763
7764 static void
7765 do_pkhbt (void)
7766 {
7767 inst.instruction |= inst.operands[0].reg << 12;
7768 inst.instruction |= inst.operands[1].reg << 16;
7769 inst.instruction |= inst.operands[2].reg;
7770 if (inst.operands[3].present)
7771 encode_arm_shift (3);
7772 }
7773
7774 /* ARM V6 PKHTB (Argument Parse). */
7775
7776 static void
7777 do_pkhtb (void)
7778 {
7779 if (!inst.operands[3].present)
7780 {
7781 /* If the shift specifier is omitted, turn the instruction
7782 into pkhbt rd, rm, rn. */
7783 inst.instruction &= 0xfff00010;
7784 inst.instruction |= inst.operands[0].reg << 12;
7785 inst.instruction |= inst.operands[1].reg;
7786 inst.instruction |= inst.operands[2].reg << 16;
7787 }
7788 else
7789 {
7790 inst.instruction |= inst.operands[0].reg << 12;
7791 inst.instruction |= inst.operands[1].reg << 16;
7792 inst.instruction |= inst.operands[2].reg;
7793 encode_arm_shift (3);
7794 }
7795 }
7796
7797 /* ARMv5TE: Preload-Cache
7798
7799 PLD <addr_mode>
7800
7801 Syntactically, like LDR with B=1, W=0, L=1. */
7802
/* Encode PLD.  The address must be a pre-indexed register form with
   no writeback; all other addressing shapes are rejected.  */
static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
7816
7817 /* ARMv7: PLI <addr_mode> */
/* ARMv7: PLI <addr_mode>.  Same operand checks as PLD; note that the
   PRE_INDEX bit is cleared again after encoding (PLI's encoding
   differs from PLD in that bit).  */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  inst.instruction &= ~PRE_INDEX;
}
7832
7833 static void
7834 do_push_pop (void)
7835 {
7836 inst.operands[1] = inst.operands[0];
7837 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7838 inst.operands[0].isreg = 1;
7839 inst.operands[0].writeback = 1;
7840 inst.operands[0].reg = REG_SP;
7841 do_ldmstm ();
7842 }
7843
7844 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7845 word at the specified address and the following word
7846 respectively.
7847 Unconditionally executed.
7848 Error if Rn is R15. */
7849
7850 static void
7851 do_rfe (void)
7852 {
7853 inst.instruction |= inst.operands[0].reg << 16;
7854 if (inst.operands[0].writeback)
7855 inst.instruction |= WRITE_BACK;
7856 }
7857
7858 /* ARM V6 ssat (argument parse). */
7859
7860 static void
7861 do_ssat (void)
7862 {
7863 inst.instruction |= inst.operands[0].reg << 12;
7864 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7865 inst.instruction |= inst.operands[2].reg;
7866
7867 if (inst.operands[3].present)
7868 encode_arm_shift (3);
7869 }
7870
7871 /* ARM V6 usat (argument parse). */
7872
7873 static void
7874 do_usat (void)
7875 {
7876 inst.instruction |= inst.operands[0].reg << 12;
7877 inst.instruction |= inst.operands[1].imm << 16;
7878 inst.instruction |= inst.operands[2].reg;
7879
7880 if (inst.operands[3].present)
7881 encode_arm_shift (3);
7882 }
7883
7884 /* ARM V6 ssat16 (argument parse). */
7885
7886 static void
7887 do_ssat16 (void)
7888 {
7889 inst.instruction |= inst.operands[0].reg << 12;
7890 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7891 inst.instruction |= inst.operands[2].reg;
7892 }
7893
7894 static void
7895 do_usat16 (void)
7896 {
7897 inst.instruction |= inst.operands[0].reg << 12;
7898 inst.instruction |= inst.operands[1].imm << 16;
7899 inst.instruction |= inst.operands[2].reg;
7900 }
7901
7902 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7903 preserving the other bits.
7904
7905 setend <endian_specifier>, where <endian_specifier> is either
7906 BE or LE. */
7907
7908 static void
7909 do_setend (void)
7910 {
7911 if (inst.operands[0].imm)
7912 inst.instruction |= 0x200;
7913 }
7914
7915 static void
7916 do_shift (void)
7917 {
7918 unsigned int Rm = (inst.operands[1].present
7919 ? inst.operands[1].reg
7920 : inst.operands[0].reg);
7921
7922 inst.instruction |= inst.operands[0].reg << 12;
7923 inst.instruction |= Rm;
7924 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7925 {
7926 inst.instruction |= inst.operands[2].reg << 8;
7927 inst.instruction |= SHIFT_BY_REG;
7928 }
7929 else
7930 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7931 }
7932
7933 static void
7934 do_smc (void)
7935 {
7936 inst.reloc.type = BFD_RELOC_ARM_SMC;
7937 inst.reloc.pc_rel = 0;
7938 }
7939
7940 static void
7941 do_swi (void)
7942 {
7943 inst.reloc.type = BFD_RELOC_ARM_SWI;
7944 inst.reloc.pc_rel = 0;
7945 }
7946
7947 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7948 SMLAxy{cond} Rd,Rm,Rs,Rn
7949 SMLAWy{cond} Rd,Rm,Rs,Rn
7950 Error if any register is R15. */
7951
7952 static void
7953 do_smla (void)
7954 {
7955 inst.instruction |= inst.operands[0].reg << 16;
7956 inst.instruction |= inst.operands[1].reg;
7957 inst.instruction |= inst.operands[2].reg << 8;
7958 inst.instruction |= inst.operands[3].reg << 12;
7959 }
7960
7961 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7962 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7963 Error if any register is R15.
7964 Warning if Rdlo == Rdhi. */
7965
7966 static void
7967 do_smlal (void)
7968 {
7969 inst.instruction |= inst.operands[0].reg << 12;
7970 inst.instruction |= inst.operands[1].reg << 16;
7971 inst.instruction |= inst.operands[2].reg;
7972 inst.instruction |= inst.operands[3].reg << 8;
7973
7974 if (inst.operands[0].reg == inst.operands[1].reg)
7975 as_tsktsk (_("rdhi and rdlo must be different"));
7976 }
7977
7978 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7979 SMULxy{cond} Rd,Rm,Rs
7980 Error if any register is R15. */
7981
7982 static void
7983 do_smul (void)
7984 {
7985 inst.instruction |= inst.operands[0].reg << 16;
7986 inst.instruction |= inst.operands[1].reg;
7987 inst.instruction |= inst.operands[2].reg << 8;
7988 }
7989
7990 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7991 the same for both ARM and Thumb-2. */
7992
7993 static void
7994 do_srs (void)
7995 {
7996 int reg;
7997
7998 if (inst.operands[0].present)
7999 {
8000 reg = inst.operands[0].reg;
8001 constraint (reg != REG_SP, _("SRS base register must be r13"));
8002 }
8003 else
8004 reg = REG_SP;
8005
8006 inst.instruction |= reg << 16;
8007 inst.instruction |= inst.operands[1].imm;
8008 if (inst.operands[0].writeback || inst.operands[1].writeback)
8009 inst.instruction |= WRITE_BACK;
8010 }
8011
8012 /* ARM V6 strex (argument parse). */
8013
/* ARM V6 strex (argument parse).  Operand 2 (the address) must be a
   bare [Rn] with zero offset, and Rd must not overlap Rt or Rn.  */
static void
do_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset was verified to be zero; emit no fixup for it.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8037
/* Encode STREXD: status register, even first source register, optional
   consecutive second source register, base register in operand 3.  */
static void
do_strexd (void)
{
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  /* r14 is rejected because its implied partner would be r15.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap either source register or
     the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
8059
8060 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8061 extends it to 32-bits, and adds the result to a value in another
8062 register. You can specify a rotation by 0, 8, 16, or 24 bits
8063 before extracting the 16-bit value.
8064 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8065 Condition defaults to COND_ALWAYS.
8066 Error if any register uses R15. */
8067
8068 static void
8069 do_sxtah (void)
8070 {
8071 inst.instruction |= inst.operands[0].reg << 12;
8072 inst.instruction |= inst.operands[1].reg << 16;
8073 inst.instruction |= inst.operands[2].reg;
8074 inst.instruction |= inst.operands[3].imm << 10;
8075 }
8076
8077 /* ARM V6 SXTH.
8078
8079 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8080 Condition defaults to COND_ALWAYS.
8081 Error if any register uses R15. */
8082
8083 static void
8084 do_sxth (void)
8085 {
8086 inst.instruction |= inst.operands[0].reg << 12;
8087 inst.instruction |= inst.operands[1].reg;
8088 inst.instruction |= inst.operands[2].imm << 10;
8089 }
8090 \f
8091 /* VFP instructions. In a logical order: SP variant first, monad
8092 before dyad, arithmetic then move then load/store. */
8093
static void
do_vfp_sp_monadic (void)
{
  /* Single-precision one-operand form: encode Sd and Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
8100
static void
do_vfp_sp_dyadic (void)
{
  /* Single-precision two-operand form: encode Sd, Sn and Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
8108
static void
do_vfp_sp_compare_z (void)
{
  /* Compare-with-zero: only Sd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
8114
static void
do_vfp_dp_sp_cvt (void)
{
  /* Single-to-double conversion: Dd destination, Sm source.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
8121
static void
do_vfp_sp_dp_cvt (void)
{
  /* Double-to-single conversion: Sd destination, Dm source.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
8128
static void
do_vfp_reg_from_sp (void)
{
  /* Transfer from a VFP single register to a core register:
     core Rd in bits 12-15, VFP source in Sn.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
8135
8136 static void
8137 do_vfp_reg2_from_sp2 (void)
8138 {
8139 constraint (inst.operands[2].imm != 2,
8140 _("only two consecutive VFP SP registers allowed here"));
8141 inst.instruction |= inst.operands[0].reg << 12;
8142 inst.instruction |= inst.operands[1].reg << 16;
8143 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8144 }
8145
static void
do_vfp_sp_from_reg (void)
{
  /* Transfer from a core register to a VFP single register:
     VFP destination in Sn, core source in bits 12-15.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
8152
8153 static void
8154 do_vfp_sp2_from_reg2 (void)
8155 {
8156 constraint (inst.operands[0].imm != 2,
8157 _("only two consecutive VFP SP registers allowed here"));
8158 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
8159 inst.instruction |= inst.operands[1].reg << 12;
8160 inst.instruction |= inst.operands[2].reg << 16;
8161 }
8162
static void
do_vfp_sp_ldst (void)
{
  /* Single-precision load/store: Sd plus a coprocessor address
     (operand 1), with writeback and unindexed forms rejected.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
8169
static void
do_vfp_dp_ldst (void)
{
  /* Double-precision load/store: Dd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
8176
8177
8178 static void
8179 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
8180 {
8181 if (inst.operands[0].writeback)
8182 inst.instruction |= WRITE_BACK;
8183 else
8184 constraint (ldstm_type != VFP_LDSTMIA,
8185 _("this addressing mode requires base-register writeback"));
8186 inst.instruction |= inst.operands[0].reg << 16;
8187 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
8188 inst.instruction |= inst.operands[1].imm;
8189 }
8190
8191 static void
8192 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
8193 {
8194 int count;
8195
8196 if (inst.operands[0].writeback)
8197 inst.instruction |= WRITE_BACK;
8198 else
8199 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
8200 _("this addressing mode requires base-register writeback"));
8201
8202 inst.instruction |= inst.operands[0].reg << 16;
8203 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8204
8205 count = inst.operands[1].imm << 1;
8206 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
8207 count += 1;
8208
8209 inst.instruction |= count;
8210 }
8211
static void
do_vfp_sp_ldstmia (void)
{
  /* FLDMIAS/FSTMIAS.  */
  vfp_sp_ldstm (VFP_LDSTMIA);
}
8217
static void
do_vfp_sp_ldstmdb (void)
{
  /* FLDMDBS/FSTMDBS.  */
  vfp_sp_ldstm (VFP_LDSTMDB);
}
8223
static void
do_vfp_dp_ldstmia (void)
{
  /* FLDMIAD/FSTMIAD.  */
  vfp_dp_ldstm (VFP_LDSTMIA);
}
8229
static void
do_vfp_dp_ldstmdb (void)
{
  /* FLDMDBD/FSTMDBD.  */
  vfp_dp_ldstm (VFP_LDSTMDB);
}
8235
static void
do_vfp_xp_ldstmia (void)
{
  /* FLDMIAX/FSTMIAX (extended-precision variant).  */
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
8241
static void
do_vfp_xp_ldstmdb (void)
{
  /* FLDMDBX/FSTMDBX (extended-precision variant).  */
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
8247
static void
do_vfp_dp_rd_rm (void)
{
  /* Double-precision one-operand form: Dd, Dm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
8254
static void
do_vfp_dp_rn_rd (void)
{
  /* Operand 0 goes in the Dn field, operand 1 in Dd.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
8261
static void
do_vfp_dp_rd_rn (void)
{
  /* Operand 0 goes in the Dd field, operand 1 in Dn.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
8268
static void
do_vfp_dp_rd_rn_rm (void)
{
  /* Double-precision two-operand form: Dd, Dn, Dm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
8276
static void
do_vfp_dp_rd (void)
{
  /* Only the Dd field is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
8282
static void
do_vfp_dp_rm_rd_rn (void)
{
  /* Operands map to the Dm, Dd and Dn fields in that order.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
8290
8291 /* VFPv3 instructions. */
8292 static void
8293 do_vfp_sp_const (void)
8294 {
8295 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8296 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8297 inst.instruction |= (inst.operands[1].imm & 0x0f);
8298 }
8299
8300 static void
8301 do_vfp_dp_const (void)
8302 {
8303 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8304 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8305 inst.instruction |= (inst.operands[1].imm & 0x0f);
8306 }
8307
8308 static void
8309 vfp_conv (int srcsize)
8310 {
8311 unsigned immbits = srcsize - inst.operands[1].imm;
8312 inst.instruction |= (immbits & 1) << 5;
8313 inst.instruction |= (immbits >> 1);
8314 }
8315
static void
do_vfp_sp_conv_16 (void)
{
  /* Single-precision <-> 16-bit fixed-point conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
8322
static void
do_vfp_dp_conv_16 (void)
{
  /* Double-precision <-> 16-bit fixed-point conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
8329
static void
do_vfp_sp_conv_32 (void)
{
  /* Single-precision <-> 32-bit fixed-point conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
8336
static void
do_vfp_dp_conv_32 (void)
{
  /* Double-precision <-> 32-bit fixed-point conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
8343 \f
8344 /* FPA instructions. Also in a logical order. */
8345
8346 static void
8347 do_fpa_cmp (void)
8348 {
8349 inst.instruction |= inst.operands[0].reg << 16;
8350 inst.instruction |= inst.operands[1].reg;
8351 }
8352
static void
do_fpa_ldmstm (void)
{
  /* FPA LFM/SFM: Fd in bits 12-14, register count (1-4) folded into
     the CP_T_X/CP_T_Y bits, address in operand 2.  */
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: break;      /* Count of 4 is encoded as both bits clear.  */
    default: abort ();  /* Parser guarantees 1-4.  */
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Synthesize the offset: 12 bytes per transferred register.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      /* Turn a descending post-increment into a post-indexed form.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
8391 \f
8392 /* iWMMXt instructions: strictly in alphabetical order. */
8393
static void
do_iwmmxt_tandorc (void)
{
  /* TANDC/TORC/TEXTRC only set the flags; the destination must be
     written as r15.  */
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
8399
8400 static void
8401 do_iwmmxt_textrc (void)
8402 {
8403 inst.instruction |= inst.operands[0].reg << 12;
8404 inst.instruction |= inst.operands[1].imm;
8405 }
8406
8407 static void
8408 do_iwmmxt_textrm (void)
8409 {
8410 inst.instruction |= inst.operands[0].reg << 12;
8411 inst.instruction |= inst.operands[1].reg << 16;
8412 inst.instruction |= inst.operands[2].imm;
8413 }
8414
8415 static void
8416 do_iwmmxt_tinsr (void)
8417 {
8418 inst.instruction |= inst.operands[0].reg << 16;
8419 inst.instruction |= inst.operands[1].reg << 12;
8420 inst.instruction |= inst.operands[2].imm;
8421 }
8422
8423 static void
8424 do_iwmmxt_tmia (void)
8425 {
8426 inst.instruction |= inst.operands[0].reg << 5;
8427 inst.instruction |= inst.operands[1].reg;
8428 inst.instruction |= inst.operands[2].reg << 12;
8429 }
8430
8431 static void
8432 do_iwmmxt_waligni (void)
8433 {
8434 inst.instruction |= inst.operands[0].reg << 12;
8435 inst.instruction |= inst.operands[1].reg << 16;
8436 inst.instruction |= inst.operands[2].reg;
8437 inst.instruction |= inst.operands[3].imm << 20;
8438 }
8439
8440 static void
8441 do_iwmmxt_wmerge (void)
8442 {
8443 inst.instruction |= inst.operands[0].reg << 12;
8444 inst.instruction |= inst.operands[1].reg << 16;
8445 inst.instruction |= inst.operands[2].reg;
8446 inst.instruction |= inst.operands[3].imm << 21;
8447 }
8448
8449 static void
8450 do_iwmmxt_wmov (void)
8451 {
8452 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
8453 inst.instruction |= inst.operands[0].reg << 12;
8454 inst.instruction |= inst.operands[1].reg << 16;
8455 inst.instruction |= inst.operands[1].reg;
8456 }
8457
8458 static void
8459 do_iwmmxt_wldstbh (void)
8460 {
8461 int reloc;
8462 inst.instruction |= inst.operands[0].reg << 12;
8463 if (thumb_mode)
8464 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
8465 else
8466 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
8467 encode_arm_cp_address (1, TRUE, FALSE, reloc);
8468 }
8469
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers take the unconditional (0xf)
	 encoding and therefore may not carry a condition.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8483
static void
do_iwmmxt_wldstd (void)
{
  /* WLDRD/WSTRD.  iWMMXt2 adds a register-offset form which is not a
     standard coprocessor address, so it is encoded by hand here.  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the immediate-form address bits, then rebuild the
	 unconditional register-offset encoding.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
8506
8507 static void
8508 do_iwmmxt_wshufh (void)
8509 {
8510 inst.instruction |= inst.operands[0].reg << 12;
8511 inst.instruction |= inst.operands[1].reg << 16;
8512 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
8513 inst.instruction |= (inst.operands[2].imm & 0x0f);
8514 }
8515
8516 static void
8517 do_iwmmxt_wzero (void)
8518 {
8519 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8520 inst.instruction |= inst.operands[0].reg;
8521 inst.instruction |= inst.operands[0].reg << 12;
8522 inst.instruction |= inst.operands[0].reg << 16;
8523 }
8524
8525 static void
8526 do_iwmmxt_wrwrwr_or_imm5 (void)
8527 {
8528 if (inst.operands[2].isreg)
8529 do_rd_rn_rm ();
8530 else {
8531 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
8532 _("immediate operand requires iWMMXt2"));
8533 do_rd_rn ();
8534 if (inst.operands[2].imm == 0)
8535 {
8536 switch ((inst.instruction >> 20) & 0xf)
8537 {
8538 case 4:
8539 case 5:
8540 case 6:
8541 case 7:
8542 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8543 inst.operands[2].imm = 16;
8544 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
8545 break;
8546 case 8:
8547 case 9:
8548 case 10:
8549 case 11:
8550 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8551 inst.operands[2].imm = 32;
8552 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
8553 break;
8554 case 12:
8555 case 13:
8556 case 14:
8557 case 15:
8558 {
8559 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8560 unsigned long wrn;
8561 wrn = (inst.instruction >> 16) & 0xf;
8562 inst.instruction &= 0xff0fff0f;
8563 inst.instruction |= wrn;
8564 /* Bail out here; the instruction is now assembled. */
8565 return;
8566 }
8567 }
8568 }
8569 /* Map 32 -> 0, etc. */
8570 inst.operands[2].imm &= 0x1f;
8571 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
8572 }
8573 }
8574 \f
8575 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8576 operations first, then control, shift, and load/store. */
8577
8578 /* Insns like "foo X,Y,Z". */
8579
8580 static void
8581 do_mav_triple (void)
8582 {
8583 inst.instruction |= inst.operands[0].reg << 16;
8584 inst.instruction |= inst.operands[1].reg;
8585 inst.instruction |= inst.operands[2].reg << 12;
8586 }
8587
8588 /* Insns like "foo W,X,Y,Z".
8589 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8590
8591 static void
8592 do_mav_quad (void)
8593 {
8594 inst.instruction |= inst.operands[0].reg << 5;
8595 inst.instruction |= inst.operands[1].reg << 12;
8596 inst.instruction |= inst.operands[2].reg << 16;
8597 inst.instruction |= inst.operands[3].reg;
8598 }
8599
8600 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* DSPSC is implicit in the opcode; only the source register is
     encoded, in bits 12-15.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
8606
8607 /* Maverick shift immediate instructions.
8608 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8609 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8610
8611 static void
8612 do_mav_shift (void)
8613 {
8614 int imm = inst.operands[2].imm;
8615
8616 inst.instruction |= inst.operands[0].reg << 12;
8617 inst.instruction |= inst.operands[1].reg << 16;
8618
8619 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8620 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8621 Bit 4 should be 0. */
8622 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8623
8624 inst.instruction |= imm;
8625 }
8626 \f
8627 /* XScale instructions. Also sorted arithmetic before move. */
8628
8629 /* Xscale multiply-accumulate (argument parse)
8630 MIAcc acc0,Rm,Rs
8631 MIAPHcc acc0,Rm,Rs
8632 MIAxycc acc0,Rm,Rs. */
8633
8634 static void
8635 do_xsc_mia (void)
8636 {
8637 inst.instruction |= inst.operands[1].reg;
8638 inst.instruction |= inst.operands[2].reg << 12;
8639 }
8640
8641 /* Xscale move-accumulator-register (argument parse)
8642
8643 MARcc acc0,RdLo,RdHi. */
8644
8645 static void
8646 do_xsc_mar (void)
8647 {
8648 inst.instruction |= inst.operands[1].reg << 12;
8649 inst.instruction |= inst.operands[2].reg << 16;
8650 }
8651
8652 /* Xscale move-register-accumulator (argument parse)
8653
8654 MRAcc RdLo,RdHi,acc0. */
8655
8656 static void
8657 do_xsc_mra (void)
8658 {
8659 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8660 inst.instruction |= inst.operands[0].reg << 12;
8661 inst.instruction |= inst.operands[1].reg << 16;
8662 }
8663 \f
8664 /* Encoding functions relevant only to Thumb. */
8665
8666 /* inst.operands[i] is a shifted-register operand; encode
8667 it into inst.instruction in the format used by Thumb32. */
8668
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero immediate.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      /* LSL and ROR allow at most 31; ASR/LSR allow 32, which is
	 encoded as zero below.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* Split the amount: imm3 in bits 12-14, imm2 in bits 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
8700
8701
8702 /* inst.operands[i] was set up by parse_address. Encode it into a
8703 Thumb32 format load or store instruction. Reject forms that cannot
8704 be used with such instructions. If is_t is true, reject forms that
8705 cannot be used with a T instruction; if is_d is true, reject forms
8706 that cannot be used with a D instruction. */
8707
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* Register-offset form: [Rn, Rm {, LSL #shift}].  */
      constraint (is_pc, _("cannot use register index with PC-relative addressing"));
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  /* Only LSL #0..#3 is encodable, in bits 4-5.  */
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* Immediate pre-indexed form: [Rn, #imm]{!}.  */
      constraint (is_pc && inst.operands[i].writeback,
		  _("cannot use writeback with PC-relative addressing"));
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* Post-indexed form: [Rn], #imm — writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
8778
8779 /* Table of Thumb instructions which exist in both 16- and 32-bit
8780 encodings (the latter only in post-V6T2 cores). The index is the
8781 value used in the insns table below. When there is more than one
8782 possible 16-bit encoding for the instruction, this table always
8783 holds variant (1).
8784 Also contains several pseudo-instructions used during relaxation. */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: mnemonic enumerators.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: table of 16-bit opcodes, indexed by mnemonic.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: table of 32-bit opcodes; bit 20 of the 32-bit
   form is the S (flag-setting) bit.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
8882
8883 /* Thumb instruction encoders, in alphabetical order. */
8884
8885 /* ADDW or SUBW. */
8886
8887 static void
8888 do_t_add_sub_w (void)
8889 {
8890 int Rd, Rn;
8891
8892 Rd = inst.operands[0].reg;
8893 Rn = inst.operands[1].reg;
8894
8895 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
8896 is the SP-{plus,minus}-immediate form of the instruction. */
8897 if (Rn == REG_SP)
8898 constraint (Rd == REG_PC, BAD_PC);
8899 else
8900 reject_bad_reg (Rd);
8901
8902 inst.instruction |= (Rn << 16) | (Rd << 8);
8903 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8904 }
8905
8906 /* Parse an add or subtract instruction. We get here with inst.instruction
8907 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8908
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* Flag-setting forms prefer the narrow encoding outside an IT
	 block; non-flag-setting forms prefer it inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  if (inst.size_req != 2)
		    /* No explicit .n: allow relaxation to 32 bits.  */
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rd == REG_PC)
		{
		  /* The only 32-bit form writing PC is the
		     exception-return idiom SUBS PC, LR, #const.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Put Rd last in the two-register encoding.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: only 16-bit encodings.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg)	/* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
9109
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= Rd << 4;
    }
}
9144
9145 /* Arithmetic instructions for which there is just one 16-bit
9146 instruction encoding, and it allows only two low registers.
9147 For maximal compatibility with ARM syntax, we allow three register
9148 operands even when Thumb-32 instructions are not available, as long
9149 as the first two are identical. For instance, both "sbc r0,r1" and
9150 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.
	     The masking selects the modified-immediate encoding of
	     the opcode (BFD_RELOC_ARM_T32_IMMEDIATE fills in the
	     immediate fields).  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     The 16-bit form always sets flags, so inside an IT block
	     it is only usable for the non-flag-setting mnemonic and
	     outside one only for the flag-setting mnemonic.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  /* High registers, shifted operands, or an explicit .w
	     qualifier all force the 32-bit form.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* The 16-bit form is two-operand, so Rd must equal Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
9233
9234 /* Similarly, but for instructions where the arithmetic operation is
9235 commutative, so we can allow either of them to be different from
9236 the destination operand in a 16-bit instruction. For instance, all
9237 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9238 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.
	     The masking selects the modified-immediate encoding.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.
	     The 16-bit form always sets flags, so its usability
	     depends on whether we are inside an IT block.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* The operation is commutative, so the destination may
		 coincide with either source; swap the sources when
		 Rd matches Rn instead of Rs.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
9334
9335 static void
9336 do_t_barrier (void)
9337 {
9338 if (inst.operands[0].present)
9339 {
9340 constraint ((inst.instruction & 0xf0) != 0x40
9341 && inst.operands[0].imm != 0xf,
9342 _("bad barrier type"));
9343 inst.instruction |= inst.operands[0].imm;
9344 }
9345 else
9346 inst.instruction |= 0xf;
9347 }
9348
9349 static void
9350 do_t_bfc (void)
9351 {
9352 unsigned Rd;
9353 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9354 constraint (msb > 32, _("bit-field extends past end of register"));
9355 /* The instruction encoding stores the LSB and MSB,
9356 not the LSB and width. */
9357 Rd = inst.operands[0].reg;
9358 reject_bad_reg (Rd);
9359 inst.instruction |= Rd << 8;
9360 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
9361 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
9362 inst.instruction |= msb - 1;
9363 }
9364
9365 static void
9366 do_t_bfi (void)
9367 {
9368 int Rd, Rn;
9369 unsigned int msb;
9370
9371 Rd = inst.operands[0].reg;
9372 reject_bad_reg (Rd);
9373
9374 /* #0 in second position is alternative syntax for bfc, which is
9375 the same instruction but with REG_PC in the Rm field. */
9376 if (!inst.operands[1].isreg)
9377 Rn = REG_PC;
9378 else
9379 {
9380 Rn = inst.operands[1].reg;
9381 reject_bad_reg (Rn);
9382 }
9383
9384 msb = inst.operands[2].imm + inst.operands[3].imm;
9385 constraint (msb > 32, _("bit-field extends past end of register"));
9386 /* The instruction encoding stores the LSB and MSB,
9387 not the LSB and width. */
9388 inst.instruction |= Rd << 8;
9389 inst.instruction |= Rn << 16;
9390 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9391 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9392 inst.instruction |= msb - 1;
9393 }
9394
9395 static void
9396 do_t_bfx (void)
9397 {
9398 unsigned Rd, Rn;
9399
9400 Rd = inst.operands[0].reg;
9401 Rn = inst.operands[1].reg;
9402
9403 reject_bad_reg (Rd);
9404 reject_bad_reg (Rn);
9405
9406 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9407 _("bit-field extends past end of register"));
9408 inst.instruction |= Rd << 8;
9409 inst.instruction |= Rn << 16;
9410 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9411 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9412 inst.instruction |= inst.operands[3].imm - 1;
9413 }
9414
9415 /* ARM V5 Thumb BLX (argument parse)
9416 BLX <target_addr> which is BLX(1)
9417 BLX <Rm> which is BLX(2)
9418 Unfortunately, there are two different opcodes for this mnemonic.
9419 So, the insns[].value is not used, and the code here zaps values
9420 into inst.instruction.
9421
9422 ??? How to take advantage of the additional two bits of displacement
9423 available in Thumb32 mode? Need new relocation? */
9424
9425 static void
9426 do_t_blx (void)
9427 {
9428 set_it_insn_type_last ();
9429
9430 if (inst.operands[0].isreg)
9431 {
9432 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9433 /* We have a register, so this is BLX(2). */
9434 inst.instruction |= inst.operands[0].reg << 3;
9435 }
9436 else
9437 {
9438 /* No register. This must be BLX(1). */
9439 inst.instruction = 0xf000e800;
9440 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
9441 inst.reloc.pc_rel = 1;
9442 }
9443 }
9444
9445 static void
9446 do_t_branch (void)
9447 {
9448 int opcode;
9449 int cond;
9450
9451 cond = inst.cond;
9452 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
9453
9454 if (in_it_block ())
9455 {
9456 /* Conditional branches inside IT blocks are encoded as unconditional
9457 branches. */
9458 cond = COND_ALWAYS;
9459 }
9460 else
9461 cond = inst.cond;
9462
9463 if (cond != COND_ALWAYS)
9464 opcode = T_MNEM_bcond;
9465 else
9466 opcode = inst.instruction;
9467
9468 if (unified_syntax && inst.size_req == 4)
9469 {
9470 inst.instruction = THUMB_OP32(opcode);
9471 if (cond == COND_ALWAYS)
9472 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
9473 else
9474 {
9475 gas_assert (cond != 0xF);
9476 inst.instruction |= cond << 22;
9477 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
9478 }
9479 }
9480 else
9481 {
9482 inst.instruction = THUMB_OP16(opcode);
9483 if (cond == COND_ALWAYS)
9484 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
9485 else
9486 {
9487 inst.instruction |= cond << 8;
9488 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
9489 }
9490 /* Allow section relaxation. */
9491 if (unified_syntax && inst.size_req != 2)
9492 inst.relax = opcode;
9493 }
9494
9495 inst.reloc.pc_rel = 1;
9496 }
9497
9498 static void
9499 do_t_bkpt (void)
9500 {
9501 constraint (inst.cond != COND_ALWAYS,
9502 _("instruction is always unconditional"));
9503 if (inst.operands[0].present)
9504 {
9505 constraint (inst.operands[0].imm > 255,
9506 _("immediate value out of range"));
9507 inst.instruction |= inst.operands[0].imm;
9508 set_it_insn_type (NEUTRAL_IT_INSN);
9509 }
9510 }
9511
/* Encode a Thumb BL/BLX-style 23-bit PC-relative branch.  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
9532
9533 static void
9534 do_t_bx (void)
9535 {
9536 set_it_insn_type_last ();
9537 inst.instruction |= inst.operands[0].reg << 3;
9538 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9539 should cause the alignment to be checked once it is known. This is
9540 because BX PC only works if the instruction is word aligned. */
9541 }
9542
9543 static void
9544 do_t_bxj (void)
9545 {
9546 int Rm;
9547
9548 set_it_insn_type_last ();
9549 Rm = inst.operands[0].reg;
9550 reject_bad_reg (Rm);
9551 inst.instruction |= Rm << 16;
9552 }
9553
9554 static void
9555 do_t_clz (void)
9556 {
9557 unsigned Rd;
9558 unsigned Rm;
9559
9560 Rd = inst.operands[0].reg;
9561 Rm = inst.operands[1].reg;
9562
9563 reject_bad_reg (Rd);
9564 reject_bad_reg (Rm);
9565
9566 inst.instruction |= Rd << 8;
9567 inst.instruction |= Rm << 16;
9568 inst.instruction |= Rm;
9569 }
9570
9571 static void
9572 do_t_cps (void)
9573 {
9574 set_it_insn_type (OUTSIDE_IT_INSN);
9575 inst.instruction |= inst.operands[0].imm;
9576 }
9577
/* Encode a Thumb CPSIE/CPSID instruction.  The 32-bit form is used
   when a mode operand is present (or .w was given) and the CPU has
   non-M-profile v6 support; otherwise the 16-bit form is emitted.  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Extract the imod field from the 16-bit template and move it
	 into its position in the 32-bit encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* 16-bit form; bit 2 of the iflags ('A') needs hardware support
	 beyond the base feature set checked here.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
9605
9606 /* THUMB CPY instruction (argument parse). */
9607
9608 static void
9609 do_t_cpy (void)
9610 {
9611 if (inst.size_req == 4)
9612 {
9613 inst.instruction = THUMB_OP32 (T_MNEM_mov);
9614 inst.instruction |= inst.operands[0].reg << 8;
9615 inst.instruction |= inst.operands[1].reg;
9616 }
9617 else
9618 {
9619 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9620 inst.instruction |= (inst.operands[0].reg & 0x7);
9621 inst.instruction |= inst.operands[1].reg << 3;
9622 }
9623 }
9624
9625 static void
9626 do_t_cbz (void)
9627 {
9628 set_it_insn_type (OUTSIDE_IT_INSN);
9629 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9630 inst.instruction |= inst.operands[0].reg;
9631 inst.reloc.pc_rel = 1;
9632 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
9633 }
9634
9635 static void
9636 do_t_dbg (void)
9637 {
9638 inst.instruction |= inst.operands[0].imm;
9639 }
9640
9641 static void
9642 do_t_div (void)
9643 {
9644 unsigned Rd, Rn, Rm;
9645
9646 Rd = inst.operands[0].reg;
9647 Rn = (inst.operands[1].present
9648 ? inst.operands[1].reg : Rd);
9649 Rm = inst.operands[2].reg;
9650
9651 reject_bad_reg (Rd);
9652 reject_bad_reg (Rn);
9653 reject_bad_reg (Rm);
9654
9655 inst.instruction |= Rd << 8;
9656 inst.instruction |= Rn << 16;
9657 inst.instruction |= Rm;
9658 }
9659
9660 static void
9661 do_t_hint (void)
9662 {
9663 if (unified_syntax && inst.size_req == 4)
9664 inst.instruction = THUMB_OP32 (inst.instruction);
9665 else
9666 inst.instruction = THUMB_OP16 (inst.instruction);
9667 }
9668
/* Encode a Thumb-2 IT instruction.  The low four bits of the template
   hold the then/else mask as written; for a negated base condition the
   mask bits must be inverted so the hardware interpretation matches
   what the programmer wrote.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  /* Record the block state for subsequent instruction checking; the
     0x10 bit marks the position past the last t/e slot.  */
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* Flip every t/e bit above the terminating 1-bit; which XOR
	 constant applies depends on how many slots are in use.  */
      if ((mask & 0x7) == 0)
	/* no conversion needed */;
      else if ((mask & 0x3) == 0)
	mask ^= 0x8;
      else if ((mask & 0x1) == 0)
	mask ^= 0xC;
      else
	mask ^= 0xE;

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
9698
9699 /* Helper function used for both push/pop and ldm/stm. */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the template distinguishes load from store.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");
  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch; must be last in an IT block.  */
	    set_it_insn_type_last ();
	}

      if ((mask & (1 << base)) != 0
	  && writeback)
	as_warn (_("base register should not be in register list "
		   "when written back"));
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");

      if (mask & (1 << base))
	as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
    }

  /* A power-of-two mask means exactly one register.  */
  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Convert the one-hot mask to the register number for the
	 str/ldr Rt field.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
9764
/* Encode a Thumb load/store multiple instruction, preferring the
   16-bit LDMIA/STMIA (or PUSH/POP) forms where legal and falling back
   to the Thumb-2 encoding via encode_thumb2_ldmstm.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction: no .w, low registers
	 only, and not the decrement-before forms.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  /* The 16-bit STMIA always writes back; the 16-bit LDMIA
	     writes back exactly when the base is absent from the
	     register list — the syntax must match that.  */
	  if (inst.operands[0].reg <= 7
	      && (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask))))
	    {
	      if (inst.instruction == T_MNEM_stmia
		  && (inst.operands[1].imm & mask)
		  && (inst.operands[1].imm & (mask - 1)))
		as_warn (_("value stored for r%d is UNPREDICTABLE"),
			 inst.operands[0].reg);

	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].imm;
	      narrow = TRUE;
	    }
	  else if (inst.operands[0] .reg == REG_SP
		   && inst.operands[0].writeback)
	    {
	      /* SP-based with writeback maps onto PUSH/POP.  */
	      inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
					     ? T_MNEM_push : T_MNEM_pop);
	      inst.instruction |= inst.operands[1].imm;
	      narrow = TRUE;
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Non-unified syntax: 16-bit LDMIA/STMIA only.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
9854
9855 static void
9856 do_t_ldrex (void)
9857 {
9858 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9859 || inst.operands[1].postind || inst.operands[1].writeback
9860 || inst.operands[1].immisreg || inst.operands[1].shifted
9861 || inst.operands[1].negative,
9862 BAD_ADDR_MODE);
9863
9864 inst.instruction |= inst.operands[0].reg << 12;
9865 inst.instruction |= inst.operands[1].reg << 16;
9866 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9867 }
9868
/* Encode a Thumb-2 LDREXD instruction.  When the second destination
   register is omitted it defaults to the first plus one (which is why
   r14 cannot then be the first register).  */
static void
do_t_ldrexd (void)
{
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
9886
/* Encode a Thumb single load/store instruction, selecting among the
   many 16-bit forms (immediate offset, register offset, PC- and
   SP-relative) and the Thumb-2 32-bit form.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that targets the PC is a branch; it must be the last
     instruction of an IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Bare expression: try a literal pool entry (or a MOV).  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative accesses use dedicated 16-bit
		     opcodes with an 8-bit destination field.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Let relaxation widen the instruction if needed.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */
      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      return;
    }

  /* Non-unified (classic Thumb) syntax from here on.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Map the immediate-offset opcode onto its register-offset twin.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
10046
10047 static void
10048 do_t_ldstd (void)
10049 {
10050 if (!inst.operands[1].present)
10051 {
10052 inst.operands[1].reg = inst.operands[0].reg + 1;
10053 constraint (inst.operands[0].reg == REG_LR,
10054 _("r14 not allowed here"));
10055 }
10056 inst.instruction |= inst.operands[0].reg << 12;
10057 inst.instruction |= inst.operands[1].reg << 8;
10058 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
10059 }
10060
10061 static void
10062 do_t_ldstt (void)
10063 {
10064 inst.instruction |= inst.operands[0].reg << 12;
10065 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
10066 }
10067
10068 static void
10069 do_t_mla (void)
10070 {
10071 unsigned Rd, Rn, Rm, Ra;
10072
10073 Rd = inst.operands[0].reg;
10074 Rn = inst.operands[1].reg;
10075 Rm = inst.operands[2].reg;
10076 Ra = inst.operands[3].reg;
10077
10078 reject_bad_reg (Rd);
10079 reject_bad_reg (Rn);
10080 reject_bad_reg (Rm);
10081 reject_bad_reg (Ra);
10082
10083 inst.instruction |= Rd << 8;
10084 inst.instruction |= Rn << 16;
10085 inst.instruction |= Rm;
10086 inst.instruction |= Ra << 12;
10087 }
10088
10089 static void
10090 do_t_mlal (void)
10091 {
10092 unsigned RdLo, RdHi, Rn, Rm;
10093
10094 RdLo = inst.operands[0].reg;
10095 RdHi = inst.operands[1].reg;
10096 Rn = inst.operands[2].reg;
10097 Rm = inst.operands[3].reg;
10098
10099 reject_bad_reg (RdLo);
10100 reject_bad_reg (RdHi);
10101 reject_bad_reg (Rn);
10102 reject_bad_reg (Rm);
10103
10104 inst.instruction |= RdLo << 12;
10105 inst.instruction |= RdHi << 8;
10106 inst.instruction |= Rn << 16;
10107 inst.instruction |= Rm;
10108 }
10109
10110 static void
10111 do_t_mov_cmp (void)
10112 {
10113 unsigned Rn, Rm;
10114
10115 Rn = inst.operands[0].reg;
10116 Rm = inst.operands[1].reg;
10117
10118 if (Rn == REG_PC)
10119 set_it_insn_type_last ();
10120
10121 if (unified_syntax)
10122 {
10123 int r0off = (inst.instruction == T_MNEM_mov
10124 || inst.instruction == T_MNEM_movs) ? 8 : 16;
10125 unsigned long opcode;
10126 bfd_boolean narrow;
10127 bfd_boolean low_regs;
10128
10129 low_regs = (Rn <= 7 && Rm <= 7);
10130 opcode = inst.instruction;
10131 if (in_it_block ())
10132 narrow = opcode != T_MNEM_movs;
10133 else
10134 narrow = opcode != T_MNEM_movs || low_regs;
10135 if (inst.size_req == 4
10136 || inst.operands[1].shifted)
10137 narrow = FALSE;
10138
10139 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
10140 if (opcode == T_MNEM_movs && inst.operands[1].isreg
10141 && !inst.operands[1].shifted
10142 && Rn == REG_PC
10143 && Rm == REG_LR)
10144 {
10145 inst.instruction = T2_SUBS_PC_LR;
10146 return;
10147 }
10148
10149 if (opcode == T_MNEM_cmp)
10150 {
10151 constraint (Rn == REG_PC, BAD_PC);
10152 if (narrow)
10153 {
10154 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10155 but valid. */
10156 warn_deprecated_sp (Rm);
10157 /* R15 was documented as a valid choice for Rm in ARMv6,
10158 but as UNPREDICTABLE in ARMv7. ARM's proprietary
10159 tools reject R15, so we do too. */
10160 constraint (Rm == REG_PC, BAD_PC);
10161 }
10162 else
10163 reject_bad_reg (Rm);
10164 }
10165 else if (opcode == T_MNEM_mov
10166 || opcode == T_MNEM_movs)
10167 {
10168 if (inst.operands[1].isreg)
10169 {
10170 if (opcode == T_MNEM_movs)
10171 {
10172 reject_bad_reg (Rn);
10173 reject_bad_reg (Rm);
10174 }
10175 else if ((Rn == REG_SP || Rn == REG_PC)
10176 && (Rm == REG_SP || Rm == REG_PC))
10177 reject_bad_reg (Rm);
10178 }
10179 else
10180 reject_bad_reg (Rn);
10181 }
10182
10183 if (!inst.operands[1].isreg)
10184 {
10185 /* Immediate operand. */
10186 if (!in_it_block () && opcode == T_MNEM_mov)
10187 narrow = 0;
10188 if (low_regs && narrow)
10189 {
10190 inst.instruction = THUMB_OP16 (opcode);
10191 inst.instruction |= Rn << 8;
10192 if (inst.size_req == 2)
10193 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10194 else
10195 inst.relax = opcode;
10196 }
10197 else
10198 {
10199 inst.instruction = THUMB_OP32 (inst.instruction);
10200 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10201 inst.instruction |= Rn << r0off;
10202 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10203 }
10204 }
10205 else if (inst.operands[1].shifted && inst.operands[1].immisreg
10206 && (inst.instruction == T_MNEM_mov
10207 || inst.instruction == T_MNEM_movs))
10208 {
10209 /* Register shifts are encoded as separate shift instructions. */
10210 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
10211
10212 if (in_it_block ())
10213 narrow = !flags;
10214 else
10215 narrow = flags;
10216
10217 if (inst.size_req == 4)
10218 narrow = FALSE;
10219
10220 if (!low_regs || inst.operands[1].imm > 7)
10221 narrow = FALSE;
10222
10223 if (Rn != Rm)
10224 narrow = FALSE;
10225
10226 switch (inst.operands[1].shift_kind)
10227 {
10228 case SHIFT_LSL:
10229 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
10230 break;
10231 case SHIFT_ASR:
10232 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
10233 break;
10234 case SHIFT_LSR:
10235 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
10236 break;
10237 case SHIFT_ROR:
10238 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
10239 break;
10240 default:
10241 abort ();
10242 }
10243
10244 inst.instruction = opcode;
10245 if (narrow)
10246 {
10247 inst.instruction |= Rn;
10248 inst.instruction |= inst.operands[1].imm << 3;
10249 }
10250 else
10251 {
10252 if (flags)
10253 inst.instruction |= CONDS_BIT;
10254
10255 inst.instruction |= Rn << 8;
10256 inst.instruction |= Rm << 16;
10257 inst.instruction |= inst.operands[1].imm;
10258 }
10259 }
10260 else if (!narrow)
10261 {
10262 /* Some mov with immediate shift have narrow variants.
10263 Register shifts are handled above. */
10264 if (low_regs && inst.operands[1].shifted
10265 && (inst.instruction == T_MNEM_mov
10266 || inst.instruction == T_MNEM_movs))
10267 {
10268 if (in_it_block ())
10269 narrow = (inst.instruction == T_MNEM_mov);
10270 else
10271 narrow = (inst.instruction == T_MNEM_movs);
10272 }
10273
10274 if (narrow)
10275 {
10276 switch (inst.operands[1].shift_kind)
10277 {
10278 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10279 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10280 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10281 default: narrow = FALSE; break;
10282 }
10283 }
10284
10285 if (narrow)
10286 {
10287 inst.instruction |= Rn;
10288 inst.instruction |= Rm << 3;
10289 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10290 }
10291 else
10292 {
10293 inst.instruction = THUMB_OP32 (inst.instruction);
10294 inst.instruction |= Rn << r0off;
10295 encode_thumb32_shifted_operand (1);
10296 }
10297 }
10298 else
10299 switch (inst.instruction)
10300 {
10301 case T_MNEM_mov:
10302 inst.instruction = T_OPCODE_MOV_HR;
10303 inst.instruction |= (Rn & 0x8) << 4;
10304 inst.instruction |= (Rn & 0x7);
10305 inst.instruction |= Rm << 3;
10306 break;
10307
10308 case T_MNEM_movs:
10309 /* We know we have low registers at this point.
10310 Generate ADD Rd, Rs, #0. */
10311 inst.instruction = T_OPCODE_ADD_I3;
10312 inst.instruction |= Rn;
10313 inst.instruction |= Rm << 3;
10314 break;
10315
10316 case T_MNEM_cmp:
10317 if (low_regs)
10318 {
10319 inst.instruction = T_OPCODE_CMP_LR;
10320 inst.instruction |= Rn;
10321 inst.instruction |= Rm << 3;
10322 }
10323 else
10324 {
10325 inst.instruction = T_OPCODE_CMP_HR;
10326 inst.instruction |= (Rn & 0x8) << 4;
10327 inst.instruction |= (Rn & 0x7);
10328 inst.instruction |= Rm << 3;
10329 }
10330 break;
10331 }
10332 return;
10333 }
10334
10335 inst.instruction = THUMB_OP16 (inst.instruction);
10336
10337 /* PR 10443: Do not silently ignore shifted operands. */
10338 constraint (inst.operands[1].shifted,
10339 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
10340
10341 if (inst.operands[1].isreg)
10342 {
10343 if (Rn < 8 && Rm < 8)
10344 {
10345 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
10346 since a MOV instruction produces unpredictable results. */
10347 if (inst.instruction == T_OPCODE_MOV_I8)
10348 inst.instruction = T_OPCODE_ADD_I3;
10349 else
10350 inst.instruction = T_OPCODE_CMP_LR;
10351
10352 inst.instruction |= Rn;
10353 inst.instruction |= Rm << 3;
10354 }
10355 else
10356 {
10357 if (inst.instruction == T_OPCODE_MOV_I8)
10358 inst.instruction = T_OPCODE_MOV_HR;
10359 else
10360 inst.instruction = T_OPCODE_CMP_HR;
10361 do_t_cpy ();
10362 }
10363 }
10364 else
10365 {
10366 constraint (Rn > 7,
10367 _("only lo regs allowed with immediate"));
10368 inst.instruction |= Rn << 8;
10369 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10370 }
10371 }
10372
static void
do_t_mov16 (void)
{
  /* Encode Thumb-2 MOVW/MOVT with a 16-bit immediate or a
     :lower16:/:upper16: relocation.  */
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 23 of the opcode distinguishes MOVT (top) from MOVW.  */
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* Plain constant: scatter it into the imm4:i:imm3:imm8 fields of
	 the T32 encoding now; no fixup needed.  */
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
10405
static void
do_t_mvn_tst (void)
{
  /* Encode MVN/MVNS/TST/TEQ/CMP/CMN in Thumb state, selecting the
     16-bit encoding when operands, size suffix and IT context allow.  */
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS place their destination at bit 8; the comparison
	 instructions place their first operand at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: only the 16-bit register forms exist.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
10484
static void
do_t_mrs (void)
{
  /* Encode Thumb MRS.  VFP VMRS aliases are dispatched first; the
     special-register operand is then validated against the selected
     architecture feature set.  */
  unsigned Rd;
  int flags;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
  if (flags == 0)
    {
      /* No PSR field bits set: this register form requires arm_ext_m.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		  _("'CPSR' or 'SPSR' expected"));
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= inst.operands[1].imm & 0xff;
}
10518
static void
do_t_msr (void)
{
  /* Encode Thumb MSR.  VFP VMSR aliases are dispatched first; the
     Thumb encoding only accepts a register source.  */
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      /* Bits beyond the low byte: a PSR field-mask form, needs arm_ext_v1.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      /* Low-byte-only register number: requires arm_ext_m; the f field
	 is implied.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
		  _("selected processor does not support "
		    "requested special purpose register"));
      flags |= PSR_f;
    }

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
10553
static void
do_t_mul (void)
{
  /* Encode MUL/MULS.  The 16-bit form requires the destination to
     overlap one source and all registers to be low.  */
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: MUL Rd, Rm is MUL Rd, Rd, Rm.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      /* Pre-unified syntax: only the 16-bit flag-setting form exists.  */
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
10616
10617 static void
10618 do_t_mull (void)
10619 {
10620 unsigned RdLo, RdHi, Rn, Rm;
10621
10622 RdLo = inst.operands[0].reg;
10623 RdHi = inst.operands[1].reg;
10624 Rn = inst.operands[2].reg;
10625 Rm = inst.operands[3].reg;
10626
10627 reject_bad_reg (RdLo);
10628 reject_bad_reg (RdHi);
10629 reject_bad_reg (Rn);
10630 reject_bad_reg (Rm);
10631
10632 inst.instruction |= RdLo << 12;
10633 inst.instruction |= RdHi << 8;
10634 inst.instruction |= Rn << 16;
10635 inst.instruction |= Rm;
10636
10637 if (RdLo == RdHi)
10638 as_tsktsk (_("rdhi and rdlo must be different"));
10639 }
10640
static void
do_t_nop (void)
{
  /* Encode NOP and the NOP-compatible hint instructions.  */
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  /* Hints above 15, or an explicit .w, need the 32-bit form.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    inst.instruction = 0x46c0;	/* Pre-Thumb-2 NOP (mov r8, r8).  */
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
10673
static void
do_t_neg (void)
{
  /* Encode NEG/NEGS, choosing narrow vs wide by IT context, register
     range, and any explicit size suffix.  */
  if (unified_syntax)
    {
      bfd_boolean narrow;

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      /* Pre-unified syntax: low registers, non-flag-setting only.  */
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
10714
10715 static void
10716 do_t_orn (void)
10717 {
10718 unsigned Rd, Rn;
10719
10720 Rd = inst.operands[0].reg;
10721 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
10722
10723 reject_bad_reg (Rd);
10724 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
10725 reject_bad_reg (Rn);
10726
10727 inst.instruction |= Rd << 8;
10728 inst.instruction |= Rn << 16;
10729
10730 if (!inst.operands[2].isreg)
10731 {
10732 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10733 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10734 }
10735 else
10736 {
10737 unsigned Rm;
10738
10739 Rm = inst.operands[2].reg;
10740 reject_bad_reg (Rm);
10741
10742 constraint (inst.operands[2].shifted
10743 && inst.operands[2].immisreg,
10744 _("shift must be constant"));
10745 encode_thumb32_shifted_operand (2);
10746 }
10747 }
10748
10749 static void
10750 do_t_pkhbt (void)
10751 {
10752 unsigned Rd, Rn, Rm;
10753
10754 Rd = inst.operands[0].reg;
10755 Rn = inst.operands[1].reg;
10756 Rm = inst.operands[2].reg;
10757
10758 reject_bad_reg (Rd);
10759 reject_bad_reg (Rn);
10760 reject_bad_reg (Rm);
10761
10762 inst.instruction |= Rd << 8;
10763 inst.instruction |= Rn << 16;
10764 inst.instruction |= Rm;
10765 if (inst.operands[3].present)
10766 {
10767 unsigned int val = inst.reloc.exp.X_add_number;
10768 constraint (inst.reloc.exp.X_op != O_constant,
10769 _("expression too complex"));
10770 inst.instruction |= (val & 0x1c) << 10;
10771 inst.instruction |= (val & 0x03) << 6;
10772 }
10773 }
10774
10775 static void
10776 do_t_pkhtb (void)
10777 {
10778 if (!inst.operands[3].present)
10779 {
10780 unsigned Rtmp;
10781
10782 inst.instruction &= ~0x00000020;
10783
10784 /* PR 10168. Swap the Rm and Rn registers. */
10785 Rtmp = inst.operands[1].reg;
10786 inst.operands[1].reg = inst.operands[2].reg;
10787 inst.operands[2].reg = Rtmp;
10788 }
10789 do_t_pkhbt ();
10790 }
10791
static void
do_t_pld (void)
{
  /* Encode PLD: validate any register offset, then emit the standard
     Thumb-2 addressing mode.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
10800
static void
do_t_push_pop (void)
{
  /* Encode PUSH/POP.  Try, in order: the plain 16-bit form (low regs
     only), the 16-bit form with LR/PC added, then the 32-bit
     LDM/STM form.  */
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if ((inst.instruction == T_MNEM_push
	    && (mask & ~0xff) == 1 << REG_LR)
	   || (inst.instruction == T_MNEM_pop
	       && (mask & ~0xff) == 1 << REG_PC))
    {
      /* Low registers plus exactly LR (push) or PC (pop): still 16-bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
10834
10835 static void
10836 do_t_rbit (void)
10837 {
10838 unsigned Rd, Rm;
10839
10840 Rd = inst.operands[0].reg;
10841 Rm = inst.operands[1].reg;
10842
10843 reject_bad_reg (Rd);
10844 reject_bad_reg (Rm);
10845
10846 inst.instruction |= Rd << 8;
10847 inst.instruction |= Rm << 16;
10848 inst.instruction |= Rm;
10849 }
10850
static void
do_t_rev (void)
{
  /* Encode REV-family instructions: 16-bit when both registers are low
     and no .w suffix was given, otherwise the 32-bit Thumb-2 form.  */
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      /* The T32 encoding carries Rm in both source register fields.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
10879
10880 static void
10881 do_t_rrx (void)
10882 {
10883 unsigned Rd, Rm;
10884
10885 Rd = inst.operands[0].reg;
10886 Rm = inst.operands[1].reg;
10887
10888 reject_bad_reg (Rd);
10889 reject_bad_reg (Rm);
10890
10891 inst.instruction |= Rd << 8;
10892 inst.instruction |= Rm;
10893 }
10894
static void
do_t_rsb (void)
{
  /* Encode RSB/RSBS.  RSB{S} Rd, Rs, #0 with low registers can use
     the 16-bit NEG encoding; everything else is 32-bit.  */
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg /* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S flag of the 32-bit encoding.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
10949
static void
do_t_setend (void)
{
  /* Encode SETEND: not permitted inside an IT block; bit 3 carries the
     endianness operand.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
10957
static void
do_t_shift (void)
{
  /* Encode the shift instructions (ASR/LSL/LSR/ROR and their
     flag-setting forms), in both immediate and register-shift
     variants, choosing 16-bit or 32-bit encodings.  */

  /* Two-operand form: shift in place.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit immediate ROR encoding.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-shift form needs Rd == Rn and a low shift
	 register.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;
	    }
	  else
	    {
	      /* Wide immediate shifts are encoded as MOV{S} Rd, Rm,
		 shift #n via the shifted-operand machinery.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: 16-bit encodings only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
11093
11094 static void
11095 do_t_simd (void)
11096 {
11097 unsigned Rd, Rn, Rm;
11098
11099 Rd = inst.operands[0].reg;
11100 Rn = inst.operands[1].reg;
11101 Rm = inst.operands[2].reg;
11102
11103 reject_bad_reg (Rd);
11104 reject_bad_reg (Rn);
11105 reject_bad_reg (Rm);
11106
11107 inst.instruction |= Rd << 8;
11108 inst.instruction |= Rn << 16;
11109 inst.instruction |= Rm;
11110 }
11111
11112 static void
11113 do_t_simd2 (void)
11114 {
11115 unsigned Rd, Rn, Rm;
11116
11117 Rd = inst.operands[0].reg;
11118 Rm = inst.operands[1].reg;
11119 Rn = inst.operands[2].reg;
11120
11121 reject_bad_reg (Rd);
11122 reject_bad_reg (Rn);
11123 reject_bad_reg (Rm);
11124
11125 inst.instruction |= Rd << 8;
11126 inst.instruction |= Rn << 16;
11127 inst.instruction |= Rm;
11128 }
11129
11130 static void
11131 do_t_smc (void)
11132 {
11133 unsigned int value = inst.reloc.exp.X_add_number;
11134 constraint (inst.reloc.exp.X_op != O_constant,
11135 _("expression too complex"));
11136 inst.reloc.type = BFD_RELOC_UNUSED;
11137 inst.instruction |= (value & 0xf000) >> 12;
11138 inst.instruction |= (value & 0x0ff0);
11139 inst.instruction |= (value & 0x000f) << 16;
11140 }
11141
static void
do_t_ssat_usat (int bias)
{
  /* Common encoder for SSAT (bias == 1: the saturation position is
     stored off by one) and USAT (bias == 0).  */
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit. */

	  /* The shift amount splits into imm3 and imm2 fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
11179
static void
do_t_ssat (void)
{
  /* SSAT: signed saturate; bias of 1 on the saturation position.  */
  do_t_ssat_usat (1);
}
11185
11186 static void
11187 do_t_ssat16 (void)
11188 {
11189 unsigned Rd, Rn;
11190
11191 Rd = inst.operands[0].reg;
11192 Rn = inst.operands[2].reg;
11193
11194 reject_bad_reg (Rd);
11195 reject_bad_reg (Rn);
11196
11197 inst.instruction |= Rd << 8;
11198 inst.instruction |= inst.operands[1].imm - 1;
11199 inst.instruction |= Rn << 16;
11200 }
11201
11202 static void
11203 do_t_strex (void)
11204 {
11205 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
11206 || inst.operands[2].postind || inst.operands[2].writeback
11207 || inst.operands[2].immisreg || inst.operands[2].shifted
11208 || inst.operands[2].negative,
11209 BAD_ADDR_MODE);
11210
11211 inst.instruction |= inst.operands[0].reg << 8;
11212 inst.instruction |= inst.operands[1].reg << 12;
11213 inst.instruction |= inst.operands[2].reg << 16;
11214 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11215 }
11216
11217 static void
11218 do_t_strexd (void)
11219 {
11220 if (!inst.operands[2].present)
11221 inst.operands[2].reg = inst.operands[1].reg + 1;
11222
11223 constraint (inst.operands[0].reg == inst.operands[1].reg
11224 || inst.operands[0].reg == inst.operands[2].reg
11225 || inst.operands[0].reg == inst.operands[3].reg
11226 || inst.operands[1].reg == inst.operands[2].reg,
11227 BAD_OVERLAP);
11228
11229 inst.instruction |= inst.operands[0].reg;
11230 inst.instruction |= inst.operands[1].reg << 12;
11231 inst.instruction |= inst.operands[2].reg << 8;
11232 inst.instruction |= inst.operands[3].reg << 16;
11233 }
11234
11235 static void
11236 do_t_sxtah (void)
11237 {
11238 unsigned Rd, Rn, Rm;
11239
11240 Rd = inst.operands[0].reg;
11241 Rn = inst.operands[1].reg;
11242 Rm = inst.operands[2].reg;
11243
11244 reject_bad_reg (Rd);
11245 reject_bad_reg (Rn);
11246 reject_bad_reg (Rm);
11247
11248 inst.instruction |= Rd << 8;
11249 inst.instruction |= Rn << 16;
11250 inst.instruction |= Rm;
11251 inst.instruction |= inst.operands[3].imm << 4;
11252 }
11253
static void
do_t_sxth (void)
{
  /* Encode the extend instructions (SXTH etc.): 16-bit when both
     registers are low and no rotation is given; otherwise the 32-bit
     form with the rotation in bits [5:4].  */
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
11289
static void
do_t_swi (void)
{
  /* Encode SWI: the immediate field is filled in by a dedicated
     relocation.  */
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
11295
static void
do_t_tb (void)
{
  /* Encode TBB/TBH.  Bit 4 of the opcode distinguishes the halfword
     variant.  */
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* A table branch must terminate any IT block it appears in.  */
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only the halfword variant permits a shifted index.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
11317
static void
do_t_usat (void)
{
  /* USAT: unsigned saturate; no bias on the saturation position.  */
  do_t_ssat_usat (0);
}
11323
11324 static void
11325 do_t_usat16 (void)
11326 {
11327 unsigned Rd, Rn;
11328
11329 Rd = inst.operands[0].reg;
11330 Rn = inst.operands[2].reg;
11331
11332 reject_bad_reg (Rd);
11333 reject_bad_reg (Rn);
11334
11335 inst.instruction |= Rd << 8;
11336 inst.instruction |= inst.operands[1].imm;
11337 inst.instruction |= Rn << 16;
11338 }
11339
11340 /* Neon instruction encoder helpers. */
11341
11342 /* Encodings for the different types for various Neon opcodes. */
11343
11344 /* An "invalid" code for the following tables. */
11345 #define N_INV -1u
11346
11347 struct neon_tab_entry
11348 {
11349 unsigned integer;
11350 unsigned float_or_poly;
11351 unsigned scalar_or_imm;
11352 };
11353
11354 /* Map overloaded Neon opcodes to their respective encodings. */
11355 #define NEON_ENC_TAB \
11356 X(vabd, 0x0000700, 0x1200d00, N_INV), \
11357 X(vmax, 0x0000600, 0x0000f00, N_INV), \
11358 X(vmin, 0x0000610, 0x0200f00, N_INV), \
11359 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
11360 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
11361 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
11362 X(vadd, 0x0000800, 0x0000d00, N_INV), \
11363 X(vsub, 0x1000800, 0x0200d00, N_INV), \
11364 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
11365 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
11366 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
11367 /* Register variants of the following two instructions are encoded as
11368 vcge / vcgt with the operands reversed. */ \
11369 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
11370 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
11371 X(vfma, N_INV, 0x0000c10, N_INV), \
11372 X(vfms, N_INV, 0x0200c10, N_INV), \
11373 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
11374 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
11375 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
11376 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
11377 X(vmlal, 0x0800800, N_INV, 0x0800240), \
11378 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
11379 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
11380 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
11381 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
11382 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
11383 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
11384 X(vshl, 0x0000400, N_INV, 0x0800510), \
11385 X(vqshl, 0x0000410, N_INV, 0x0800710), \
11386 X(vand, 0x0000110, N_INV, 0x0800030), \
11387 X(vbic, 0x0100110, N_INV, 0x0800030), \
11388 X(veor, 0x1000110, N_INV, N_INV), \
11389 X(vorn, 0x0300110, N_INV, 0x0800010), \
11390 X(vorr, 0x0200110, N_INV, 0x0800010), \
11391 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
11392 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
11393 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
11394 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
11395 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
11396 X(vst1, 0x0000000, 0x0800000, N_INV), \
11397 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
11398 X(vst2, 0x0000100, 0x0800100, N_INV), \
11399 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
11400 X(vst3, 0x0000200, 0x0800200, N_INV), \
11401 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
11402 X(vst4, 0x0000300, 0x0800300, N_INV), \
11403 X(vmovn, 0x1b20200, N_INV, N_INV), \
11404 X(vtrn, 0x1b20080, N_INV, N_INV), \
11405 X(vqmovn, 0x1b20200, N_INV, N_INV), \
11406 X(vqmovun, 0x1b20240, N_INV, N_INV), \
11407 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
11408 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
11409 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
11410 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
11411 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
11412 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
11413 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
11414 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
11415 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
11416
/* Symbolic mnemonic identifiers, one N_MNEM_<opc> per NEON_ENC_TAB row.
   inst.instruction temporarily holds one of these values until NEON_ENCODE
   replaces it with the concrete bit pattern from neon_enc_tab.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};
11423
/* Encoding table indexed by N_MNEM_* value: the integer, float-or-polynomial
   and scalar-or-immediate instruction patterns for each mnemonic (N_INV where
   a variant does not exist).  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
11430
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
/* Each macro looks up one column of neon_enc_tab; several are aliases for the
   same column under a name matching the variant they encode (e.g. ARMREG and
   INTERLV both use the .integer pattern).  The low 28 bits of X select the
   table row; see also NEON_ENC_SINGLE_/NEON_ENC_DOUBLE_ which preserve the
   top nibble (condition/U bit) of the original value.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
11445
/* Replace inst.instruction (currently an N_MNEM_* identifier) with the
   concrete encoding for the requested variant (INTEGER, FLOAT, SCALAR, ...),
   and flag the instruction as Neon so check_neon_suffixes accepts it.  */
#define NEON_ENCODE(type, inst) \
  do \
    { \
      inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
      inst.is_neon = 1; \
    } \
  while (0)
11453
/* Reject a Neon datatype suffix (e.g. ".i32") written on an instruction that
   never went through a Neon encoder (inst.is_neon still clear).  Expands to a
   `return', so only usable inside void encoding functions.  */
#define check_neon_suffixes \
  do \
    { \
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
        { \
          as_bad (_("invalid neon suffix for non neon instruction")); \
          return; \
        } \
    } \
  while (0)
11464
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
   - enumerations of the form NS_DDR to be used as arguments to
     neon_select_shape.
   - a table classifying shapes into single, double, quad, mixed.
   - a table used to drive neon_select_shape.

   Each X(count, operand-list, class) row is expanded three times below with
   different definitions of X (and S2/S3/S4).  */

#define NEON_SHAPE_DEF \
  X(3, (D, D, D), DOUBLE), \
  X(3, (Q, Q, Q), QUAD), \
  X(3, (D, D, I), DOUBLE), \
  X(3, (Q, Q, I), QUAD), \
  X(3, (D, D, S), DOUBLE), \
  X(3, (Q, Q, S), QUAD), \
  X(2, (D, D), DOUBLE), \
  X(2, (Q, Q), QUAD), \
  X(2, (D, S), DOUBLE), \
  X(2, (Q, S), QUAD), \
  X(2, (D, R), DOUBLE), \
  X(2, (Q, R), QUAD), \
  X(2, (D, I), DOUBLE), \
  X(2, (Q, I), QUAD), \
  X(3, (D, L, D), DOUBLE), \
  X(2, (D, Q), MIXED), \
  X(2, (Q, D), MIXED), \
  X(3, (D, Q, I), MIXED), \
  X(3, (Q, D, I), MIXED), \
  X(3, (Q, D, D), MIXED), \
  X(3, (D, Q, Q), MIXED), \
  X(3, (Q, Q, D), MIXED), \
  X(3, (Q, D, S), MIXED), \
  X(3, (D, Q, S), MIXED), \
  X(4, (D, D, D, I), DOUBLE), \
  X(4, (Q, Q, Q, I), QUAD), \
  X(2, (F, F), SINGLE), \
  X(3, (F, F, F), SINGLE), \
  X(2, (F, I), SINGLE), \
  X(2, (F, D), MIXED), \
  X(2, (D, F), MIXED), \
  X(3, (F, F, I), MIXED), \
  X(4, (R, R, F, F), SINGLE), \
  X(4, (F, F, R, R), SINGLE), \
  X(3, (D, R, R), DOUBLE), \
  X(3, (R, R, D), DOUBLE), \
  X(2, (S, R), SINGLE), \
  X(2, (R, S), SINGLE), \
  X(2, (F, R), SINGLE), \
  X(2, (R, F), SINGLE)
11523
/* First expansion of NEON_SHAPE_DEF: paste the operand letters into NS_*
   enumerator names, e.g. X(3, (D, D, D), DOUBLE) becomes NS_DDD.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL	/* Sentinel terminating neon_select_shape argument lists.  */
};

#undef X
#undef S2
#undef S3
#undef S4
11540
/* Coarse classification of a shape: which register width dominates it.  */
enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};
11548
/* Second expansion of NEON_SHAPE_DEF: the class column only, giving a table
   indexed by enum neon_shape (same order as the enum above).  */
#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
11557
/* One enumerator per operand-kind letter used in NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_F,		/* VFP S<n> register.  */
  SE_D,		/* Neon D<n> register.  */
  SE_Q,		/* Neon Q<n> register.  */
  SE_I,		/* Immediate.  */
  SE_S,		/* Scalar.  */
  SE_R,		/* ARM core register.  */
  SE_L		/* D<n> register list.  */
};
11568
/* Register widths of above.  Indexed by enum neon_shape_el; zero for the
   kinds (immediates, register lists) that have no register width.  */
static unsigned neon_shape_el_size[] =
{
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0	/* SE_L.  */
};
11580
/* Decoded form of one shape: how many operands it has and the kind of each,
   used by neon_select_shape to match against parsed operands.  */
struct neon_shape_info
{
  unsigned els;					/* Number of operands.  */
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];	/* Kind of each operand.  */
};
11586
/* Third expansion of NEON_SHAPE_DEF: build the operand-kind arrays, giving a
   struct neon_shape_info table indexed by enum neon_shape.  */
#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
11602
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.

   Note the modifier bits (N_DBL .. N_SIZ) deliberately reuse the low bit
   positions of the concrete type bits: they are only interpreted when N_EQK
   is set, so the two namespaces never collide.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_F64
};
11645
/* Convenience combinations of the masks above, used in neon_check_type
   argument lists.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
11658
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.

   The variadic arguments are further enum neon_shape candidates, terminated
   by NS_NULL.  Side effect: a missing operand 1 is copied from operand 0
   before matching (see FIXME below).  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn; the first one whose every element kind
     matches the corresponding parsed operand wins.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      /* VFP single: a vector register flagged single, not quad.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      /* Neon D register: vector, neither quad nor single.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* ARM core register: a register that is not a vector.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      /* Neon Q register: vector, quad, not single.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate: neither a register nor a scalar.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      /* Scalar: a scalar that is not a register.  */
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      /* Register lists always match here.  */
	      break;
	    }
	}
      if (matches)
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
11752
11753 /* True if SHAPE is predominantly a quadword operation (most of the time, this
11754 means the Q bit should be set). */
11755
11756 static int
11757 neon_quad (enum neon_shape shape)
11758 {
11759 return neon_shape_class[shape] == SC_QUAD;
11760 }
11761
11762 static void
11763 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
11764 unsigned *g_size)
11765 {
11766 /* Allow modification to be made to types which are constrained to be
11767 based on the key element, based on bits set alongside N_EQK. */
11768 if ((typebits & N_EQK) != 0)
11769 {
11770 if ((typebits & N_HLF) != 0)
11771 *g_size /= 2;
11772 else if ((typebits & N_DBL) != 0)
11773 *g_size *= 2;
11774 if ((typebits & N_SGN) != 0)
11775 *g_type = NT_signed;
11776 else if ((typebits & N_UNS) != 0)
11777 *g_type = NT_unsigned;
11778 else if ((typebits & N_INT) != 0)
11779 *g_type = NT_integer;
11780 else if ((typebits & N_FLT) != 0)
11781 *g_type = NT_float;
11782 else if ((typebits & N_SIZ) != 0)
11783 *g_type = NT_untyped;
11784 }
11785 }
11786
11787 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
11788 operand type, i.e. the single type specified in a Neon instruction when it
11789 is the only one given. */
11790
11791 static struct neon_type_el
11792 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
11793 {
11794 struct neon_type_el dest = *key;
11795
11796 gas_assert ((thisarg & N_EQK) != 0);
11797
11798 neon_modify_type_size (thisarg, &dest.type, &dest.size);
11799
11800 return dest;
11801 }
11802
11803 /* Convert Neon type and size into compact bitmask representation. */
11804
11805 static enum neon_type_mask
11806 type_chk_of_el_type (enum neon_el_type type, unsigned size)
11807 {
11808 switch (type)
11809 {
11810 case NT_untyped:
11811 switch (size)
11812 {
11813 case 8: return N_8;
11814 case 16: return N_16;
11815 case 32: return N_32;
11816 case 64: return N_64;
11817 default: ;
11818 }
11819 break;
11820
11821 case NT_integer:
11822 switch (size)
11823 {
11824 case 8: return N_I8;
11825 case 16: return N_I16;
11826 case 32: return N_I32;
11827 case 64: return N_I64;
11828 default: ;
11829 }
11830 break;
11831
11832 case NT_float:
11833 switch (size)
11834 {
11835 case 16: return N_F16;
11836 case 32: return N_F32;
11837 case 64: return N_F64;
11838 default: ;
11839 }
11840 break;
11841
11842 case NT_poly:
11843 switch (size)
11844 {
11845 case 8: return N_P8;
11846 case 16: return N_P16;
11847 default: ;
11848 }
11849 break;
11850
11851 case NT_signed:
11852 switch (size)
11853 {
11854 case 8: return N_S8;
11855 case 16: return N_S16;
11856 case 32: return N_S32;
11857 case 64: return N_S64;
11858 default: ;
11859 }
11860 break;
11861
11862 case NT_unsigned:
11863 switch (size)
11864 {
11865 case 8: return N_U8;
11866 case 16: return N_U16;
11867 case 32: return N_U32;
11868 case 64: return N_U64;
11869 default: ;
11870 }
11871 break;
11872
11873 default: ;
11874 }
11875
11876 return N_UTYP;
11877 }
11878
11879 /* Convert compact Neon bitmask type representation to a type and size. Only
11880 handles the case where a single bit is set in the mask. */
11881
11882 static int
11883 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
11884 enum neon_type_mask mask)
11885 {
11886 if ((mask & N_EQK) != 0)
11887 return FAIL;
11888
11889 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
11890 *size = 8;
11891 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
11892 *size = 16;
11893 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
11894 *size = 32;
11895 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
11896 *size = 64;
11897 else
11898 return FAIL;
11899
11900 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
11901 *type = NT_signed;
11902 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
11903 *type = NT_unsigned;
11904 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
11905 *type = NT_integer;
11906 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
11907 *type = NT_untyped;
11908 else if ((mask & (N_P8 | N_P16)) != 0)
11909 *type = NT_poly;
11910 else if ((mask & (N_F32 | N_F64)) != 0)
11911 *type = NT_float;
11912 else
11913 return FAIL;
11914
11915 return SUCCESS;
11916 }
11917
11918 /* Modify a bitmask of allowed types. This is only needed for type
11919 relaxation. */
11920
11921 static unsigned
11922 modify_types_allowed (unsigned allowed, unsigned mods)
11923 {
11924 unsigned size;
11925 enum neon_el_type type;
11926 unsigned destmask;
11927 int i;
11928
11929 destmask = 0;
11930
11931 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
11932 {
11933 if (el_type_of_type_chk (&type, &size,
11934 (enum neon_type_mask) (allowed & i)) == SUCCESS)
11935 {
11936 neon_modify_type_size (mods, &type, &size);
11937 destmask |= type_chk_of_el_type (type, size);
11938 }
11939 }
11940
11941 return destmask;
11942 }
11943
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.

   ELS is the number of type arguments; NS the selected shape (used for VFP
   register-width checks); the varargs are ELS neon_type_mask words.  Returns
   the key operand's resolved type, or {NT_invtype, -1} on error.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE short-circuits all checking.  */
      if (thisarg == N_IGNORE_TYPE)
        {
          va_end (ap);
          return badtype;
        }
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
        key_el = i;
    }
  va_end (ap);

  /* A type may be given on the mnemonic or on the operands, never both.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
        {
          first_error (_("types specified in both the mnemonic and operands"));
          return badtype;
        }

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      /* The single given type is the key's; derive the others from it.  */
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
        if (j != key_el)
          inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
         after each operand. We allow some flexibility here; as long as the
         "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
        if (inst.operands[j].vectype.type != NT_invtype)
          inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
        {
          for (j = 0; j < els; j++)
            if (inst.operands[j].vectype.type == NT_invtype)
              inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
                                                      types[j]);
        }
      else
        {
          first_error (_("operand types can't be inferred"));
          return badtype;
        }
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key's type/size/constraints; pass 1
     validates every operand against the (possibly key-relative) rules.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
        {
          unsigned thisarg = types[i];
          /* On pass 1, expand key-relative constraints into the concrete set
             of allowed types.  */
          unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
            ? modify_types_allowed (key_allowed, thisarg) : thisarg;
          enum neon_el_type g_type = inst.vectype.el[i].type;
          unsigned g_size = inst.vectype.el[i].size;

          /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
          if ((g_type == NT_signed || g_type == NT_unsigned)
              && (types_allowed & N_SU_ALL) == 0)
            g_type = NT_integer;

          /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
          if ((g_size == 8 && (types_allowed & N_8) != 0)
              || (g_size == 16 && (types_allowed & N_16) != 0)
              || (g_size == 32 && (types_allowed & N_32) != 0)
              || (g_size == 64 && (types_allowed & N_64) != 0))
            g_type = NT_untyped;

          if (pass == 0)
            {
              if ((thisarg & N_KEY) != 0)
                {
                  k_type = g_type;
                  k_size = g_size;
                  key_allowed = thisarg & ~N_KEY;
                }
            }
          else
            {
              if ((thisarg & N_VFP) != 0)
                {
                  enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
                  unsigned regwidth = neon_shape_el_size[regshape], match;

                  /* In VFP mode, operands must match register widths. If we
                     have a key operand, use its width, else use the width of
                     the current operand.  */
                  if (k_size != -1u)
                    match = k_size;
                  else
                    match = g_size;

                  if (regwidth != match)
                    {
                      first_error (_("operand size must match register width"));
                      return badtype;
                    }
                }

              if ((thisarg & N_EQK) == 0)
                {
                  unsigned given_type = type_chk_of_el_type (g_type, g_size);

                  if ((given_type & types_allowed) == 0)
                    {
                      first_error (_("bad type in Neon instruction"));
                      return badtype;
                    }
                }
              else
                {
                  /* Key-relative operand: its type must equal the key's after
                     applying the modifier bits.  */
                  enum neon_el_type mod_k_type = k_type;
                  unsigned mod_k_size = k_size;
                  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
                  if (g_type != mod_k_type || g_size != mod_k_size)
                    {
                      first_error (_("inconsistent types in Neon instruction"));
                      return badtype;
                    }
                }
            }
        }
    }

  return inst.vectype.el[key_el];
}
12122
12123 /* Neon-style VFP instruction forwarding. */
12124
12125 /* Thumb VFP instructions have 0xE in the condition field. */
12126
12127 static void
12128 do_vfp_cond_or_thumb (void)
12129 {
12130 inst.is_neon = 1;
12131
12132 if (thumb_mode)
12133 inst.instruction |= 0xe0000000;
12134 else
12135 inst.instruction |= inst.cond << 28;
12136 }
12137
12138 /* Look up and encode a simple mnemonic, for use as a helper function for the
12139 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
12140 etc. It is assumed that operand parsing has already been done, and that the
12141 operands are in the form expected by the given opcode (this isn't necessarily
12142 the same as the form in which they were parsed, hence some massaging must
12143 take place before this function is called).
12144 Checks current arch version against that in the looked-up opcode. */
12145
12146 static void
12147 do_vfp_nsyn_opcode (const char *opname)
12148 {
12149 const struct asm_opcode *opcode;
12150
12151 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
12152
12153 if (!opcode)
12154 abort ();
12155
12156 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
12157 thumb_mode ? *opcode->tvariant : *opcode->avariant),
12158 _(BAD_FPU));
12159
12160 inst.is_neon = 1;
12161
12162 if (thumb_mode)
12163 {
12164 inst.instruction = opcode->tvalue;
12165 opcode->tencode ();
12166 }
12167 else
12168 {
12169 inst.instruction = (inst.cond << 28) | opcode->avalue;
12170 opcode->aencode ();
12171 }
12172 }
12173
12174 static void
12175 do_vfp_nsyn_add_sub (enum neon_shape rs)
12176 {
12177 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
12178
12179 if (rs == NS_FFF)
12180 {
12181 if (is_add)
12182 do_vfp_nsyn_opcode ("fadds");
12183 else
12184 do_vfp_nsyn_opcode ("fsubs");
12185 }
12186 else
12187 {
12188 if (is_add)
12189 do_vfp_nsyn_opcode ("faddd");
12190 else
12191 do_vfp_nsyn_opcode ("fsubd");
12192 }
12193 }
12194
12195 /* Check operand types to see if this is a VFP instruction, and if so call
12196 PFN (). */
12197
12198 static int
12199 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
12200 {
12201 enum neon_shape rs;
12202 struct neon_type_el et;
12203
12204 switch (args)
12205 {
12206 case 2:
12207 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12208 et = neon_check_type (2, rs,
12209 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12210 break;
12211
12212 case 3:
12213 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12214 et = neon_check_type (3, rs,
12215 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12216 break;
12217
12218 default:
12219 abort ();
12220 }
12221
12222 if (et.type != NT_invtype)
12223 {
12224 pfn (rs);
12225 return SUCCESS;
12226 }
12227 else
12228 inst.error = NULL;
12229
12230 return FAIL;
12231 }
12232
12233 static void
12234 do_vfp_nsyn_mla_mls (enum neon_shape rs)
12235 {
12236 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
12237
12238 if (rs == NS_FFF)
12239 {
12240 if (is_mla)
12241 do_vfp_nsyn_opcode ("fmacs");
12242 else
12243 do_vfp_nsyn_opcode ("fnmacs");
12244 }
12245 else
12246 {
12247 if (is_mla)
12248 do_vfp_nsyn_opcode ("fmacd");
12249 else
12250 do_vfp_nsyn_opcode ("fnmacd");
12251 }
12252 }
12253
12254 static void
12255 do_vfp_nsyn_fma_fms (enum neon_shape rs)
12256 {
12257 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
12258
12259 if (rs == NS_FFF)
12260 {
12261 if (is_fma)
12262 do_vfp_nsyn_opcode ("ffmas");
12263 else
12264 do_vfp_nsyn_opcode ("ffnmas");
12265 }
12266 else
12267 {
12268 if (is_fma)
12269 do_vfp_nsyn_opcode ("ffmad");
12270 else
12271 do_vfp_nsyn_opcode ("ffnmad");
12272 }
12273 }
12274
12275 static void
12276 do_vfp_nsyn_mul (enum neon_shape rs)
12277 {
12278 if (rs == NS_FFF)
12279 do_vfp_nsyn_opcode ("fmuls");
12280 else
12281 do_vfp_nsyn_opcode ("fmuld");
12282 }
12283
12284 static void
12285 do_vfp_nsyn_abs_neg (enum neon_shape rs)
12286 {
12287 int is_neg = (inst.instruction & 0x80) != 0;
12288 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
12289
12290 if (rs == NS_FF)
12291 {
12292 if (is_neg)
12293 do_vfp_nsyn_opcode ("fnegs");
12294 else
12295 do_vfp_nsyn_opcode ("fabss");
12296 }
12297 else
12298 {
12299 if (is_neg)
12300 do_vfp_nsyn_opcode ("fnegd");
12301 else
12302 do_vfp_nsyn_opcode ("fabsd");
12303 }
12304 }
12305
12306 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
12307 insns belong to Neon, and are handled elsewhere. */
12308
12309 static void
12310 do_vfp_nsyn_ldm_stm (int is_dbmode)
12311 {
12312 int is_ldm = (inst.instruction & (1 << 20)) != 0;
12313 if (is_ldm)
12314 {
12315 if (is_dbmode)
12316 do_vfp_nsyn_opcode ("fldmdbs");
12317 else
12318 do_vfp_nsyn_opcode ("fldmias");
12319 }
12320 else
12321 {
12322 if (is_dbmode)
12323 do_vfp_nsyn_opcode ("fstmdbs");
12324 else
12325 do_vfp_nsyn_opcode ("fstmias");
12326 }
12327 }
12328
12329 static void
12330 do_vfp_nsyn_sqrt (void)
12331 {
12332 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12333 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12334
12335 if (rs == NS_FF)
12336 do_vfp_nsyn_opcode ("fsqrts");
12337 else
12338 do_vfp_nsyn_opcode ("fsqrtd");
12339 }
12340
12341 static void
12342 do_vfp_nsyn_div (void)
12343 {
12344 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12345 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
12346 N_F32 | N_F64 | N_KEY | N_VFP);
12347
12348 if (rs == NS_FFF)
12349 do_vfp_nsyn_opcode ("fdivs");
12350 else
12351 do_vfp_nsyn_opcode ("fdivd");
12352 }
12353
12354 static void
12355 do_vfp_nsyn_nmul (void)
12356 {
12357 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12358 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
12359 N_F32 | N_F64 | N_KEY | N_VFP);
12360
12361 if (rs == NS_FFF)
12362 {
12363 NEON_ENCODE (SINGLE, inst);
12364 do_vfp_sp_dyadic ();
12365 }
12366 else
12367 {
12368 NEON_ENCODE (DOUBLE, inst);
12369 do_vfp_dp_rd_rn_rm ();
12370 }
12371 do_vfp_cond_or_thumb ();
12372 }
12373
/* Encode vcmp/vcmpe.  Register-register compares use the monadic encoders;
   compares against an immediate (which must be #0) are rewritten to the
   internal vcmpz/vcmpez mnemonics before encoding.  */
static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
        {
          NEON_ENCODE (SINGLE, inst);
          do_vfp_sp_monadic ();
        }
      else
        {
          NEON_ENCODE (DOUBLE, inst);
          do_vfp_dp_rd_rm ();
        }
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      /* Switch from the vcmp/vcmpe mnemonic to the compare-with-zero form;
         the N_MNEM_* values are consecutive so plain arithmetic works.  */
      switch (inst.instruction & 0x0fffffff)
        {
        case N_MNEM_vcmp:
          inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
          break;
        case N_MNEM_vcmpe:
          inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
          break;
        default:
          abort ();
        }

      if (rs == NS_FI)
        {
          NEON_ENCODE (SINGLE, inst);
          do_vfp_sp_compare_z ();
        }
      else
        {
          NEON_ENCODE (DOUBLE, inst);
          do_vfp_dp_rd ();
        }
    }
  do_vfp_cond_or_thumb ();
}
12423
12424 static void
12425 nsyn_insert_sp (void)
12426 {
12427 inst.operands[1] = inst.operands[0];
12428 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
12429 inst.operands[0].reg = REG_SP;
12430 inst.operands[0].isreg = 1;
12431 inst.operands[0].writeback = 1;
12432 inst.operands[0].present = 1;
12433 }
12434
12435 static void
12436 do_vfp_nsyn_push (void)
12437 {
12438 nsyn_insert_sp ();
12439 if (inst.operands[1].issingle)
12440 do_vfp_nsyn_opcode ("fstmdbs");
12441 else
12442 do_vfp_nsyn_opcode ("fstmdbd");
12443 }
12444
12445 static void
12446 do_vfp_nsyn_pop (void)
12447 {
12448 nsyn_insert_sp ();
12449 if (inst.operands[1].issingle)
12450 do_vfp_nsyn_opcode ("fldmias");
12451 else
12452 do_vfp_nsyn_opcode ("fldmiad");
12453 }
12454
12455 /* Fix up Neon data-processing instructions, ORing in the correct bits for
12456 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
12457
12458 static void
12459 neon_dp_fixup (struct arm_it* insn)
12460 {
12461 unsigned int i = insn->instruction;
12462 insn->is_neon = 1;
12463
12464 if (thumb_mode)
12465 {
12466 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
12467 if (i & (1 << 24))
12468 i |= 1 << 28;
12469
12470 i &= ~(1 << 24);
12471
12472 i |= 0xef000000;
12473 }
12474 else
12475 i |= 0xf2000000;
12476
12477 insn->instruction = i;
12478 }
12479
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the lowest set bit; sizes are powers of
     two, so subtracting 4 maps 8/16/32/64 to 0/1/2/3.  */
  unsigned lowest_set = (unsigned) ffs (x);
  return lowest_set - 4;
}
12488
/* Split a 5-bit Neon register number into the 4-bit field and the "extra"
   high bit (the D/N/M bit) used throughout the encodings below.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
12491
12492 /* Encode insns with bit pattern:
12493
12494 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12495 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
12496
12497 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
12498 different meaning for some instruction. */
12499
12500 static void
12501 neon_three_same (int isquad, int ubit, int size)
12502 {
12503 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12504 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12505 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12506 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12507 inst.instruction |= LOW4 (inst.operands[2].reg);
12508 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12509 inst.instruction |= (isquad != 0) << 6;
12510 inst.instruction |= (ubit != 0) << 24;
12511 if (size != -1)
12512 inst.instruction |= neon_logbits (size) << 20;
12513
12514 neon_dp_fixup (&inst);
12515 }
12516
12517 /* Encode instructions of the form:
12518
12519 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
12520 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
12521
12522 Don't write size if SIZE == -1. */
12523
12524 static void
12525 neon_two_same (int qbit, int ubit, int size)
12526 {
12527 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12528 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12529 inst.instruction |= LOW4 (inst.operands[1].reg);
12530 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12531 inst.instruction |= (qbit != 0) << 6;
12532 inst.instruction |= (ubit != 0) << 24;
12533
12534 if (size != -1)
12535 inst.instruction |= neon_logbits (size) << 18;
12536
12537 neon_dp_fixup (&inst);
12538 }
12539
12540 /* Neon instruction encoders, in approximate order of appearance. */
12541
static void
do_neon_dyadic_i_su (void)
{
  /* Three-register same-shape operation whose element type may be any
     signed or unsigned 8/16/32-bit integer (N_SU_32); the U bit is set
     for unsigned element types.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_32 | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
12550
static void
do_neon_dyadic_i64_su (void)
{
  /* As do_neon_dyadic_i_su, but also permitting 64-bit integer element
     types (N_SU_ALL).  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
12559
/* Encode a two-register immediate-shift instruction.  ET gives the
   element type; IMMBITS is the already-computed immediate field value
   (placed at bit 16).  The element size in bytes (et.size / 8) is
   split so that its bit 3 (i.e. 64-bit elements) lands in bit 7 and
   its low three bits in bits 19-21.  The U bit (24) is written from
   UVAL only when WRITE_UBIT is set.  */
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
                unsigned immbits)
{
  unsigned size = et.size >> 3;		/* Element size in bytes.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;	/* Q bit.  */
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;		/* 1 only for 64-bit elements.  */
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;	/* U bit.  */

  neon_dp_fixup (&inst);
}
12578
static void
do_neon_shl_imm (void)
{
  /* VSHL: immediate form when operand 2 is not a register, otherwise
     the three-register form (which needs its sources swapped; see the
     comment below).  */
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
           vshl.xx Dd, Dm, Dn
         whereas other 3-register operations encoded by neon_three_same have
         syntax like:
           vadd.xx Dd, Dn, Dm
         (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
         here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
12610
static void
do_neon_qshl_imm (void)
{
  /* VQSHL: like do_neon_shl_imm, but the immediate form also writes
     the U bit from the (signed/unsigned) element type.  */
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);

      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
                      inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
12638
12639 static void
12640 do_neon_rshl (void)
12641 {
12642 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12643 struct neon_type_el et = neon_check_type (3, rs,
12644 N_EQK, N_EQK, N_SU_ALL | N_KEY);
12645 unsigned int tmp;
12646
12647 tmp = inst.operands[2].reg;
12648 inst.operands[2].reg = inst.operands[1].reg;
12649 inst.operands[1].reg = tmp;
12650 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12651 }
12652
/* Choose the cmode encoding for an immediate logic operation (used by
   do_neon_logic for VBIC/VORR and their VAND/VORN pseudo-forms).
   IMMEDIATE is the value, SIZE its element width in bits; the eight
   significant bits are stored in *IMMBITS.  Returns the 4-bit cmode,
   or FAIL (after reporting an error) if the value is not encodable.  */
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
         FIXME is this the intended semantics? There doesn't seem much point in
         accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit element with exactly one non-zero byte: cmode selects
         which byte position it occupies.  */
      if (immediate == (immediate & 0x000000ff))
        {
          *immbits = immediate;
          return 0x1;
        }
      else if (immediate == (immediate & 0x0000ff00))
        {
          *immbits = immediate >> 8;
          return 0x3;
        }
      else if (immediate == (immediate & 0x00ff0000))
        {
          *immbits = immediate >> 16;
          return 0x5;
        }
      else if (immediate == (immediate & 0xff000000))
        {
          *immbits = immediate >> 24;
          return 0x7;
        }
      /* Otherwise fall back to the 16-bit encodings, which is only
         valid when both halves of the 32-bit value are identical.  */
      if ((immediate & 0xffff) != (immediate >> 16))
        goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit element with exactly one non-zero byte.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
12708
12709 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
12710 A, B, C, D. */
12711
static int
neon_bits_same_in_bytes (unsigned imm)
{
  /* IMM qualifies when each of its four bytes is either all-zeros or
     all-ones, i.e. the value has the form described in the comment
     above.  */
  unsigned shift;

  for (shift = 0; shift < 32; shift += 8)
    {
      unsigned byte = (imm >> shift) & 0xff;

      if (byte != 0x00 && byte != 0xff)
        return 0;
    }
  return 1;
}
12720
12721 /* For immediate of above form, return 0bABCD. */
12722
static unsigned
neon_squash_bits (unsigned imm)
{
  /* Gather bit 0 of each byte of IMM into a contiguous 4-bit result:
     bit 0 -> result bit 0, bit 8 -> 1, bit 16 -> 2, bit 24 -> 3.  */
  unsigned abcd = 0;
  unsigned byte;

  for (byte = 0; byte < 4; byte++)
    abcd |= ((imm >> (byte * 8)) & 0x01) << byte;

  return abcd;
}
12729
12730 /* Compress quarter-float representation to 0b...000 abcdefgh. */
12731
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Pack the 8 significant bits of a quarter-float IEEE single: the
     sign (bit 31) moves to result bit 7, and bits 25:19 (the low
     exponent bit plus top mantissa bits) move to result bits 6:0.  */
  unsigned low_bits = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return sign_bit | low_bits;
}
12737
12738 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
12739 the instruction. *OP is passed as the initial value of the op field, and
12740 may be set to a different value depending on the constant (i.e.
12741 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
12742 MVN). If the immediate looks like a repeated pattern then also
12743 try smaller element sizes. */
12744
static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
                         unsigned *immbits, int *op, int size,
                         enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-float immediate: cmode 0xF, only valid for 32-bit MOV.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
        return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* Bytes each all-zeros or all-ones: the I64 encoding (cmode 0xE)
         with OP forced to 1, as described in the comment above.  */
      if (neon_bits_same_in_bytes (immhi)
          && neon_bits_same_in_bytes (immlo))
        {
          if (*op == 1)
            return FAIL;
          *immbits = (neon_squash_bits (immhi) << 4)
                     | neon_squash_bits (immlo);
          *op = 1;
          return 0xe;
        }

      /* Otherwise a 64-bit value is only encodable if it is a repeated
         32-bit pattern; continue with the 32-bit encodings below.  */
      if (immhi != immlo)
        return FAIL;
    }

  if (size >= 32)
    {
      /* 32-bit element, one non-zero byte.  */
      if (immlo == (immlo & 0x000000ff))
        {
          *immbits = immlo;
          return 0x0;
        }
      else if (immlo == (immlo & 0x0000ff00))
        {
          *immbits = immlo >> 8;
          return 0x2;
        }
      else if (immlo == (immlo & 0x00ff0000))
        {
          *immbits = immlo >> 16;
          return 0x4;
        }
      else if (immlo == (immlo & 0xff000000))
        {
          *immbits = immlo >> 24;
          return 0x6;
        }
      /* One non-zero byte followed by trailing ones ("shifted ones"
         forms, cmode 0xC/0xD).  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
        {
          *immbits = (immlo >> 8) & 0xff;
          return 0xc;
        }
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
        {
          *immbits = (immlo >> 16) & 0xff;
          return 0xd;
        }

      /* Try a 16-bit element if the value is a repeated 16-bit
         pattern.  */
      if ((immlo & 0xffff) != (immlo >> 16))
        return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* 16-bit element, one non-zero byte.  */
      if (immlo == (immlo & 0x000000ff))
        {
          *immbits = immlo;
          return 0x8;
        }
      else if (immlo == (immlo & 0x0000ff00))
        {
          *immbits = immlo >> 8;
          return 0xa;
        }

      /* Try an 8-bit element if the value is a repeated byte.  */
      if ((immlo & 0xff) != (immlo >> 8))
        return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
        return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
12847
12848 /* Write immediate bits [7:0] to the following locations:
12849
12850 |28/24|23 19|18 16|15 4|3 0|
12851 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
12852
12853 This function is used by VMOV/VMVN/VORR/VBIC. */
12854
static void
neon_write_immbits (unsigned immbits)
{
  /* Scatter the 8-bit immediate into the a:bcd:efgh fields shown in
     the diagram above: bits 3:0 -> 3:0, bits 6:4 -> 18:16,
     bit 7 -> 24.  */
  inst.instruction |= immbits & 0xf;
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  inst.instruction |= ((immbits >> 7) & 0x1) << 24;
}
12862
12863 /* Invert low-order SIZE bits of XHI:XLO. */
12864
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  /* Complement the low-order SIZE bits of the pair *XHI:*XLO.  Either
     pointer may be NULL, in which case that half is left alone.  SIZE
     must be a valid Neon element size (8, 16, 32 or 64).  */
  unsigned lo_mask;

  switch (size)
    {
    case 8:
      lo_mask = 0xff;
      break;

    case 16:
      lo_mask = 0xffff;
      break;

    case 32:
    case 64:
      lo_mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = (~*xlo) & lo_mask;

  /* Only a 64-bit invert touches the high word.  */
  if (xhi && size == 64)
    *xhi = (~*xhi) & 0xffffffff;
}
12899
/* Encode the Neon bitwise-logic instructions.  The three-register form
   needs no type-dependent fields; the immediate form maps VAND/VORN
   onto VBIC/VORR by inverting the immediate and then picks a cmode
   encoding for it.  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      /* Recover the mnemonic from the opcode value (condition bits
         masked off) to dispatch below.  */
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
        return;

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[1].imm;
      if (et.size == 64)
        {
          /* .i64 is a pseudo-op, so the immediate must be a repeating
             pattern.  */
          if (immbits != (inst.operands[1].regisimm ?
                          inst.operands[1].reg : 0))
            {
              /* Set immbits to an invalid constant.  */
              immbits = 0xdeadbeef;
            }
        }

      switch (opcode)
        {
        case N_MNEM_vbic:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorr:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vand:
          /* Pseudo-instruction for VBIC.  */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorn:
          /* Pseudo-instruction for VORR.  */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        default:
          abort ();
        }

      /* neon_cmode_for_logic_imm has already reported the error.  */
      if (cmode == FAIL)
        return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
12976
static void
do_neon_bitfield (void)
{
  /* Bitfield-select class: types are ignored and neither the U bit nor
     the size field is written (both come from the opcode bitmask).  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_IGNORE_TYPE);
  neon_three_same (neon_quad (rs), 0, -1);
}
12984
/* Shared encoder for dyadic operations that accept both integer and
   float element types.  TYPES is the permitted type mask, DESTBITS
   extra type bits for the destination operand.  For integer types the
   U bit is set when the element type equals UBIT_MEANING (callers pass
   NT_untyped when the U bit must stay clear); float types use the
   float-specific opcode variant with no size/U encoding.  */
static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
                  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
13003
static void
do_neon_dyadic_if_su (void)
{
  /* Signed/unsigned integer or F32 element types; U set for unsigned.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
13009
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
13017
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
13025
/* Checks that vfp_or_neon_is_neon may be asked to perform; the values
   are OR-able flags.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject/adjust the condition field.  */
  NEON_CHECK_ARCH = 2	/* Require Neon support in the target FPU.  */
};
13031
13032 /* Call this function if an instruction which may have belonged to the VFP or
13033 Neon instruction sets, but turned out to be a Neon instruction (due to the
13034 operand types involved, etc.). We have to check and/or fix-up a couple of
13035 things:
13036
13037 - Make sure the user hasn't attempted to make a Neon instruction
13038 conditional.
13039 - Alter the value in the condition code field if necessary.
13040 - Make sure that the arch supports Neon instructions.
13041
13042 Which of these operations take place depends on bits from enum
13043 vfp_or_neon_is_neon_bits.
13044
13045 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13046 current instruction's condition is COND_ALWAYS, the condition field is
13047 changed to inst.uncond_value. This is necessary because instructions shared
13048 between VFP and Neon may be conditional for the VFP variants only, and the
13049 unconditional Neon version must have, e.g., 0xF in the condition field. */
13050
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
        {
          first_error (_(BAD_COND));
          return FAIL;
        }
      /* Substitute the unconditional encoding's condition field value
         (see the warning in the comment above this function).  */
      if (inst.uncond_value != -1)
        inst.instruction |= inst.uncond_value << 28;
    }

  /* Neon instructions need the Neon v1 extension in the target FPU.  */
  if ((check & NEON_CHECK_ARCH)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
13075
static void
do_neon_addsub_if_i (void)
{
  /* Add/subtract shared by VFP and Neon syntax: prefer the VFP
     encoding when the operands allow it.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
13089
13090 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
13091 result to be:
13092 V<op> A,B (A is operand 0, B is operand 2)
13093 to mean:
13094 V<op> A,B,A
13095 not:
13096 V<op> A,B,B
13097 so handle that case specially. */
13098
13099 static void
13100 neon_exchange_operands (void)
13101 {
13102 void *scratch = alloca (sizeof (inst.operands[0]));
13103 if (inst.operands[1].present)
13104 {
13105 /* Swap operands[1] and operands[2]. */
13106 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
13107 inst.operands[1] = inst.operands[2];
13108 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
13109 }
13110 else
13111 {
13112 inst.operands[1] = inst.operands[2];
13113 inst.operands[2] = inst.operands[0];
13114 }
13115 }
13116
/* Encode a comparison.  REGTYPES/IMMTYPES are the element types
   permitted for the register and immediate (#0) forms respectively.
   INVERT exchanges the source operands of the register form (see
   neon_exchange_operands).  */
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
        neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against zero: two-register form with the F bit (10)
         set for float element types.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
13144
static void
do_neon_cmp (void)
{
  /* Comparison whose operand order matches the encoding (no swap).  */
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
13150
static void
do_neon_cmp_inv (void)
{
  /* Comparison encoded with its source operands exchanged.  */
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
13156
static void
do_neon_ceq (void)
{
  /* Equality compare: integer/float types, order-independent so no
     operand swap.  */
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
13162
13163 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
13164 scalars, which are encoded in 5 bits, M : Rm.
13165 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13166 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13167 index in M. */
13168
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* Pack register number and element index into the 5-bit M:Rm field
     as described in the comment above; reject out-of-range scalars.  */
  switch (elsize)
    {
    case 16:
      /* 16-bit: register in Rm[2:0], index in M:Rm[3].  */
      if (regno > 7 || elno > 3)
        goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      /* 32-bit: register in Rm[3:0], index in M.  */
      if (regno > 15 || elno > 1)
        goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
13194
13195 /* Encode multiply / multiply-accumulate scalar instructions. */
13196
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* The scalar operand occupies the 5-bit M:Rm field; see the comment
     above neon_scalar_for_mul for the packing.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;	/* F bit.  */
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;		/* U bit.  */

  neon_dp_fixup (&inst);
}
13219
static void
do_neon_mac_maybe_scalar (void)
{
  /* Multiply-accumulate: prefer the VFP MLA/MLS encoding when the
     operands allow it; otherwise use the Neon by-scalar form when
     operand 2 is a scalar, or the plain three-register form.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen. Do this to stop the "U" bit being
         affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
13244
static void
do_neon_fmac (void)
{
  /* Fused multiply-accumulate: prefer the VFP FMA/FMS encoding when
     the operands allow it.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
13256
static void
do_neon_tst (void)
{
  /* Untyped 8/16/32-bit elements; the U bit stays clear, only the
     size field is written.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
13265
13266 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
13267 same types as the MAC equivalents. The polynomial type for this instruction
13268 is encoded the same as the integer type. */
13269
static void
do_neon_mul (void)
{
  /* Prefer the VFP multiply encoding when the operands allow it.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The by-scalar form accepts the same types as the MAC equivalents
     (see the comment above); the register form additionally allows P8,
     encoded like the integer types.  */
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
13284
static void
do_neon_qdmulh (void)
{
  /* Signed 16/32-bit elements only; by-scalar or three-register form.  */
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
13306
static void
do_neon_fcmp_absolute (void)
{
  /* Absolute compare, F32 only; the U bit is forced to 1.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}
13315
static void
do_neon_fcmp_absolute_inv (void)
{
  /* As do_neon_fcmp_absolute, with the source operands exchanged.  */
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
13322
static void
do_neon_step (void)
{
  /* F32-only three-register operation; the size field comes from the
     opcode bitmask and the U bit stays clear.  */
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
13330
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  /* Prefer the VFP abs/neg encoding when the operands allow it.  */
  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* Neon two-register form: signed integer or F32 elements, with the
     F bit (10) set for float types.  */
  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
13356
static void
do_neon_sli (void)
{
  /* Shift-and-insert (left): the shift amount must satisfy
     0 <= imm < element size, and is encoded directly.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
              _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
13368
static void
do_neon_sri (void)
{
  /* Shift-and-insert (right): the shift amount must satisfy
     1 <= imm <= element size, and is encoded as size - imm.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
13380
static void
do_neon_qshlu_imm (void)
{
  /* Saturating shift left producing unsigned results from signed
     operands; shift amount must satisfy 0 <= imm < element size.  */
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
              _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
13397
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  /* Opcode bits 6-7 distinguish the unsigned (0xc0) and signed (0x80)
     variants.  */
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  /* Narrowing: the encoded size is the destination's (half-width).  */
  neon_two_same (0, 1, et.size / 2);
}
13412
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results. Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  /* Narrowing: the encoded size is the destination's (half-width).  */
  neon_two_same (0, 1, et.size / 2);
}
13422
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  /* The shift is encoded as size - imm; U comes from the element type.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
13449
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
13479
static void
do_neon_movn (void)
{
  /* Narrowing move: Q source, D destination of half the element width;
     the encoded size is the destination's.  */
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
13488
static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for narrowing operation"));
  /* The shift amount is encoded as size - imm.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
13513
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
        N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
13543
13544 /* Check the various types for the VCVT instruction, and return which version
13545 the current instruction is. */
13546
static int
neon_cvt_flavour (enum neon_shape rs)
{
  /* Each CVT_VAR trial type-checks the operands against one
     source/destination type pair and, on a match, clears any pending
     error and returns that pair's flavour number.  The numbers index
     the opname tables in do_vfp_nsyn_cvt and do_vfp_nsyn_cvtz.  */
#define CVT_VAR(C,X,Y)							\
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y));	\
  if (et.type != NT_invtype)						\
    {									\
      inst.error = NULL;						\
      return (C);							\
    }
  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
                        || rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);
  /* Half-precision conversions.  */
  CVT_VAR (4, N_F32, N_F16);
  CVT_VAR (5, N_F16, N_F32);

  whole_reg = N_VFP;

  /* VFP instructions.  */
  CVT_VAR (6, N_F32, N_F64);
  CVT_VAR (7, N_F64, N_F32);
  CVT_VAR (8, N_S32, N_F64 | key);
  CVT_VAR (9, N_U32, N_F64 | key);
  CVT_VAR (10, N_F64 | key, N_S32);
  CVT_VAR (11, N_F64 | key, N_U32);
  /* VFP instructions with bitshift.  */
  CVT_VAR (12, N_F32 | key, N_S16);
  CVT_VAR (13, N_F32 | key, N_U16);
  CVT_VAR (14, N_F64 | key, N_S16);
  CVT_VAR (15, N_F64 | key, N_U16);
  CVT_VAR (16, N_S16, N_F32 | key);
  CVT_VAR (17, N_U16, N_F32 | key);
  CVT_VAR (18, N_S16, N_F64 | key);
  CVT_VAR (19, N_U16, N_F64 | key);

  /* No flavour matched.  */
  return -1;
#undef CVT_VAR
}
13596
13597 /* Neon-syntax VFP conversions. */
13598
13599 static void
13600 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
13601 {
13602 const char *opname = 0;
13603
13604 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
13605 {
13606 /* Conversions with immediate bitshift. */
13607 const char *enc[] =
13608 {
13609 "ftosls",
13610 "ftouls",
13611 "fsltos",
13612 "fultos",
13613 NULL,
13614 NULL,
13615 NULL,
13616 NULL,
13617 "ftosld",
13618 "ftould",
13619 "fsltod",
13620 "fultod",
13621 "fshtos",
13622 "fuhtos",
13623 "fshtod",
13624 "fuhtod",
13625 "ftoshs",
13626 "ftouhs",
13627 "ftoshd",
13628 "ftouhd"
13629 };
13630
13631 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13632 {
13633 opname = enc[flavour];
13634 constraint (inst.operands[0].reg != inst.operands[1].reg,
13635 _("operands 0 and 1 must be the same register"));
13636 inst.operands[1] = inst.operands[2];
13637 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
13638 }
13639 }
13640 else
13641 {
13642 /* Conversions without bitshift. */
13643 const char *enc[] =
13644 {
13645 "ftosis",
13646 "ftouis",
13647 "fsitos",
13648 "fuitos",
13649 "NULL",
13650 "NULL",
13651 "fcvtsd",
13652 "fcvtds",
13653 "ftosid",
13654 "ftouid",
13655 "fsitod",
13656 "fuitod"
13657 };
13658
13659 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
13660 opname = enc[flavour];
13661 }
13662
13663 if (opname)
13664 do_vfp_nsyn_opcode (opname);
13665 }
13666
13667 static void
13668 do_vfp_nsyn_cvtz (void)
13669 {
13670 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
13671 int flavour = neon_cvt_flavour (rs);
13672 const char *enc[] =
13673 {
13674 "ftosizs",
13675 "ftouizs",
13676 NULL,
13677 NULL,
13678 NULL,
13679 NULL,
13680 NULL,
13681 NULL,
13682 "ftosizd",
13683 "ftouizd"
13684 };
13685
13686 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
13687 do_vfp_nsyn_opcode (enc[flavour]);
13688 }
13689
/* Encode VCVT: dispatch between Neon fixed-point/integer conversions,
   Advanced SIMD half-precision conversions, and plain VFP conversions
   depending on the operand shape and type flavour.  */

static void
do_neon_cvt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  int flavour = neon_cvt_flavour (rs);

  /* VFP rather than Neon conversions.  */
  if (flavour >= 6)
    {
      do_vfp_nsyn_cvt (rs, flavour);
      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      /* Neon fixed-point conversion with an immediate fraction width.  */
      {
        unsigned immbits;
        /* Per-flavour opcode bits for the fixed-point forms (flavour 2,
           f32 <- s32, contributes no extra bits).  */
        unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
          return;

        /* Fixed-point conversion with #0 immediate is encoded as an
           integer conversion.  */
        if (inst.operands[2].present && inst.operands[2].imm == 0)
          goto int_encode;
        /* The immediate field holds 64 - fracbits for 64-bit ops, but
           these are 32-bit, hence 32 - imm.  */
        immbits = 32 - inst.operands[2].imm;
        NEON_ENCODE (IMMED, inst);
        if (flavour != -1)
          inst.instruction |= enctab[flavour];
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
        inst.instruction |= LOW4 (inst.operands[1].reg);
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
        inst.instruction |= neon_quad (rs) << 6;
        inst.instruction |= 1 << 21;
        inst.instruction |= immbits << 16;

        neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      /* Neon integer <-> float conversion.  */
    int_encode:
      {
        unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

        NEON_ENCODE (INTEGER, inst);

        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
          return;

        if (flavour != -1)
          inst.instruction |= enctab[flavour];

        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
        inst.instruction |= LOW4 (inst.operands[1].reg);
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
        inst.instruction |= neon_quad (rs) << 6;
        inst.instruction |= 2 << 18;

        neon_dp_fixup (&inst);
      }
    break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      /* f32 <- f16: destination is wide (Q), source narrow (D).  */
      if ((rs == NS_DQ)
          && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
          {
            as_bad (_("operand size must match register width"));
            break;
          }

      /* f16 <- f32: destination is narrow (D), source wide (Q).  */
      if ((rs == NS_QD)
          && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
          {
            as_bad (_("operand size must match register width"));
            break;
          }

      if (rs == NS_DQ)
        inst.instruction = 0x3b60600;
      else
        inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      do_vfp_nsyn_cvt (rs, flavour);
    }
}
13795
13796 static void
13797 do_neon_cvtb (void)
13798 {
13799 inst.instruction = 0xeb20a40;
13800
13801 /* The sizes are attached to the mnemonic. */
13802 if (inst.vectype.el[0].type != NT_invtype
13803 && inst.vectype.el[0].size == 16)
13804 inst.instruction |= 0x00010000;
13805
13806 /* Programmer's syntax: the sizes are attached to the operands. */
13807 else if (inst.operands[0].vectype.type != NT_invtype
13808 && inst.operands[0].vectype.size == 16)
13809 inst.instruction |= 0x00010000;
13810
13811 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
13812 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
13813 do_vfp_cond_or_thumb ();
13814 }
13815
13816
/* Encode VCVTT: identical to VCVTB except that bit 7 (the T/B selector)
   is set, selecting the top half of the half-precision register.  */

static void
do_neon_cvtt (void)
{
  do_neon_cvtb ();
  inst.instruction |= 0x80;
}
13823
/* Encode the immediate form of VMOV/VMVN: find a cmode encoding for the
   (possibly 64-bit) immediate, flipping between the MOV and MVN opcodes
   if only the inverted immediate is representable.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
              _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate arrives split across imm (low word) and reg
     (high word) with regisimm set.  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
              _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
                                        et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
         with one or the other; those cases are caught by
         neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
                                            &op, et.size, et.type)) == FAIL)
        {
          first_error (_("immediate out of range"));
          return;
        }
    }

  /* Rewrite the OP bit in case the retry above flipped MOV <-> MVN.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
13875
13876 static void
13877 do_neon_mvn (void)
13878 {
13879 if (inst.operands[1].isreg)
13880 {
13881 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13882
13883 NEON_ENCODE (INTEGER, inst);
13884 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13885 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13886 inst.instruction |= LOW4 (inst.operands[1].reg);
13887 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13888 inst.instruction |= neon_quad (rs) << 6;
13889 }
13890 else
13891 {
13892 NEON_ENCODE (IMMED, inst);
13893 neon_move_immediate ();
13894 }
13895
13896 neon_dp_fixup (&inst);
13897 }
13898
13899 /* Encode instructions of form:
13900
13901 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13902 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
13903
13904 static void
13905 neon_mixed_length (struct neon_type_el et, unsigned size)
13906 {
13907 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13908 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13909 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13910 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13911 inst.instruction |= LOW4 (inst.operands[2].reg);
13912 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13913 inst.instruction |= (et.type == NT_unsigned) << 24;
13914 inst.instruction |= neon_logbits (size) << 20;
13915
13916 neon_dp_fixup (&inst);
13917 }
13918
/* Encode a lengthening dyadic op (e.g. VADDL/VSUBL): Qd = Dn op Dm, with
   the destination elements twice the source width.  */

static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
13927
/* Encode VABAL (absolute-difference-and-accumulate, long): Qd += |Dn - Dm|
   with doubled destination element width.  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
13935
/* Encode a long multiply(-accumulate) which may take either a scalar or a
   register as its final operand (e.g. VMLAL Qd, Dn, Dm[x] vs Qd, Dn, Dm).
   NOTE(review): REGTYPES is used to constrain the *scalar* variant's types
   and SCALARTYPES the *register* variant's -- the parameter names look
   swapped relative to their use; confirm against callers before renaming.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      /* Scalar variant: Qd, Dn, Dm[x].  */
      struct neon_type_el et = neon_check_type (3, NS_QDS,
        N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      /* Register variant: Qd, Dn, Dm.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
13954
/* Encode VMLAL/VMLSL (and VMULL integer forms): scalars are restricted to
   16/32-bit elements, registers allow all of N_SU_32.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
13960
/* Encode a widening dyadic op (e.g. VADDW/VSUBW): Qd = Qn op Dm, where
   only the final operand is narrow.  */

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
13968
/* Encode a narrowing dyadic op (e.g. VADDHN/VSUBHN): Dd = Qn op Qm with
   the destination elements half the source width.  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  /* The size field encodes the (halved) destination element width.  */
  neon_mixed_length (et, et.size / 2);
}
13979
/* Encode VQDMULL/VQDMLAL/VQDMLSL: saturating doubling long multiplies,
   signed 16/32-bit elements only, scalar or register final operand.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
13985
13986 static void
13987 do_neon_vmull (void)
13988 {
13989 if (inst.operands[2].isscalar)
13990 do_neon_mac_maybe_scalar_long ();
13991 else
13992 {
13993 struct neon_type_el et = neon_check_type (3, NS_QDD,
13994 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
13995 if (et.type == NT_poly)
13996 NEON_ENCODE (POLY, inst);
13997 else
13998 NEON_ENCODE (INTEGER, inst);
13999 /* For polynomial encoding, size field must be 0b00 and the U bit must be
14000 zero. Should be OK as-is. */
14001 neon_mixed_length (et, et.size);
14002 }
14003 }
14004
14005 static void
14006 do_neon_ext (void)
14007 {
14008 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
14009 struct neon_type_el et = neon_check_type (3, rs,
14010 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14011 unsigned imm = (inst.operands[3].imm * et.size) / 8;
14012
14013 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
14014 _("shift out of range"));
14015 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14016 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14017 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14018 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14019 inst.instruction |= LOW4 (inst.operands[2].reg);
14020 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14021 inst.instruction |= neon_quad (rs) << 6;
14022 inst.instruction |= imm << 8;
14023
14024 neon_dp_fixup (&inst);
14025 }
14026
14027 static void
14028 do_neon_rev (void)
14029 {
14030 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14031 struct neon_type_el et = neon_check_type (2, rs,
14032 N_EQK, N_8 | N_16 | N_32 | N_KEY);
14033 unsigned op = (inst.instruction >> 7) & 3;
14034 /* N (width of reversed regions) is encoded as part of the bitmask. We
14035 extract it here to check the elements to be reversed are smaller.
14036 Otherwise we'd get a reserved instruction. */
14037 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
14038 gas_assert (elsize != 0);
14039 constraint (et.size >= elsize,
14040 _("elements must be smaller than reversal region"));
14041 neon_two_same (neon_quad (rs), 1, et.size);
14042 }
14043
/* Encode VDUP: either duplicate a scalar (Dm[x]) across a vector, or
   duplicate an ARM core register across all lanes of a vector.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: VDUP.<size> <Qd|Dd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* The lane index is shifted up past the size bits in imm4.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
        return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* The element size is spread across the B (bit 22) and E (bit 5)
         fields.  */
      switch (et.size)
        {
        case 8:  inst.instruction |= 0x400000; break;
        case 16: inst.instruction |= 0x000020; break;
        case 32: inst.instruction |= 0x000000; break;
        default: break;
        }
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
         variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
14094
14095 /* VMOV has particularly many variations. It can be one of:
14096 0. VMOV<c><q> <Qd>, <Qm>
14097 1. VMOV<c><q> <Dd>, <Dm>
14098 (Register operations, which are VORR with Rm = Rn.)
14099 2. VMOV<c><q>.<dt> <Qd>, #<imm>
14100 3. VMOV<c><q>.<dt> <Dd>, #<imm>
14101 (Immediate loads.)
14102 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
14103 (ARM register to scalar.)
14104 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
14105 (Two ARM registers to vector.)
14106 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
14107 (Scalar to ARM register.)
14108 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
14109 (Vector to two ARM registers.)
14110 8. VMOV.F32 <Sd>, <Sm>
14111 9. VMOV.F64 <Dd>, <Dm>
14112 (VFP register moves.)
14113 10. VMOV.F32 <Sd>, #imm
14114 11. VMOV.F64 <Dd>, #imm
14115 (VFP float immediate load.)
14116 12. VMOV <Rd>, <Sm>
14117 (VFP single to ARM reg.)
14118 13. VMOV <Sd>, <Rm>
14119 (ARM reg to VFP single.)
14120 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
14121 (Two ARM regs to two VFP singles.)
14122 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
14123 (Two VFP singles to two ARM regs.)
14124
14125 These cases can be disambiguated using neon_select_shape, except cases 1/9
14126 and 3/11 which depend on the operand type too.
14127
14128 All the encoded bits are hardcoded by this function.
14129
14130 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
14131 Cases 5, 7 may be used with VFPv2 and above.
14132
14133 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
14134 can specify a type where it doesn't make sense to, and is ignored). */
14135
/* Encode the many VMOV variants; the case numbers in the comments refer
   to the enumeration in the block comment immediately above.  Dispatch
   is on the operand shape, with the register/immediate DD/DI shapes
   further disambiguated by the operand type (f64 -> VFP form).  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
        {
          do_vfp_nsyn_opcode ("fcpyd");
          break;
        }
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
        if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
          return;
        /* The architecture manual I have doesn't explicitly state which
           value the U bit should have for register->register moves, but
           the equivalent VORR instruction has U = 0, so do that.  */
        inst.instruction = 0x0200110;
        inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
        inst.instruction |= HI1 (inst.operands[0].reg) << 22;
        inst.instruction |= LOW4 (inst.operands[1].reg);
        inst.instruction |= HI1 (inst.operands[1].reg) << 5;
        /* VORR takes Rm = Rn, so the source register appears twice.  */
        inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
        inst.instruction |= HI1 (inst.operands[1].reg) << 7;
        inst.instruction |= neon_quad (rs) << 6;

        neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
        {
          /* case 11 (fconstd).  */
          ldconst = "fconstd";
          goto encode_fconstd;
        }
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
        return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
        unsigned bcdebits = 0;
        int logsize;
        unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

        et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
        logsize = neon_logbits (et.size);

        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        /* Size is encoded in the low bits of the opc1/opc2 fields; the
           lane index fills the remaining bits above it.  */
        switch (et.size)
          {
          case 8:  bcdebits = 0x8; break;
          case 16: bcdebits = 0x1; break;
          case 32: bcdebits = 0x0; break;
          default: ;
          }

        bcdebits |= x << logsize;

        inst.instruction = 0xe000b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[1].reg << 12;
        inst.instruction |= (bcdebits & 3) << 5;
        inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
        unsigned logsize;
        unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
        unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
        unsigned abcdebits = 0;

        et = neon_check_type (2, NS_NULL,
                              N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
        logsize = neon_logbits (et.size);

        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
                    _(BAD_FPU));
        constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
                    && et.size != 32, _(BAD_FPU));
        constraint (et.type == NT_invtype, _("bad type for scalar"));
        constraint (x >= 64 / et.size, _("scalar index out of range"));

        /* The U bit (0x10) distinguishes unsigned from signed extraction
           for the sub-word sizes.  */
        switch (et.size)
          {
          case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
          case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
          case 32: abcdebits = 0x00; break;
          default: ;
          }

        abcdebits |= x << logsize;
        inst.instruction = 0xe100b10;
        do_vfp_cond_or_thumb ();
        inst.instruction |= LOW4 (dn) << 16;
        inst.instruction |= HI1 (dn) << 7;
        inst.instruction |= inst.operands[0].reg << 12;
        inst.instruction |= (abcdebits & 3) << 5;
        inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
                  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
    encode_fconstd:
      /* Only immediates representable in the 8-bit "quarter float"
         encoding are legal.  */
      if (is_quarter_float (inst.operands[1].imm))
        {
          inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
          do_vfp_nsyn_opcode (ldconst);
        }
      else
        first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
                  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    default:
      abort ();
    }
}
14345
14346 static void
14347 do_neon_rshift_round_imm (void)
14348 {
14349 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14350 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14351 int imm = inst.operands[2].imm;
14352
14353 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
14354 if (imm == 0)
14355 {
14356 inst.operands[2].present = 0;
14357 do_neon_mov ();
14358 return;
14359 }
14360
14361 constraint (imm < 1 || (unsigned)imm > et.size,
14362 _("immediate out of range for shift"));
14363 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
14364 et.size - imm);
14365 }
14366
/* Encode VMOVL (lengthening move): Qd = sign/zero-extended Dm.  The
   element size is encoded as a one-hot value in bits [21:19].  */

static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  /* -1: the size was already placed above, so neon_two_same encodes none.  */
  neon_two_same (0, et.type == NT_unsigned, -1);
}
14376
/* Encode VTRN (transpose element pairs between two vectors).  */

static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (neon_quad (rs), 1, et.size);
}
14386
/* Encode VZIP/VUZP.  The 32-bit D-register forms have no encoding of
   their own and are expressed as VTRN.32, which has the same effect.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
14402
/* Encode VQABS/VQNEG (saturating absolute value / negate); signed
   element types only.  */

static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
14411
/* Encode VPADDL/VPADAL (pairwise add long, optionally accumulating).  */

static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instruction.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}
14421
/* Encode VRECPE/VRSQRTE (reciprocal / reciprocal-sqrt estimate); bit 8
   distinguishes the float form from the u32 form.  */

static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
14431
/* Encode VCLS (count leading sign bits); signed element types only.  */

static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
14440
/* Encode VCLZ (count leading zeros); signedness is irrelevant, so plain
   integer element types are accepted.  */

static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
14449
/* Encode VCNT (population count); only 8-bit elements are defined.  */

static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
14458
/* Encode VSWP (swap two vectors); untyped, so no element size is
   encoded (-1).  */

static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}
14465
14466 static void
14467 do_neon_tbl_tbx (void)
14468 {
14469 unsigned listlenbits;
14470 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
14471
14472 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
14473 {
14474 first_error (_("bad list length for table lookup"));
14475 return;
14476 }
14477
14478 listlenbits = inst.operands[1].imm - 1;
14479 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14480 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14481 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14482 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14483 inst.instruction |= LOW4 (inst.operands[2].reg);
14484 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14485 inst.instruction |= listlenbits << 8;
14486
14487 neon_dp_fixup (&inst);
14488 }
14489
/* Encode VLDM/VSTM of double-precision registers (single-precision lists
   are delegated to the VFP encoder).  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the offset field.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
14519
14520 static void
14521 do_neon_ldr_str (void)
14522 {
14523 int is_ldr = (inst.instruction & (1 << 20)) != 0;
14524
14525 if (inst.operands[0].issingle)
14526 {
14527 if (is_ldr)
14528 do_vfp_nsyn_opcode ("flds");
14529 else
14530 do_vfp_nsyn_opcode ("fsts");
14531 }
14532 else
14533 {
14534 if (is_ldr)
14535 do_vfp_nsyn_opcode ("fldd");
14536 else
14537 do_vfp_nsyn_opcode ("fstd");
14538 }
14539 }
14540
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  Validates the alignment specifier and register-list shape,
   then rewrites the "type" field of the opcode from a lookup table.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1.  */
      -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2.  */
      -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3.  */
      -1, -1, -1, -1, -1, -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* The alignment (in bits) is carried in the upper half of the imm
     field; 3-register lists cannot take 128/256-bit alignment.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 2;
        break;
      case 256:
        if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
          goto bad_alignment;
        alignbits = 3;
        break;
      default:
      bad_alignment:
        first_error (_("bad alignment"));
        return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
        | (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
14606
14607 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
14608 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
14609 otherwise. The variable arguments are a list of pairs of legal (size, align)
14610 values, terminated with -1. */
14611
14612 static int
14613 neon_alignment_bit (int size, int align, int *do_align, ...)
14614 {
14615 va_list ap;
14616 int result = FAIL, thissize, thisalign;
14617
14618 if (!inst.operands[1].immisalign)
14619 {
14620 *do_align = 0;
14621 return SUCCESS;
14622 }
14623
14624 va_start (ap, do_align);
14625
14626 do
14627 {
14628 thissize = va_arg (ap, int);
14629 if (thissize == -1)
14630 break;
14631 thisalign = va_arg (ap, int);
14632
14633 if (size == thissize && align == thisalign)
14634 result = SUCCESS;
14635 }
14636 while (result != SUCCESS);
14637
14638 va_end (ap);
14639
14640 if (result == SUCCESS)
14641 *do_align = 1;
14642 else
14643 first_error (_("unsupported alignment for instruction"));
14644
14645 return result;
14646 }
14647
/* Encode a single-lane VLD<n>/VST<n> ("load/store one lane") instruction.
   Operand 0 holds the register list plus lane number, operand 1 the
   addressing mode (with optional alignment specifier).  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment, in bits, from the top of the parsed immediate (0 if none).  */
  int align = inst.operands[1].imm >> 8;
  /* <n> of VLD<n>/VST<n>, minus one, from bits [9:8] of the initial
     bitmask (see do_neon_ld_st_interleave).  */
  int n = (inst.instruction >> 8) & 3;
  /* Number of lanes of this element size in a 64-bit D register.  */
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Validate the alignment for this <n>, then encode it in bits [5:4]
     (VLD1/VLD4 use per-size encodings, VLD2 a single flag bit).  */
  switch (n)
    {
    case 0: /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1: /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2: /* VLD3 / VST3.  */
      /* Three-element structures never take an alignment specifier.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3: /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  /* Lane index lives above the size-dependent index_align bits.  */
  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
14732
14733 /* Encode single n-element structure to all lanes VLD<n> instructions. */
14734
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the initial bitmask hold <n> of VLD<n>, minus one.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0: /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* Bit 5 (T) selects a one- or two-register list.  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1: /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 (T) set selects register stride 2.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2: /* VLD3.  */
      /* VLD3 (all lanes) never takes an alignment specifier.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3: /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* A 32-bit element with 128-bit alignment uses the otherwise
	   unused size encoding 0b11.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Bit 4 is the alignment flag (a).  */
  inst.instruction |= do_align << 4;
}
14806
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
14809
static void
do_neon_ldx_stx (void)
{
  /* Dispatch on the lane field of the parsed register list: interleaved
     (no lane given), all-lanes (dup form), or a single numbered lane.  */
  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      NEON_ENCODE (INTERLV, inst);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      NEON_ENCODE (DUP, inst);
      do_neon_ld_dup ();
      break;

    default:
      NEON_ENCODE (LANE, inst);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  if (inst.operands[1].postind)
    {
      /* Post-indexed by a register: encode it in the Rm field.  SP (0xd)
	 and PC (0xf) are reserved there for the forms below.  */
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
		  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
		  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    {
      /* Rm == 0xd: post-increment by the transfer size.  */
      inst.instruction |= 0xd;
    }
  else
    /* Rm == 0xf: no writeback.  */
    inst.instruction |= 0xf;

  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
14856 \f
14857 /* Overall per-instruction processing. */
14858
14859 /* We need to be able to fix up arbitrary expressions in some statements.
14860 This is so that we can handle symbols that are an arbitrary distance from
14861 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
14862 which returns part of an address in a form which will be valid for
14863 a data instruction. We do this by pushing the expression into a symbol
14864 in the expr_section, and creating a fix for that. */
14865
14866 static void
14867 fix_new_arm (fragS * frag,
14868 int where,
14869 short int size,
14870 expressionS * exp,
14871 int pc_rel,
14872 int reloc)
14873 {
14874 fixS * new_fix;
14875
14876 switch (exp->X_op)
14877 {
14878 case O_constant:
14879 case O_symbol:
14880 case O_add:
14881 case O_subtract:
14882 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
14883 (enum bfd_reloc_code_real) reloc);
14884 break;
14885
14886 default:
14887 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
14888 pc_rel, (enum bfd_reloc_code_real) reloc);
14889 break;
14890 }
14891
14892 /* Mark whether the fix is to a THUMB instruction, or an ARM
14893 instruction. */
14894 new_fix->tc_fix_data = thumb_mode;
14895 }
14896
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Extract the symbol and offset the relaxable fragment will be
     resolved against.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      /* Complex expression: wrap it in a symbol for later resolution.  */
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
14928
14929 /* Write a 32-bit thumb instruction to buf. */
14930 static void
14931 put_thumb32_insn (char * buf, unsigned long insn)
14932 {
14933 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
14934 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
14935 }
14936
/* Emit the instruction described by INST to the current frag, together
   with any relocation and debug information it needs.  STR is the
   original source line, used only for diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  /* Relaxable instructions go through a variant frag instead.  */
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb-2 encoding: write as two halfwords.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-length ARM instruction: the same word written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
14983
14984 static char *
14985 output_it_inst (int cond, int mask, char * to)
14986 {
14987 unsigned long instruction = 0xbf00;
14988
14989 mask &= 0xf;
14990 instruction |= mask;
14991 instruction |= cond << 4;
14992
14993 if (to == NULL)
14994 {
14995 to = frag_more (2);
14996 #ifdef OBJ_ELF
14997 dwarf2_emit_insn (2);
14998 #endif
14999 }
15000
15001 md_number_to_chars (to, instruction, 2);
15002
15003 return to;
15004 }
15005
/* Tag values used in struct asm_opcode's tag field.  They describe
   whether a mnemonic accepts a conditional affix and, if so, whether
   the affix is a suffix or an infix and where an infix appears.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
15039
15040 /* Subroutine of md_assemble, responsible for looking up the primary
15041 opcode from the mnemonic the user wrote. STR points to the
15042 beginning of the mnemonic.
15043
15044 This is not simply a hash table lookup, because of conditional
15045 variants. Most instructions have conditional variants, which are
15046 expressed with a _conditional affix_ to the mnemonic. If we were
15047 to encode each conditional variant as a literal string in the opcode
15048 table, it would have approximately 20,000 entries.
15049
15050 Most mnemonics take this affix as a suffix, and in unified syntax,
15051 'most' is upgraded to 'all'. However, in the divided syntax, some
15052 instructions take the affix as an infix, notably the s-variants of
15053 the arithmetic instructions. Of those instructions, all but six
15054 have the infix appear after the third character of the mnemonic.
15055
15056 Accordingly, the algorithm for looking up primary opcodes given
15057 an identifier is:
15058
15059 1. Look up the identifier in the opcode table.
15060 If we find a match, go to step U.
15061
15062 2. Look up the last two characters of the identifier in the
15063 conditions table. If we find a match, look up the first N-2
15064 characters of the identifier in the opcode table. If we
15065 find a match, go to step CE.
15066
15067 3. Look up the fourth and fifth characters of the identifier in
15068 the conditions table. If we find a match, extract those
15069 characters from the identifier, and look up the remaining
15070 characters in the opcode table. If we find a match, go
15071 to step CM.
15072
15073 4. Fail.
15074
15075 U. Examine the tag field of the opcode structure, in case this is
15076 one of the six instructions with its conditional infix in an
15077 unusual place. If it is, the tag tells us where to find the
15078 infix; look it up in the conditions table and set inst.cond
15079 accordingly. Otherwise, this is an unconditional instruction.
15080 Again set inst.cond accordingly. Return the opcode structure.
15081
15082 CE. Examine the tag field to make sure this is an instruction that
15083 should receive a conditional suffix. If it is not, fail.
15084 Otherwise, set inst.cond from the suffix we already looked up,
15085 and return the opcode structure.
15086
15087 CM. Examine the tag field to make sure this is an instruction that
15088 should receive a conditional infix after the third character.
15089 If it is not, fail. Otherwise, undo the edits to the current
15090 line of input and proceed as for case CE. */
15091
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  /* Unconditional instruction, or one whose affix has already
	     been handled.  */
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      /* One of the rare instructions with its conditional infix in an
	 unusual place; the tag encodes the infix's position.  */
      if (warn_on_deprecated && unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than three
     characters (one for the base mnemonic, two for the suffix).  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily edit the two infix characters out of the input line,
     look the shortened mnemonic up, then restore the line.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
15248
15249 /* This function generates an initial IT instruction, leaving its block
15250 virtually open for the new instructions. Eventually,
15251 the mask will be updated by now_it_add_mask () each time
15252 a new instruction needs to be included in the IT block.
15253 Finally, the block is closed with close_automatic_it_block ().
15254 The block closure can be requested either from md_assemble (),
15255 a tencode (), or due to a label hook. */
15256
15257 static void
15258 new_automatic_it_block (int cond)
15259 {
15260 now_it.state = AUTOMATIC_IT_BLOCK;
15261 now_it.mask = 0x18;
15262 now_it.cc = cond;
15263 now_it.block_length = 1;
15264 mapping_state (MAP_THUMB);
15265 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
15266 }
15267
15268 /* Close an automatic IT block.
15269 See comments in new_automatic_it_block (). */
15270
15271 static void
15272 close_automatic_it_block (void)
15273 {
15274 now_it.mask = 0x10;
15275 now_it.block_length = 0;
15276 }
15277
/* Update the mask of the current automatically-generated IT
   instruction, adding one slot for an instruction with condition COND.
   See comments in new_automatic_it_block ().  */

static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
					      | ((bitvalue) << (nbit)))
  /* The low bit of the condition selects the then/else sense of the
     new slot relative to the block's base condition.  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Record the sense of the new slot...  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			       (5 - now_it.block_length));
  /* ...and move the terminating 1 bit down one position.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Re-emit the IT instruction in place with the updated mask.  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
15301
15302 /* The IT blocks handling machinery is accessed through the these functions:
15303 it_fsm_pre_encode () from md_assemble ()
15304 set_it_insn_type () optional, from the tencode functions
15305 set_it_insn_type_last () ditto
15306 in_it_block () ditto
15307 it_fsm_post_encode () from md_assemble ()
     force_automatic_it_block_close () from label handling functions
15309
15310 Rationale:
15311 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
15312 initializing the IT insn type with a generic initial value depending
15313 on the inst.condition.
15314 2) During the tencode function, two things may happen:
15315 a) The tencode function overrides the IT insn type by
15316 calling either set_it_insn_type (type) or set_it_insn_type_last ().
15317 b) The tencode function queries the IT block state by
15318 calling in_it_block () (i.e. to determine narrow/not narrow mode).
15319
15320 Both set_it_insn_type and in_it_block run the internal FSM state
15321 handling function (handle_it_state), because: a) setting the IT insn
   type may result in an invalid state (exiting the function),
15323 and b) querying the state requires the FSM to be updated.
15324 Specifically we want to avoid creating an IT block for conditional
15325 branches, so it_fsm_pre_encode is actually a guess and we can't
15326 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
15328 Because of this, if set_it_insn_type and in_it_block have to be used,
15329 set_it_insn_type has to be called first.
15330
15331 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
15332 determines the insn IT type depending on the inst.cond code.
15333 When a tencode () routine encodes an instruction that can be
15334 either outside an IT block, or, in the case of being inside, has to be
15335 the last one, set_it_insn_type_last () will determine the proper
15336 IT instruction type based on the inst.cond code. Otherwise,
15337 set_it_insn_type can be called for overriding that logic or
15338 for covering other cases.
15339
15340 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
15342 still queried. Instead, if the FSM determines that the state should
15343 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
15344 after the tencode () function: that's what it_fsm_post_encode () does.
15345
15346 Since in_it_block () calls the state handling function to get an
15347 updated state, an error may occur (due to invalid insns combination).
15348 In that case, inst.error is set.
15349 Therefore, inst.error has to be checked after the execution of
15350 the tencode () routine.
15351
15352 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
15353 any pending state change (if any) that didn't take place in
15354 handle_it_state () as explained above. */
15355
15356 static void
15357 it_fsm_pre_encode (void)
15358 {
15359 if (inst.cond != COND_ALWAYS)
15360 inst.it_insn_type = INSIDE_IT_INSN;
15361 else
15362 inst.it_insn_type = OUTSIDE_IT_INSN;
15363
15364 now_it.state_handled = 0;
15365 }
15366
/* IT state FSM handling function.  Run once per instruction (via
   set_it_insn_type, in_it_block or it_fsm_post_encode) to update now_it
   according to inst.it_insn_type.  Returns SUCCESS or FAIL; on FAIL,
   inst.error is set.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* ARM mode: conditional execution needs no IT block, but
		 warn if the user asked for implicit IT generation.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      /* Case c) above: close and (unless this is a conditional
		 branch) open a fresh block for this instruction.  */
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      /* Case a): extend the current block's mask.  */
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  /* Unconditional-in-IT instruction (e.g. BKPT): it occupies a
	     slot but inherits the block's base condition sense.  */
	  now_it.block_length++;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  /* An explicit IT ends the automatic block and starts a manual
	     one.  */
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
15526
15527 static void
15528 it_fsm_post_encode (void)
15529 {
15530 int is_last;
15531
15532 if (!now_it.state_handled)
15533 handle_it_state ();
15534
15535 is_last = (now_it.mask == 0x10);
15536 if (is_last)
15537 {
15538 now_it.state = OUTSIDE_IT_BLOCK;
15539 now_it.mask = 0;
15540 }
15541 }
15542
15543 static void
15544 force_automatic_it_block_close (void)
15545 {
15546 if (now_it.state == AUTOMATIC_IT_BLOCK)
15547 {
15548 close_automatic_it_block ();
15549 now_it.state = OUTSIDE_IT_BLOCK;
15550 now_it.mask = 0;
15551 }
15552 }
15553
/* Return non-zero if we are currently inside an IT block (of any kind).
   Runs the IT FSM first if this instruction has not been handled yet,
   so inst.error may be set as a side effect.  */
static int
in_it_block (void)
{
  if (!now_it.state_handled)
    handle_it_state ();

  return now_it.state != OUTSIDE_IT_BLOCK;
}
15562
15563 void
15564 md_assemble (char *str)
15565 {
15566 char *p = str;
15567 const struct asm_opcode * opcode;
15568
15569 /* Align the previous label if needed. */
15570 if (last_label_seen != NULL)
15571 {
15572 symbol_set_frag (last_label_seen, frag_now);
15573 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
15574 S_SET_SEGMENT (last_label_seen, now_seg);
15575 }
15576
15577 memset (&inst, '\0', sizeof (inst));
15578 inst.reloc.type = BFD_RELOC_UNUSED;
15579
15580 opcode = opcode_lookup (&p);
15581 if (!opcode)
15582 {
15583 /* It wasn't an instruction, but it might be a register alias of
15584 the form alias .req reg, or a Neon .dn/.qn directive. */
15585 if (! create_register_alias (str, p)
15586 && ! create_neon_reg_alias (str, p))
15587 as_bad (_("bad instruction `%s'"), str);
15588
15589 return;
15590 }
15591
15592 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
15593 as_warn (_("s suffix on comparison instruction is deprecated"));
15594
15595 /* The value which unconditional instructions should have in place of the
15596 condition field. */
15597 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
15598
15599 if (thumb_mode)
15600 {
15601 arm_feature_set variant;
15602
15603 variant = cpu_variant;
15604 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
15605 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
15606 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
15607 /* Check that this instruction is supported for this CPU. */
15608 if (!opcode->tvariant
15609 || (thumb_mode == 1
15610 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
15611 {
15612 as_bad (_("selected processor does not support `%s'"), str);
15613 return;
15614 }
15615 if (inst.cond != COND_ALWAYS && !unified_syntax
15616 && opcode->tencode != do_t_branch)
15617 {
15618 as_bad (_("Thumb does not support conditional execution"));
15619 return;
15620 }
15621
15622 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
15623 {
15624 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
15625 && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
15626 || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
15627 {
15628 /* Two things are addressed here.
15629 1) Implicit require narrow instructions on Thumb-1.
15630 This avoids relaxation accidentally introducing Thumb-2
15631 instructions.
15632 2) Reject wide instructions in non Thumb-2 cores. */
15633 if (inst.size_req == 0)
15634 inst.size_req = 2;
15635 else if (inst.size_req == 4)
15636 {
15637 as_bad (_("selected processor does not support `%s'"), str);
15638 return;
15639 }
15640 }
15641 }
15642
15643 inst.instruction = opcode->tvalue;
15644
15645 if (!parse_operands (p, opcode->operands))
15646 {
15647 /* Prepare the it_insn_type for those encodings that don't set
15648 it. */
15649 it_fsm_pre_encode ();
15650
15651 opcode->tencode ();
15652
15653 it_fsm_post_encode ();
15654 }
15655
15656 if (!(inst.error || inst.relax))
15657 {
15658 gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
15659 inst.size = (inst.instruction > 0xffff ? 4 : 2);
15660 if (inst.size_req && inst.size_req != inst.size)
15661 {
15662 as_bad (_("cannot honor width suffix -- `%s'"), str);
15663 return;
15664 }
15665 }
15666
15667 /* Something has gone badly wrong if we try to relax a fixed size
15668 instruction. */
15669 gas_assert (inst.size_req == 0 || !inst.relax);
15670
15671 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
15672 *opcode->tvariant);
15673 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
15674 set those bits when Thumb-2 32-bit instructions are seen. ie.
15675 anything other than bl/blx and v6-M instructions.
15676 This is overly pessimistic for relaxable instructions. */
15677 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
15678 || inst.relax)
15679 && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
15680 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
15681 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
15682 arm_ext_v6t2);
15683
15684 check_neon_suffixes;
15685
15686 if (!inst.error)
15687 {
15688 mapping_state (MAP_THUMB);
15689 }
15690 }
15691 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
15692 {
15693 bfd_boolean is_bx;
15694
15695 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
15696 is_bx = (opcode->aencode == do_bx);
15697
15698 /* Check that this instruction is supported for this CPU. */
15699 if (!(is_bx && fix_v4bx)
15700 && !(opcode->avariant &&
15701 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
15702 {
15703 as_bad (_("selected processor does not support `%s'"), str);
15704 return;
15705 }
15706 if (inst.size_req)
15707 {
15708 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
15709 return;
15710 }
15711
15712 inst.instruction = opcode->avalue;
15713 if (opcode->tag == OT_unconditionalF)
15714 inst.instruction |= 0xF << 28;
15715 else
15716 inst.instruction |= inst.cond << 28;
15717 inst.size = INSN_SIZE;
15718 if (!parse_operands (p, opcode->operands))
15719 {
15720 it_fsm_pre_encode ();
15721 opcode->aencode ();
15722 it_fsm_post_encode ();
15723 }
15724 /* Arm mode bx is marked as both v4T and v5 because it's still required
15725 on a hypothetical non-thumb v5 core. */
15726 if (is_bx)
15727 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
15728 else
15729 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
15730 *opcode->avariant);
15731
15732 check_neon_suffixes;
15733
15734 if (!inst.error)
15735 {
15736 mapping_state (MAP_ARM);
15737 }
15738 }
15739 else
15740 {
15741 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
15742 "-- `%s'"), str);
15743 return;
15744 }
15745 output_inst (str);
15746 }
15747
15748 static void
15749 check_it_blocks_finished (void)
15750 {
15751 #ifdef OBJ_ELF
15752 asection *sect;
15753
15754 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
15755 if (seg_info (sect)->tc_segment_info_data.current_it.state
15756 == MANUAL_IT_BLOCK)
15757 {
15758 as_warn (_("section '%s' finished with an open IT block."),
15759 sect->name);
15760 }
15761 #else
15762 if (now_it.state == MANUAL_IT_BLOCK)
15763 as_warn (_("file finished with an open IT block."));
15764 #endif
15765 }
15766
15767 /* Various frobbings of labels and their addresses. */
15768
/* Hook run at the start of each new input line: reset the record of
   the most recently seen label so it is not associated with anything
   on this line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
15774
/* Hook run whenever a label SYM is defined.  Records the label, tags
   it with the current instruction set (ARM vs Thumb) and interworking
   support, closes any automatically-opened IT block, and marks Thumb
   function entry points when requested.  */
void
arm_frob_label (symbolS * sym)
{
  /* Remember the most recently defined label.  */
  last_label_seen = sym;

  /* Tag the symbol with the mode it was defined in.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any IT block that was opened automatically.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:   .word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* Consume the flag so that only this one label is marked.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
15833
15834 bfd_boolean
15835 arm_data_in_code (void)
15836 {
15837 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
15838 {
15839 *input_line_pointer = '/';
15840 input_line_pointer += 5;
15841 *input_line_pointer = 0;
15842 return TRUE;
15843 }
15844
15845 return FALSE;
15846 }
15847
15848 char *
15849 arm_canonicalize_symbol_name (char * name)
15850 {
15851 int len;
15852
15853 if (thumb_mode && (len = strlen (name)) > 5
15854 && streq (name + len - 5, "/data"))
15855 *(name + len - 5) = 0;
15856
15857 return name;
15858 }
15859 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* One table entry: name string, register number, type, built-in flag.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM, but the register's value is 2 * n rather than n (used
   below for the Neon Q register set).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Sixteen consecutively-numbered registers with prefix P.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The "high" half, registers 16-31.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Sixteen registers numbered in steps of two (via REGNUM2).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C,  CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.  */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,  0,MMXWC),  REGDEF(wCID,  0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,  1,MMXWC),  REGDEF(wCon,  1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
/* NOTE(review): REGNUM2, REGSETH and REGSET2 are left defined past
   this point -- confirm nothing later relies on (or collides with)
   them.  */
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  Each entry maps a suffix string to the
   combination of PSR field bits it selects.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.  */
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},
  /* Combinations of flags.  Every ordering of each two-, three- and
     four-letter subset of {f, s, x, c} is listed explicitly.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
16051
/* Table of V7M psr names.  Each name maps to its numeric encoding;
   upper- and lower-case forms are listed (plus the mixed-case "xPSR").
   Note that "psr" and "xpsr" share encoding 3.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};
16071
/* Table of all shift-in-operand names.  "asl" is accepted as a
   synonym for "lsl" (both map to SHIFT_LSL).  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
16082
/* Table of all explicit relocation names, mapping each operator
   (in lower- and upper-case form) to its BFD relocation code.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32}
};
#endif
16100
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   "cs"/"hs" and "cc"/"ul"/"lo" are alternative spellings of the same
   condition.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
16120
/* Accepted option names for barrier instructions and their
   encodings.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  { "sy",   0xf },
  { "un",   0x7 },
  { "st",   0xe },
  { "unst", 0x6 }
};
16128
/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings: OPSn(...) expands to an
   initializer for an array of operand-parser codes.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
16141
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* As TxC3, but the infix form is marked deprecated.  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
   appear in the condition table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

/* Expand one entry per condition code (plus the bare, unconditional
   spelling) via TxCM_ above.  */
#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_ (m1,   , m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, le, m2, op, top, nops, ops, ae, te),	\
  TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* N.B. mnem is stringified here, so callers pass a bare token
   (cf. CE above, which expects a quoted string).  */
#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16231
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2:
   the Thumb opcode is the ARM opcode with the 0xE condition
   prepended.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* ARM-only counterpart of TxCM_ above: odd-placed conditional infix,
   no Thumb encoding.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand one xCM_ entry per condition code plus the bare spelling.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only, unconditional (condition field left zero).  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* ARM-only, unconditional with 0xF condition field.  */
#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Lets 'do_##ae' expand to 0 (a null encoder pointer) when the
   encoder argument is given as 0.  */
#define do_0 0

/* Thumb-only, unconditional.  */
#define UT(mnem,  op, nops, ops, te) TUE (mnem,  0, op, nops, ops, 0, te)
16321
16322 static const struct asm_opcode insns[] =
16323 {
16324 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
16325 #define THUMB_VARIANT &arm_ext_v4t
16326 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
16327 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
16328 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
16329 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
16330 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
16331 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
16332 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
16333 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
16334 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
16335 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
16336 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
16337 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
16338 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
16339 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
16340 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
16341 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
16342
16343 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
16344 for setting PSR flag bits. They are obsolete in V6 and do not
16345 have Thumb equivalents. */
16346 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
16347 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
16348 CL("tstp", 110f000, 2, (RR, SH), cmp),
16349 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
16350 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
16351 CL("cmpp", 150f000, 2, (RR, SH), cmp),
16352 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
16353 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
16354 CL("cmnp", 170f000, 2, (RR, SH), cmp),
16355
16356 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
16357 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
16358 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
16359 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
16360
16361 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
16362 tC3("ldrb", 4500000, _ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
16363 tCE("str", 4000000, _str, 2, (RR, ADDRGLDR),ldst, t_ldst),
16364 tC3("strb", 4400000, _strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
16365
16366 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16367 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16368 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16369 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16370 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16371 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16372
16373 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
16374 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
16375 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
16376 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
16377
16378 /* Pseudo ops. */
16379 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
16380 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
16381 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
16382
16383 /* Thumb-compatibility pseudo ops. */
16384 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
16385 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
16386 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
16387 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
16388 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
16389 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
16390 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
16391 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
16392 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
16393 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
16394 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
16395 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
16396
16397 /* These may simplify to neg. */
16398 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
16399 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
16400
16401 #undef THUMB_VARIANT
16402 #define THUMB_VARIANT & arm_ext_v6
16403
16404 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
16405
16406 /* V1 instructions with no Thumb analogue prior to V6T2. */
16407 #undef THUMB_VARIANT
16408 #define THUMB_VARIANT & arm_ext_v6t2
16409
16410 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
16411 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
16412 CL("teqp", 130f000, 2, (RR, SH), cmp),
16413
16414 TC3("ldrt", 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
16415 TC3("ldrbt", 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
16416 TC3("strt", 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
16417 TC3("strbt", 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
16418
16419 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16420 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16421
16422 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16423 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
16424
16425 /* V1 instructions with no Thumb analogue at all. */
16426 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
16427 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
16428
16429 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
16430 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
16431 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
16432 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
16433 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
16434 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
16435 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
16436 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
16437
16438 #undef ARM_VARIANT
16439 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
16440 #undef THUMB_VARIANT
16441 #define THUMB_VARIANT & arm_ext_v4t
16442
16443 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
16444 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
16445
16446 #undef THUMB_VARIANT
16447 #define THUMB_VARIANT & arm_ext_v6t2
16448
16449 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
16450 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
16451
16452 /* Generic coprocessor instructions. */
16453 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
16454 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16455 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16456 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16457 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16458 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16459 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16460
16461 #undef ARM_VARIANT
16462 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
16463
16464 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
16465 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
16466
16467 #undef ARM_VARIANT
16468 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
16469 #undef THUMB_VARIANT
16470 #define THUMB_VARIANT & arm_ext_msr
16471
16472 TCE("mrs", 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
16473 TCE("msr", 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
16474
16475 #undef ARM_VARIANT
16476 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
16477 #undef THUMB_VARIANT
16478 #define THUMB_VARIANT & arm_ext_v6t2
16479
16480 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16481 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16482 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16483 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16484 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16485 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16486 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
16487 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
16488
16489 #undef ARM_VARIANT
16490 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
16491 #undef THUMB_VARIANT
16492 #define THUMB_VARIANT & arm_ext_v4t
16493
16494 tC3("ldrh", 01000b0, _ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16495 tC3("strh", 00000b0, _strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16496 tC3("ldrsh", 01000f0, _ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16497 tC3("ldrsb", 01000d0, _ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16498 tCM("ld","sh", 01000f0, _ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16499 tCM("ld","sb", 01000d0, _ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
16500
16501 #undef ARM_VARIANT
16502 #define ARM_VARIANT & arm_ext_v4t_5
16503
16504 /* ARM Architecture 4T. */
16505 /* Note: bx (and blx) are required on V5, even if the processor does
16506 not support Thumb. */
16507 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
16508
16509 #undef ARM_VARIANT
16510 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
16511 #undef THUMB_VARIANT
16512 #define THUMB_VARIANT & arm_ext_v5t
16513
16514 /* Note: blx has 2 variants; the .value coded here is for
16515 BLX(2). Only this variant has conditional execution. */
16516 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
16517 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
16518
16519 #undef THUMB_VARIANT
16520 #define THUMB_VARIANT & arm_ext_v6t2
16521
16522 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
16523 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16524 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16525 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16526 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
16527 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
16528 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16529 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
16530
16531 #undef ARM_VARIANT
16532 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
16533 #undef THUMB_VARIANT
16534 #define THUMB_VARIANT &arm_ext_v5exp
16535
16536 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16537 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16538 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16539 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16540
16541 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16542 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
16543
16544 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16545 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16546 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16547 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
16548
16549 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16550 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16551 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16552 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16553
16554 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16555 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16556
16557 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16558 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16559 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16560 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
16561
16562 #undef ARM_VARIANT
16563 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
16564 #undef THUMB_VARIANT
16565 #define THUMB_VARIANT &arm_ext_v6t2
16566
16567 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
16568 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
16569 TC3("strd", 00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
16570
16571 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16572 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16573
16574 #undef ARM_VARIANT
16575 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
16576
16577 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
16578
16579 #undef ARM_VARIANT
16580 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
16581 #undef THUMB_VARIANT
16582 #define THUMB_VARIANT & arm_ext_v6
16583
16584 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
16585 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
16586 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16587 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16588 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
16589 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16590 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16591 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16592 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16593 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
16594
16595 #undef THUMB_VARIANT
16596 #define THUMB_VARIANT & arm_ext_v6t2
16597
16598 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
16599 TCE("strex", 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
16600 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16601 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
16602
16603 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
16604 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
16605
16606 /* ARM V6 not included in V7M. */
16607 #undef THUMB_VARIANT
16608 #define THUMB_VARIANT & arm_ext_v6_notm
16609 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
16610 UF(rfeib, 9900a00, 1, (RRw), rfe),
16611 UF(rfeda, 8100a00, 1, (RRw), rfe),
16612 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
16613 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
16614 UF(rfefa, 9900a00, 1, (RRw), rfe),
16615 UF(rfeea, 8100a00, 1, (RRw), rfe),
16616 TUF("rfeed", 9100a00, e810c000, 1, (RRw), rfe, rfe),
16617 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
16618 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
16619 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
16620 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
16621
16622 /* ARM V6 not included in V7M (eg. integer SIMD). */
16623 #undef THUMB_VARIANT
16624 #define THUMB_VARIANT & arm_ext_v6_dsp
16625 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
16626 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
16627 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
16628 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16629 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16630 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16631 /* Old name for QASX. */
16632 TCE("qaddsubx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16633 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16634 /* Old name for QSAX. */
16635 TCE("qsubaddx", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16636 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16637 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16638 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16639 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16640 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16641 /* Old name for SASX. */
16642 TCE("saddsubx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16643 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16644 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16645 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16646 /* Old name for SHASX. */
16647 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16648 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16649 /* Old name for SHSAX. */
16650 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16651 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16652 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16653 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16654 /* Old name for SSAX. */
16655 TCE("ssubaddx", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16656 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16657 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16658 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16659 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16660 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16661 /* Old name for UASX. */
16662 TCE("uaddsubx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16663 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16664 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16665 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16666 /* Old name for UHASX. */
16667 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16668 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16669 /* Old name for UHSAX. */
16670 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16671 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16672 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16673 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16674 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16675 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16676 /* Old name for UQASX. */
16677 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16678 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16679 /* Old name for UQSAX. */
16680 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16681 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16682 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16683 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16684 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16685 /* Old name for USAX. */
16686 TCE("usubaddx", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16687 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16688 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16689 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16690 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16691 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16692 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16693 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16694 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
16695 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
16696 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
16697 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16698 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16699 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16700 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16701 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16702 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16703 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16704 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
16705 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16706 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16707 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16708 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16709 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16710 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16711 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16712 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16713 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16714 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16715 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
16716 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
16717 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
16718 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
16719 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
16720
16721 #undef ARM_VARIANT
16722 #define ARM_VARIANT & arm_ext_v6k
16723 #undef THUMB_VARIANT
16724 #define THUMB_VARIANT & arm_ext_v6k
16725
16726 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
16727 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
16728 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
16729 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
16730
16731 #undef THUMB_VARIANT
16732 #define THUMB_VARIANT & arm_ext_v6_notm
16733
16734 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
16735 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
16736
16737 #undef THUMB_VARIANT
16738 #define THUMB_VARIANT & arm_ext_v6t2
16739
16740 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
16741 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
16742 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
16743 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
16744 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
16745
16746 #undef ARM_VARIANT
16747 #define ARM_VARIANT & arm_ext_v6z
16748
16749 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
16750
16751 #undef ARM_VARIANT
16752 #define ARM_VARIANT & arm_ext_v6t2
16753
16754 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
16755 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
16756 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
16757 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
16758
16759 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
16760 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
16761 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
16762 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
16763
16764 TC3("ldrht", 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16765 TC3("ldrsht", 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16766 TC3("ldrsbt", 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16767 TC3("strht", 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
16768
16769 UT("cbnz", b900, 2, (RR, EXP), t_cbz),
16770 UT("cbz", b100, 2, (RR, EXP), t_cbz),
16771
16772 /* ARM does not really have an IT instruction, so always allow it.
16773 The opcode is copied from Thumb in order to allow warnings in
16774 -mimplicit-it=[never | arm] modes. */
16775 #undef ARM_VARIANT
16776 #define ARM_VARIANT & arm_ext_v1
16777
16778 TUE("it", bf08, bf08, 1, (COND), it, t_it),
16779 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
16780 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
16781 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
16782 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
16783 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
16784 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
16785 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
16786 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
16787 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
16788 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
16789 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
16790 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
16791 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
16792 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
16793 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
16794 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
16795 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
16796
16797 /* Thumb2 only instructions. */
16798 #undef ARM_VARIANT
16799 #define ARM_VARIANT NULL
16800
16801 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
16802 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
16803 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
16804 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
16805 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
16806 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
16807
16808 /* Thumb-2 hardware division instructions (R and M profiles only). */
16809 #undef THUMB_VARIANT
16810 #define THUMB_VARIANT & arm_ext_div
16811
16812 TCE("sdiv", 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
16813 TCE("udiv", 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
16814
16815 /* ARM V6M/V7 instructions. */
16816 #undef ARM_VARIANT
16817 #define ARM_VARIANT & arm_ext_barrier
16818 #undef THUMB_VARIANT
16819 #define THUMB_VARIANT & arm_ext_barrier
16820
16821 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
16822 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
16823 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
16824
16825 /* ARM V7 instructions. */
16826 #undef ARM_VARIANT
16827 #define ARM_VARIANT & arm_ext_v7
16828 #undef THUMB_VARIANT
16829 #define THUMB_VARIANT & arm_ext_v7
16830
16831 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
16832 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
16833
16834 #undef ARM_VARIANT
16835 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
16836
16837 cCE("wfs", e200110, 1, (RR), rd),
16838 cCE("rfs", e300110, 1, (RR), rd),
16839 cCE("wfc", e400110, 1, (RR), rd),
16840 cCE("rfc", e500110, 1, (RR), rd),
16841
16842 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
16843 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
16844 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
16845 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
16846
16847 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
16848 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
16849 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
16850 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
16851
16852 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
16853 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
16854 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
16855 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
16856 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
16857 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
16858 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
16859 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
16860 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
16861 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
16862 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
16863 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
16864
16865 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
16866 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
16867 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
16868 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
16869 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
16870 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
16871 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
16872 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
16873 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
16874 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
16875 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
16876 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
16877
16878 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
16879 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
16880 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
16881 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
16882 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
16883 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
16884 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
16885 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
16886 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
16887 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
16888 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
16889 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
16890
16891 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
16892 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
16893 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
16894 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
16895 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
16896 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
16897 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
16898 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
16899 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
16900 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
16901 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
16902 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
16903
16904 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
16905 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
16906 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
16907 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
16908 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
16909 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
16910 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
16911 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
16912 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
16913 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
16914 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
16915 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
16916
16917 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
16918 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
16919 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
16920 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
16921 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
16922 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
16923 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
16924 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
16925 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
16926 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
16927 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
16928 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
16929
16930 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
16931 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
16932 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
16933 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
16934 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
16935 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
16936 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
16937 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
16938 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
16939 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
16940 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
16941 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
16942
16943 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
16944 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
16945 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
16946 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
16947 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
16948 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
16949 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
16950 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
16951 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
16952 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
16953 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
16954 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
16955
16956 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
16957 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
16958 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
16959 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
16960 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
16961 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
16962 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
16963 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
16964 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
16965 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
16966 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
16967 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
16968
16969 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
16970 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
16971 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
16972 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
16973 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
16974 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
16975 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
16976 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
16977 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
16978 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
16979 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
16980 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
16981
16982 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
16983 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
16984 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
16985 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
16986 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
16987 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
16988 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
16989 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
16990 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
16991 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
16992 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
16993 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
16994
16995 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
16996 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
16997 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
16998 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
16999 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
17000 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
17001 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
17002 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
17003 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
17004 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
17005 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
17006 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
17007
17008 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
17009 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
17010 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
17011 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
17012 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
17013 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
17014 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
17015 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
17016 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
17017 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
17018 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
17019 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
17020
17021 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
17022 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
17023 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
17024 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
17025 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
17026 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
17027 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
17028 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
17029 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
17030 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
17031 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
17032 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
17033
17034 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
17035 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
17036 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
17037 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
17038 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
17039 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
17040 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
17041 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
17042 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
17043 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
17044 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
17045 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
17046
17047 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
17048 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
17049 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
17050 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
17051 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
17052 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
17053 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
17054 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
17055 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
17056 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
17057 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
17058 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
17059
17060 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
17061 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
17062 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
17063 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
17064 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
17065 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17066 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17067 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17068 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
17069 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
17070 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
17071 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
17072
17073 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
17074 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
17075 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
17076 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
17077 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
17078 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17079 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17080 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17081 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
17082 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
17083 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
17084 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
17085
17086 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
17087 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
17088 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
17089 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
17090 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
17091 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17092 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17093 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17094 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
17095 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
17096 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
17097 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
17098
17099 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
17100 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
17101 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
17102 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
17103 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
17104 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17105 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17106 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17107 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
17108 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
17109 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
17110 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
17111
17112 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
17113 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
17114 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
17115 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
17116 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
17117 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17118 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17119 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17120 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
17121 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
17122 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
17123 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
17124
17125 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
17126 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
17127 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
17128 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
17129 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
17130 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17131 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17132 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17133 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
17134 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
17135 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
17136 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
17137
17138 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
17139 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
17140 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
17141 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
17142 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
17143 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17144 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17145 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17146 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
17147 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
17148 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
17149 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
17150
17151 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
17152 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
17153 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
17154 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
17155 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
17156 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17157 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17158 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17159 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
17160 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
17161 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
17162 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
17163
17164 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
17165 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
17166 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
17167 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
17168 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
17169 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17170 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17171 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17172 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
17173 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
17174 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
17175 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
17176
17177 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
17178 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
17179 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
17180 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
17181 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
17182 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17183 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17184 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17185 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
17186 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
17187 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
17188 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
17189
17190 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17191 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17192 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17193 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17194 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17195 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17196 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17197 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17198 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17199 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17200 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17201 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17202
17203 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17204 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17205 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17206 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17207 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17208 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17209 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17210 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17211 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17212 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17213 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17214 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17215
17216 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
17217 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
17218 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
17219 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
17220 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
17221 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
17222 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
17223 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
17224 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
17225 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
17226 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
17227 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
17228
17229 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
17230 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
17231 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
17232 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
17233
17234 cCL("flts", e000110, 2, (RF, RR), rn_rd),
17235 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
17236 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
17237 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
17238 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
17239 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
17240 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
17241 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
17242 cCL("flte", e080110, 2, (RF, RR), rn_rd),
17243 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
17244 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
17245 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
17246
17247 /* The implementation of the FIX instruction is broken on some
17248 assemblers, in that it accepts a precision specifier as well as a
17249 rounding specifier, despite the fact that this is meaningless.
17250 To be more compatible, we accept it as well, though of course it
17251 does not set any bits. */
17252 cCE("fix", e100110, 2, (RR, RF), rd_rm),
17253 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
17254 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
17255 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
17256 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
17257 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
17258 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
17259 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
17260 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
17261 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
17262 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
17263 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
17264 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
17265
17266 /* Instructions that were new with the real FPA, call them V2. */
17267 #undef ARM_VARIANT
17268 #define ARM_VARIANT & fpu_fpa_ext_v2
17269
17270 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17271 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17272 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17273 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17274 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17275 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17276
17277 #undef ARM_VARIANT
17278 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
17279
17280 /* Moves and type conversions. */
17281 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
17282 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
17283 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
17284 cCE("fmstat", ef1fa10, 0, (), noargs),
17285 cCE("vmrs", ef10a10, 2, (APSR_RR, RVC), vmrs),
17286 cCE("vmsr", ee10a10, 2, (RVC, RR), vmsr),
17287 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
17288 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
17289 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
17290 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
17291 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
17292 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
17293 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
17294 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
17295
17296 /* Memory operations. */
17297 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
17298 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
17299 cCE("fldmias", c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17300 cCE("fldmfds", c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17301 cCE("fldmdbs", d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17302 cCE("fldmeas", d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17303 cCE("fldmiax", c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17304 cCE("fldmfdx", c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17305 cCE("fldmdbx", d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17306 cCE("fldmeax", d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17307 cCE("fstmias", c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17308 cCE("fstmeas", c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
17309 cCE("fstmdbs", d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17310 cCE("fstmfds", d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
17311 cCE("fstmiax", c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17312 cCE("fstmeax", c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
17313 cCE("fstmdbx", d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17314 cCE("fstmfdx", d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
17315
17316 /* Monadic operations. */
17317 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
17318 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
17319 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
17320
17321 /* Dyadic operations. */
17322 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17323 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17324 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17325 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17326 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17327 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17328 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17329 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17330 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17331
17332 /* Comparisons. */
17333 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
17334 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
17335 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
17336 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
17337
17338 /* Double precision load/store are still present on single precision
17339 implementations. */
17340 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
17341 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
17342 cCE("fldmiad", c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17343 cCE("fldmfdd", c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17344 cCE("fldmdbd", d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17345 cCE("fldmead", d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17346 cCE("fstmiad", c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17347 cCE("fstmead", c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
17348 cCE("fstmdbd", d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17349 cCE("fstmfdd", d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
17350
17351 #undef ARM_VARIANT
17352 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
17353
17354 /* Moves and type conversions. */
17355 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17356 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
17357 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17358 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
17359 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
17360 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
17361 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
17362 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
17363 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
17364 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
17365 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17366 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
17367 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
17368
17369 /* Monadic operations. */
17370 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17371 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17372 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17373
17374 /* Dyadic operations. */
17375 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17376 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17377 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17378 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17379 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17380 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17381 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17382 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17383 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17384
17385 /* Comparisons. */
17386 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
17387 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
17388 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
17389 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
17390
17391 #undef ARM_VARIANT
17392 #define ARM_VARIANT & fpu_vfp_ext_v2
17393
17394 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
17395 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
17396 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
17397 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
17398
17399 /* Instructions which may belong to either the Neon or VFP instruction sets.
17400 Individual encoder functions perform additional architecture checks. */
17401 #undef ARM_VARIANT
17402 #define ARM_VARIANT & fpu_vfp_ext_v1xd
17403 #undef THUMB_VARIANT
17404 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
17405
17406 /* These mnemonics are unique to VFP. */
17407 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
17408 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
17409 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17410 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17411 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17412 nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
17413 nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
17414 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
17415 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
17416 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
17417
17418 /* Mnemonics shared by Neon and VFP. */
17419 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
17420 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
17421 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
17422
17423 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
17424 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
17425
17426 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
17427 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
17428
17429 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17430 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17431 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17432 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17433 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17434 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
17435 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
17436 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
17437
17438 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
17439 nCEF(vcvtb, _vcvt, 2, (RVS, RVS), neon_cvtb),
17440 nCEF(vcvtt, _vcvt, 2, (RVS, RVS), neon_cvtt),
17441
17442
17443 /* NOTE: All VMOV encoding is special-cased! */
17444 NCE(vmov, 0, 1, (VMOV), neon_mov),
17445 NCE(vmovq, 0, 1, (VMOV), neon_mov),
17446
17447 #undef THUMB_VARIANT
17448 #define THUMB_VARIANT & fpu_neon_ext_v1
17449 #undef ARM_VARIANT
17450 #define ARM_VARIANT & fpu_neon_ext_v1
17451
17452 /* Data processing with three registers of the same length. */
17453   /* integer ops, valid types S8 S16 S32 U8 U16 U32.  */
17454 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
17455 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
17456 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17457 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17458 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17459 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17460 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
17461 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
17462   /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
17463 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
17464 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
17465 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
17466 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
17467 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
17468 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
17469 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
17470 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
17471 /* If not immediate, fall back to neon_dyadic_i64_su.
17472 shl_imm should accept I8 I16 I32 I64,
17473 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
17474 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
17475 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
17476 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
17477 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
17478 /* Logic ops, types optional & ignored. */
17479 nUF(vand, _vand, 2, (RNDQ, NILO), neon_logic),
17480 nUF(vandq, _vand, 2, (RNQ, NILO), neon_logic),
17481 nUF(vbic, _vbic, 2, (RNDQ, NILO), neon_logic),
17482 nUF(vbicq, _vbic, 2, (RNQ, NILO), neon_logic),
17483 nUF(vorr, _vorr, 2, (RNDQ, NILO), neon_logic),
17484 nUF(vorrq, _vorr, 2, (RNQ, NILO), neon_logic),
17485 nUF(vorn, _vorn, 2, (RNDQ, NILO), neon_logic),
17486 nUF(vornq, _vorn, 2, (RNQ, NILO), neon_logic),
17487 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
17488 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
17489 /* Bitfield ops, untyped. */
17490 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17491 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17492 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17493 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17494 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
17495 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
17496 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
17497 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17498 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17499 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17500 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17501 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
17502 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
17503 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
17504 back to neon_dyadic_if_su. */
17505 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
17506 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
17507 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
17508 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
17509 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
17510 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
17511 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
17512 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
17513 /* Comparison. Type I8 I16 I32 F32. */
17514 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
17515 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
17516 /* As above, D registers only. */
17517 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
17518 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
17519 /* Int and float variants, signedness unimportant. */
17520 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
17521 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
17522 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
17523 /* Add/sub take types I8 I16 I32 I64 F32. */
17524 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
17525 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
17526 /* vtst takes sizes 8, 16, 32. */
17527 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
17528 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
17529 /* VMUL takes I8 I16 I32 F32 P8. */
17530 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
17531 /* VQD{R}MULH takes S16 S32. */
17532 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
17533 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
17534 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
17535 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
17536 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
17537 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
17538 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
17539 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
17540 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
17541 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
17542 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
17543 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
17544 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
17545 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
17546 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
17547 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
17548
17549 /* Two address, int/float. Types S8 S16 S32 F32. */
17550 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
17551 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
17552
17553 /* Data processing with two registers and a shift amount. */
17554 /* Right shifts, and variants with rounding.
17555 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
17556 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
17557 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
17558 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
17559 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
17560 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
17561 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
17562 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
17563 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
17564 /* Shift and insert. Sizes accepted 8 16 32 64. */
17565 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
17566 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
17567 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
17568 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
17569 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
17570 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
17571 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
17572 /* Right shift immediate, saturating & narrowing, with rounding variants.
17573 Types accepted S16 S32 S64 U16 U32 U64. */
17574 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
17575 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
17576 /* As above, unsigned. Types accepted S16 S32 S64. */
17577 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
17578 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
17579 /* Right shift narrowing. Types accepted I16 I32 I64. */
17580 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
17581 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
17582 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
17583 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
17584 /* CVT with optional immediate for fixed-point variant. */
17585 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
17586
17587 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
17588 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
17589
17590 /* Data processing, three registers of different lengths. */
17591 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
17592 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
17593 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
17594 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
17595 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
17596 /* If not scalar, fall back to neon_dyadic_long.
17597 Vector types as above, scalar types S16 S32 U16 U32. */
17598 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
17599 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
17600 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
17601 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
17602 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
17603 /* Dyadic, narrowing insns. Types I16 I32 I64. */
17604 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17605 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17606 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17607 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
17608 /* Saturating doubling multiplies. Types S16 S32. */
17609 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17610 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17611 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
17612 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
17613 S16 S32 U16 U32. */
17614 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
17615
17616 /* Extract. Size 8. */
17617 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
17618 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
17619
17620 /* Two registers, miscellaneous. */
17621 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
17622 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
17623 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
17624 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
17625 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
17626 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
17627 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
17628 /* Vector replicate. Sizes 8 16 32. */
17629 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
17630 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
17631 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
17632 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
17633 /* VMOVN. Types I16 I32 I64. */
17634 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
17635 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
17636 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
17637 /* VQMOVUN. Types S16 S32 S64. */
17638 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
17639 /* VZIP / VUZP. Sizes 8 16 32. */
17640 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
17641 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
17642 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
17643 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
17644 /* VQABS / VQNEG. Types S8 S16 S32. */
17645 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
17646 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
17647 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
17648 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
17649 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
17650 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
17651 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
17652 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
17653 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
17654 /* Reciprocal estimates. Types U32 F32. */
17655 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
17656 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
17657 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
17658 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
17659 /* VCLS. Types S8 S16 S32. */
17660 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
17661 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
17662 /* VCLZ. Types I8 I16 I32. */
17663 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
17664 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
17665 /* VCNT. Size 8. */
17666 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
17667 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
17668 /* Two address, untyped. */
17669 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
17670 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
17671 /* VTRN. Sizes 8 16 32. */
17672 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
17673 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
17674
17675 /* Table lookup. Size 8. */
17676 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
17677 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
17678
17679 #undef THUMB_VARIANT
17680 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
17681 #undef ARM_VARIANT
17682 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
17683
17684 /* Neon element/structure load/store. */
17685 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
17686 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
17687 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
17688 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
17689 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
17690 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
17691 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
17692 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
17693
17694 #undef THUMB_VARIANT
17695 #define THUMB_VARIANT &fpu_vfp_ext_v3xd
17696 #undef ARM_VARIANT
17697 #define ARM_VARIANT &fpu_vfp_ext_v3xd
17698 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
17699 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17700 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17701 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17702 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17703 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17704 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17705 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
17706 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
17707
17708 #undef THUMB_VARIANT
17709 #define THUMB_VARIANT & fpu_vfp_ext_v3
17710 #undef ARM_VARIANT
17711 #define ARM_VARIANT & fpu_vfp_ext_v3
17712
17713 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
17714 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17715 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17716 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17717 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17718 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17719 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17720 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
17721 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
17722
17723 #undef ARM_VARIANT
17724 #define ARM_VARIANT &fpu_vfp_ext_fma
17725 #undef THUMB_VARIANT
17726 #define THUMB_VARIANT &fpu_vfp_ext_fma
17727 /* Mnemonics shared by Neon and VFP. These are included in the
17728 VFP FMA variant; NEON and VFP FMA always includes the NEON
17729 FMA instructions. */
17730 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
17731 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
17732 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
17733 the v form should always be used. */
17734 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17735 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
17736 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17737 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
17738 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17739 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17740
17741 #undef THUMB_VARIANT
17742 #undef ARM_VARIANT
17743 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
17744
17745 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17746 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17747 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17748 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17749 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17750 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
17751 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
17752 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
17753
17754 #undef ARM_VARIANT
17755 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
17756
17757 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
17758 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
17759 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
17760 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
17761 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
17762 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
17763 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
17764 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
17765 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
17766 cCE("textrmub", e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17767 cCE("textrmuh", e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17768 cCE("textrmuw", e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
17769 cCE("textrmsb", e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17770 cCE("textrmsh", e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17771 cCE("textrmsw", e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
17772 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17773 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17774 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
17775 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
17776 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
17777 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17778 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17779 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17780 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17781 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17782 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
17783 cCE("tmovmskb", e100030, 2, (RR, RIWR), rd_rn),
17784 cCE("tmovmskh", e500030, 2, (RR, RIWR), rd_rn),
17785 cCE("tmovmskw", e900030, 2, (RR, RIWR), rd_rn),
17786 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
17787 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
17788 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
17789 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
17790 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
17791 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
17792 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
17793 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
17794 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17795 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17796 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17797 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17798 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17799 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17800 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17801 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17802 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17803 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
17804 cCE("walignr0", e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17805 cCE("walignr1", e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17806 cCE("walignr2", ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17807 cCE("walignr3", eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17808 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17809 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17810 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17811 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17812 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17813 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17814 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17815 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17816 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17817 cCE("wcmpgtub", e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17818 cCE("wcmpgtuh", e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17819 cCE("wcmpgtuw", e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17820 cCE("wcmpgtsb", e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17821 cCE("wcmpgtsh", e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17822 cCE("wcmpgtsw", eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17823 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17824 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17825 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
17826 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
17827 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17828 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17829 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17830 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17831 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17832 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17833 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17834 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17835 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17836 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17837 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17838 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17839 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17840 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17841 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17842 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17843 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17844 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17845 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
17846 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17847 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17848 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17849 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17850 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17851 cCE("wpackhss", e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17852 cCE("wpackhus", e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17853 cCE("wpackwss", eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17854 cCE("wpackwus", e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17855 cCE("wpackdss", ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17856 cCE("wpackdus", ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17857 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17858 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17859 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17860 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17861 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17862 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17863 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17864 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17865 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17866 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17867 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
17868 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17869 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17870 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17871 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17872 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17873 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17874 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17875 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17876 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17877 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17878 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17879 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17880 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17881 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17882 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17883 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17884 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
17885 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
17886 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17887 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
17888 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
17889 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
17890 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17891 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17892 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17893 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17894 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17895 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17896 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17897 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17898 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17899 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
17900 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
17901 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
17902 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
17903 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
17904 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
17905 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17906 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17907 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17908 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
17909 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
17910 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
17911 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
17912 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
17913 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
17914 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17915 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17916 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17917 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17918 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
17919
17920 #undef ARM_VARIANT
17921 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
17922
17923 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
17924 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
17925 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
17926 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
17927 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
17928 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
17929 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17930 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17931 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17932 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17933 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17934 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17935 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17936 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17937 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17938 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17939 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17940 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17941 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17942 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17943 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
17944 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17945 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17946 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17947 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17948 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17949 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17950 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17951 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17952 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17953 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17954 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17955 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17956 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17957 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17958 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17959 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17960 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17961 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17962 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17963 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17964 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17965 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17966 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17967 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17968 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17969 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17970 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17971 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17972 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17973 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17974 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17975 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17976 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17977 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17978 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17979 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
17980
17981 #undef ARM_VARIANT
17982 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
17983
17984 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
17985 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
17986 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
17987 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
17988 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
17989 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
17990 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
17991 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
17992 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
17993 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
17994 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
17995 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
17996 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
17997 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
17998 cCE("cfmv64lr", e000510, 2, (RMDX, RR), rn_rd),
17999 cCE("cfmvr64l", e100510, 2, (RR, RMDX), rd_rn),
18000 cCE("cfmv64hr", e000530, 2, (RMDX, RR), rn_rd),
18001 cCE("cfmvr64h", e100530, 2, (RR, RMDX), rd_rn),
18002 cCE("cfmval32", e200440, 2, (RMAX, RMFX), rd_rn),
18003 cCE("cfmv32al", e100440, 2, (RMFX, RMAX), rd_rn),
18004 cCE("cfmvam32", e200460, 2, (RMAX, RMFX), rd_rn),
18005 cCE("cfmv32am", e100460, 2, (RMFX, RMAX), rd_rn),
18006 cCE("cfmvah32", e200480, 2, (RMAX, RMFX), rd_rn),
18007 cCE("cfmv32ah", e100480, 2, (RMFX, RMAX), rd_rn),
18008 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
18009 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
18010 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
18011 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
18012 cCE("cfmvsc32", e2004e0, 2, (RMDS, RMDX), mav_dspsc),
18013 cCE("cfmv32sc", e1004e0, 2, (RMDX, RMDS), rd),
18014 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
18015 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
18016 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
18017 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
18018 cCE("cfcvt32s", e000480, 2, (RMF, RMFX), rd_rn),
18019 cCE("cfcvt32d", e0004a0, 2, (RMD, RMFX), rd_rn),
18020 cCE("cfcvt64s", e0004c0, 2, (RMF, RMDX), rd_rn),
18021 cCE("cfcvt64d", e0004e0, 2, (RMD, RMDX), rd_rn),
18022 cCE("cfcvts32", e100580, 2, (RMFX, RMF), rd_rn),
18023 cCE("cfcvtd32", e1005a0, 2, (RMFX, RMD), rd_rn),
18024 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
18025 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
18026 cCE("cfrshl32", e000550, 3, (RMFX, RMFX, RR), mav_triple),
18027 cCE("cfrshl64", e000570, 3, (RMDX, RMDX, RR), mav_triple),
18028 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
18029 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
18030 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
18031 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
18032 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
18033 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
18034 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
18035 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
18036 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
18037 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
18038 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
18039 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
18040 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
18041 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
18042 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
18043 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
18044 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
18045 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
18046 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
18047 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
18048 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18049 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18050 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18051 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18052 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18053 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18054 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18055 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18056 cCE("cfmadd32", e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18057 cCE("cfmsub32", e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18058 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18059 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18060 };
18061 #undef ARM_VARIANT
18062 #undef THUMB_VARIANT
18063 #undef TCE
18064 #undef TCM
18065 #undef TUE
18066 #undef TUF
18067 #undef TCC
18068 #undef cCE
18069 #undef cCL
18070 #undef C3E
18071 #undef CE
18072 #undef CM
18073 #undef UE
18074 #undef UF
18075 #undef UT
18076 #undef NUF
18077 #undef nUF
18078 #undef NCE
18079 #undef nCE
18080 #undef OPS0
18081 #undef OPS1
18082 #undef OPS2
18083 #undef OPS3
18084 #undef OPS4
18085 #undef OPS5
18086 #undef OPS6
18087 #undef do_0
18088 \f
18089 /* MD interface: bits in the object file. */
18090
18091 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
18092 for use in the a.out file, and stores them in the array pointed to by buf.
18093 This knows about the endian-ness of the target machine and does
18094 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
18095 2 (short) and 4 (long) Floating numbers are put out as a series of
18096 LITTLENUMS (shorts, here at least). */
18097
18098 void
18099 md_number_to_chars (char * buf, valueT val, int n)
18100 {
18101 if (target_big_endian)
18102 number_to_chars_bigendian (buf, val, n);
18103 else
18104 number_to_chars_littleendian (buf, val, n);
18105 }
18106
18107 static valueT
18108 md_chars_to_number (char * buf, int n)
18109 {
18110 valueT result = 0;
18111 unsigned char * where = (unsigned char *) buf;
18112
18113 if (target_big_endian)
18114 {
18115 while (n--)
18116 {
18117 result <<= 8;
18118 result |= (*where++ & 255);
18119 }
18120 }
18121 else
18122 {
18123 while (n--)
18124 {
18125 result <<= 8;
18126 result |= (where[n] & 255);
18127 }
18128 }
18129
18130 return result;
18131 }
18132
18133 /* MD interface: Sections. */
18134
18135 /* Estimate the size of a frag before relaxing. Assume everything fits in
18136 2 bytes. */
18137
18138 int
18139 md_estimate_size_before_relax (fragS * fragp,
18140 segT segtype ATTRIBUTE_UNUSED)
18141 {
18142 fragp->fr_var = 2;
18143 return 2;
18144 }
18145
/* Convert a machine dependent frag.  Called once relaxation has
   settled: rewrite the Thumb instruction held in the frag into its
   final 16- or 32-bit encoding (fr_var tells us which was chosen),
   and plant a fixup so the immediate/offset field is filled in
   later.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction being converted sits at the end of the fixed part
     of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* The original narrow (16-bit) encoding; register fields are
     extracted from it when widening to 32 bits below.  */
  old_op = bfd_get_16(abfd, buf);
  /* Build the expression the fixup will resolve: either symbol+offset
     or a plain constant.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the T_MNEM_* code selected at assembly time.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* NOTE(review): narrow encodings with top nibble 4 or 9
	     (PC-/SP-relative forms) appear to keep Rd in bits 8-10,
	     while the other load/store forms keep Rt in bits 0-2 and
	     Rn in bits 3-5 — confirm against the Thumb encoding
	     tables.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  /* NOTE(review): presumably selects the immediate-offset
	     addressing variant of the 32-bit encoding — verify against
	     the T32 load/store encoding.  */
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the second literal-pool load form is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Carry the destination register over from the narrow form.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the PC bias of the narrow ADR form.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs place Rd in bits 8-10 of the narrow form; cmp/cmn
	     use a different field position, hence the shift offset.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  /* NOTE(review): mask/merge appears to switch the 32-bit
	     encoding to its immediate variant — confirm against the
	     T32 data-processing (modified immediate) encoding.  */
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition field into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  /* Carry the destination register over from the narrow form.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Carry Rd and Rn over from the narrow encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* NOTE(review): bit 20 seems to distinguish the flag-setting
	     variants, which need the ADD_IMM reloc — confirm.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      /* An fr_subtype we never emit — assembler bug if reached.  */
      abort ();
    }
  /* Emit the fixup over the (now final-sized) instruction, preserving
     the original source location for diagnostics.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
}
18314
18315 /* Return the size of a relaxable immediate operand instruction.
18316 SHIFT and SIZE specify the form of the allowable immediate. */
18317 static int
18318 relax_immediate (fragS *fragp, int size, int shift)
18319 {
18320 offsetT offset;
18321 offsetT mask;
18322 offsetT low;
18323
18324 /* ??? Should be able to do better than this. */
18325 if (fragp->fr_symbol)
18326 return 4;
18327
18328 low = (1 << shift) - 1;
18329 mask = (1 << (shift + size)) - (1 << shift);
18330 offset = fragp->fr_offset;
18331 /* Force misaligned offsets to 32-bit variant. */
18332 if (offset & low)
18333 return 4;
18334 if (offset & ~mask)
18335 return 4;
18336 return 2;
18337 }
18338
/* Get the address of a symbol during relaxation.  Returns the symbol's
   value plus the frag's offset, compensated for any stretch that this
   relaxation pass has applied to frags the symbol's frag has not yet
   seen.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  /* relax_marker differs when SYM_FRAG has not been relaxed on the
     current pass yet.  */
  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero to the alignment
		 boundary, since the alignment frag absorbs that much
		 of the movement.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Once rounding has reduced the stretch to zero there
		 is nothing left to propagate to the symbol.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* f == NULL means SYM_FRAG was never found ahead of us; in that
	 case the symbol does not move with us and ADDR stays as-is.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
18388
18389 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
18390 load. */
18391 static int
18392 relax_adr (fragS *fragp, asection *sec, long stretch)
18393 {
18394 addressT addr;
18395 offsetT val;
18396
18397 /* Assume worst case for symbols not known to be in the same section. */
18398 if (fragp->fr_symbol == NULL
18399 || !S_IS_DEFINED (fragp->fr_symbol)
18400 || sec != S_GET_SEGMENT (fragp->fr_symbol))
18401 return 4;
18402
18403 val = relaxed_symbol_addr (fragp, stretch);
18404 addr = fragp->fr_address + fragp->fr_fix;
18405 addr = (addr + 4) & ~3;
18406 /* Force misaligned targets to 32-bit variant. */
18407 if (val & 3)
18408 return 4;
18409 val -= addr;
18410 if (val < 0 || val > 1020)
18411 return 4;
18412 return 2;
18413 }
18414
18415 /* Return the size of a relaxable add/sub immediate instruction. */
18416 static int
18417 relax_addsub (fragS *fragp, asection *sec)
18418 {
18419 char *buf;
18420 int op;
18421
18422 buf = fragp->fr_literal + fragp->fr_fix;
18423 op = bfd_get_16(sec->owner, buf);
18424 if ((op & 0xf) == ((op >> 4) & 0xf))
18425 return relax_immediate (fragp, 8, 0);
18426 else
18427 return relax_immediate (fragp, 3, 0);
18428 }
18429
18430
18431 /* Return the size of a relaxable branch instruction. BITS is the
18432 size of the offset field in the narrow instruction. */
18433
18434 static int
18435 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
18436 {
18437 addressT addr;
18438 offsetT val;
18439 offsetT limit;
18440
18441 /* Assume worst case for symbols not known to be in the same section. */
18442 if (!S_IS_DEFINED (fragp->fr_symbol)
18443 || sec != S_GET_SEGMENT (fragp->fr_symbol))
18444 return 4;
18445
18446 #ifdef OBJ_ELF
18447 if (S_IS_DEFINED (fragp->fr_symbol)
18448 && ARM_IS_FUNC (fragp->fr_symbol))
18449 return 4;
18450 #endif
18451
18452 val = relaxed_symbol_addr (fragp, stretch);
18453 addr = fragp->fr_address + fragp->fr_fix + 4;
18454 val -= addr;
18455
18456 /* Offset is a signed value *2 */
18457 limit = 1 << bits;
18458 if (val >= limit || val < -limit)
18459 return 4;
18460 return 2;
18461 }
18462
18463
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the mnemonic recorded in fr_subtype; each helper
     returns the size (2 or 4) this instruction needs on this pass.
     The (size, shift) pairs describe the narrow encoding's immediate
     field.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit immediate, word-scaled.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit immediate, halfword-scaled.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
18542
18543 /* Round up a section size to the appropriate boundary. */
18544
18545 valueT
18546 md_section_align (segT segment ATTRIBUTE_UNUSED,
18547 valueT size)
18548 {
18549 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
18550 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
18551 {
18552 /* For a.out, force the section size to be aligned. If we don't do
18553 this, BFD will align it for us, but it will not write out the
18554 final bytes of the section. This may be a bug in BFD, but it is
18555 easier to fix it here since that is how the other a.out targets
18556 work. */
18557 int align;
18558
18559 align = bfd_get_section_alignment (stdoutput, segment);
18560 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
18561 }
18562 #endif
18563
18564 return size;
18565 }
18566
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment: zero bytes up to the first no-op
   boundary, then ARM or Thumb no-op instructions as appropriate.  */

void
arm_handle_align (fragS * fragP)
{
  /* Pre-encoded no-op patterns, indexed by [ISA variant][endianness].  */
  static char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this alignment frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Select the no-op pattern based on the frag's recorded ARM/Thumb
     mode and the selected CPU's capabilities.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	{
	  /* Thumb-2: pad with wide no-ops, with at most one narrow
	     no-op to fix up a halfword-only remainder.  */
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Pad any leading misalignment with zero bytes so the no-ops that
     follow start on an instruction boundary.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      /* The zero bytes are data, not code, so they get a $d mapping
	 symbol.  */
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  /* Fill the rest of the frag with whole no-op instructions.  */
  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
18683
18684 /* Called from md_do_align. Used to create an alignment
18685 frag in a code section. */
18686
18687 void
18688 arm_frag_align_code (int n, int max)
18689 {
18690 char * p;
18691
18692 /* We assume that there will never be a requirement
18693 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
18694 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
18695 {
18696 char err_msg[128];
18697
18698 sprintf (err_msg,
18699 _("alignments greater than %d bytes not supported in .text sections."),
18700 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
18701 as_fatal ("%s", err_msg);
18702 }
18703
18704 p = frag_var (rs_align_code,
18705 MAX_MEM_FOR_RS_ALIGN_CODE,
18706 1,
18707 (relax_substateT) max,
18708 (symbolS *) NULL,
18709 (offsetT) n,
18710 (char *) NULL);
18711 *p = 0;
18712 }
18713
18714 /* Perform target specific initialisation of a frag.
18715 Note - despite the name this initialisation is not done when the frag
18716 is created, but only when its type is assigned. A frag can be created
18717 and used a long time before its type is set, so beware of assuming that
18718 this initialisationis performed first. */
18719
18720 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Non-ELF version: no mapping symbols are needed, so just record
     whether this frag is in an ARM or a THUMB area.  MODE_RECORDED
     marks the field as having been set.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
18727
18728 #else /* OBJ_ELF is defined. */
18729 void
18730 arm_init_frag (fragS * fragP, int max_chars)
18731 {
18732 /* If the current ARM vs THUMB mode has not already
18733 been recorded into this frag then do so now. */
18734 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
18735 {
18736 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
18737
18738 /* Record a mapping symbol for alignment frags. We will delete this
18739 later if the alignment ends up empty. */
18740 switch (fragP->fr_type)
18741 {
18742 case rs_align:
18743 case rs_align_test:
18744 case rs_fill:
18745 mapping_state_2 (MAP_DATA, max_chars);
18746 break;
18747 case rs_align_code:
18748 mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
18749 break;
18750 default:
18751 break;
18752 }
18753 }
18754 }
18755
18756 /* When we change sections we need to issue a new mapping symbol. */
18757
18758 void
18759 arm_elf_change_section (void)
18760 {
18761 /* Link an unlinked unwind index table section to the .text section. */
18762 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
18763 && elf_linked_to_section (now_seg) == NULL)
18764 elf_linked_to_section (now_seg) = text_section;
18765 }
18766
18767 int
18768 arm_elf_section_type (const char * str, size_t len)
18769 {
18770 if (len == 5 && strncmp (str, "exidx", 5) == 0)
18771 return SHT_ARM_EXIDX;
18772
18773 return -1;
18774 }
18775 \f
18776 /* Code to deal with unwinding tables. */
18777
18778 static void add_unwind_adjustsp (offsetT);
18779
18780 /* Generate any deferred unwind frame offset. */
18781
18782 static void
18783 flush_pending_unwind (void)
18784 {
18785 offsetT offset;
18786
18787 offset = unwind.pending_offset;
18788 unwind.pending_offset = 0;
18789 if (offset != 0)
18790 add_unwind_adjustsp (offset);
18791 }
18792
18793 /* Add an opcode to this list for this function. Two-byte opcodes should
18794 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
18795 order. */
18796
18797 static void
18798 add_unwind_opcode (valueT op, int length)
18799 {
18800 /* Add any deferred stack adjustment. */
18801 if (unwind.pending_offset)
18802 flush_pending_unwind ();
18803
18804 unwind.sp_restored = 0;
18805
18806 if (unwind.opcode_count + length > unwind.opcode_alloc)
18807 {
18808 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
18809 if (unwind.opcodes)
18810 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
18811 unwind.opcode_alloc);
18812 else
18813 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
18814 }
18815 while (length > 0)
18816 {
18817 length--;
18818 unwind.opcodes[unwind.opcode_count] = op & 0xff;
18819 op >>= 8;
18820 unwind.opcode_count++;
18821 }
18822 }
18823
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Note the opcode list is built in reverse order, so multi-byte
   sequences are appended last byte first.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      /* A zero uleb128 still needs one explicit zero byte.  */
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  /* Continuation bit on all but the last byte.  */
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  Bytes go in reversed, then the 0xb2 opcode
	 itself, so the final (reversed) list reads 0xb2, uleb128.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit maximal 0x7f opcodes for each 0x100
	 chunk, then one final opcode for the remainder.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
18885
18886 /* Finish the list of unwind opcodes for this function. */
18887 static void
18888 finish_unwind_opcodes (void)
18889 {
18890 valueT op;
18891
18892 if (unwind.fp_used)
18893 {
18894 /* Adjust sp as necessary. */
18895 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
18896 flush_pending_unwind ();
18897
18898 /* After restoring sp from the frame pointer. */
18899 op = 0x90 | unwind.fp_reg;
18900 add_unwind_opcode (op, 1);
18901 }
18902 else
18903 flush_pending_unwind ();
18904 }
18905
18906
18907 /* Start an exception table entry. If idx is nonzero this is an index table
18908 entry. */
18909
18910 static void
18911 start_unwind_section (const segT text_seg, int idx)
18912 {
18913 const char * text_name;
18914 const char * prefix;
18915 const char * prefix_once;
18916 const char * group_name;
18917 size_t prefix_len;
18918 size_t text_len;
18919 char * sec_name;
18920 size_t sec_name_len;
18921 int type;
18922 int flags;
18923 int linkonce;
18924
18925 if (idx)
18926 {
18927 prefix = ELF_STRING_ARM_unwind;
18928 prefix_once = ELF_STRING_ARM_unwind_once;
18929 type = SHT_ARM_EXIDX;
18930 }
18931 else
18932 {
18933 prefix = ELF_STRING_ARM_unwind_info;
18934 prefix_once = ELF_STRING_ARM_unwind_info_once;
18935 type = SHT_PROGBITS;
18936 }
18937
18938 text_name = segment_name (text_seg);
18939 if (streq (text_name, ".text"))
18940 text_name = "";
18941
18942 if (strncmp (text_name, ".gnu.linkonce.t.",
18943 strlen (".gnu.linkonce.t.")) == 0)
18944 {
18945 prefix = prefix_once;
18946 text_name += strlen (".gnu.linkonce.t.");
18947 }
18948
18949 prefix_len = strlen (prefix);
18950 text_len = strlen (text_name);
18951 sec_name_len = prefix_len + text_len;
18952 sec_name = (char *) xmalloc (sec_name_len + 1);
18953 memcpy (sec_name, prefix, prefix_len);
18954 memcpy (sec_name + prefix_len, text_name, text_len);
18955 sec_name[prefix_len + text_len] = '\0';
18956
18957 flags = SHF_ALLOC;
18958 linkonce = 0;
18959 group_name = 0;
18960
18961 /* Handle COMDAT group. */
18962 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
18963 {
18964 group_name = elf_group_name (text_seg);
18965 if (group_name == NULL)
18966 {
18967 as_bad (_("Group section `%s' has no group signature"),
18968 segment_name (text_seg));
18969 ignore_rest_of_line ();
18970 return;
18971 }
18972 flags |= SHF_GROUP;
18973 linkonce = 1;
18974 }
18975
18976 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
18977
18978 /* Set the section link for index tables. */
18979 if (idx)
18980 elf_linked_to_section (now_seg) = text_seg;
18981 }
18982
18983
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

static valueT
create_unwind_entry (int have_data)
{
  /* Number of words of opcode data (excluding the first word).  */
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      /* personality_index -2 means .cantunwind was seen.  */
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  /* Routine 0 can only hold three opcode bytes inline.  */
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      /* Opcodes were stored in reverse order, so popping from
		 the end yields them in forward order.  */
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    /* An extra byte is required for the opcode count.	*/
    size = unwind.opcode_count + 1;

  /* Round the byte count up to whole words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Table entries must be word-aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.	*/
  ptr = frag_more ((size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.	*/
      data = size - 1;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.	*/
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
19141
19142
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  /* On function entry the CFA is sp + 0.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
19150 #endif /* OBJ_ELF */
19151
19152 /* Convert REGNAME to a DWARF-2 register number. */
19153
19154 int
19155 tc_arm_regname_to_dw2regnum (char *regname)
19156 {
19157 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
19158
19159 if (reg == FAIL)
19160 return -1;
19161
19162 return reg;
19163 }
19164
19165 #ifdef TE_PE
19166 void
19167 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
19168 {
19169 expressionS exp;
19170
19171 exp.X_op = O_secrel;
19172 exp.X_add_symbol = symbol;
19173 exp.X_add_number = 0;
19174 emit_expr (&exp, size);
19175 }
19176 #endif
19177
19178 /* MD interface: Symbol and relocation handling. */
19179
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* For branches to ARM function symbols on v5t+, restore the
	 full base even when a relocation is emitted.  NOTE(review):
	 presumably because the linker may convert this to BL/BLX or
	 insert a veneer — confirm.  */
      if (fixP->fx_addsy
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
       return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
19299
19300 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
19301 Otherwise we have no need to default values of symbols. */
19302
19303 symbolS *
19304 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
19305 {
19306 #ifdef OBJ_ELF
19307 if (name[0] == '_' && name[1] == 'G'
19308 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
19309 {
19310 if (!GOT_symbol)
19311 {
19312 if (symbol_find (name))
19313 as_bad (_("GOT already in the symbol table"));
19314
19315 GOT_symbol = symbol_new (name, undefined_section,
19316 (valueT) 0, & zero_address_frag);
19317 }
19318
19319 return GOT_symbol;
19320 }
19321 #endif
19322
19323 return NULL;
19324 }
19325
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.  Returns the encoding of the low part and stores the
   encoding of the high part through HIGHPART, or FAIL.  */

static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try each even rotation, looking for one that leaves a non-zero
     low byte plus a second byte that itself fits an 8-bit rotated
     immediate.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* Bits outside the low 16 can't be covered by a second
	       adjacent byte at this rotation; keep looking.  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Low byte plus the rotation count i/2 in the rotate field
	   (hence the << 7 rather than << 8).  */
	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
19364
19365 static int
19366 validate_offset_imm (unsigned int val, int hwse)
19367 {
19368 if ((hwse && val > 255) || val > 4095)
19369 return FAIL;
19370 return val;
19371 }
19372
19373 /* Subroutine of md_apply_fix. Do those data_ops which can take a
19374 negative immediate constant by altering the instruction. A bit of
19375 a hack really.
19376 MOV <-> MVN
19377 AND <-> BIC
19378 ADC <-> SBC
19379 by inverting the second operand, and
19380 ADD <-> SUB
19381 CMP <-> CMN
19382 by negating the second operand. */
19383
19384 static int
19385 negate_data_op (unsigned long * instruction,
19386 unsigned long value)
19387 {
19388 int op, new_inst;
19389 unsigned long negated, inverted;
19390
19391 negated = encode_arm_immediate (-value);
19392 inverted = encode_arm_immediate (~value);
19393
19394 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
19395 switch (op)
19396 {
19397 /* First negates. */
19398 case OPCODE_SUB: /* ADD <-> SUB */
19399 new_inst = OPCODE_ADD;
19400 value = negated;
19401 break;
19402
19403 case OPCODE_ADD:
19404 new_inst = OPCODE_SUB;
19405 value = negated;
19406 break;
19407
19408 case OPCODE_CMP: /* CMP <-> CMN */
19409 new_inst = OPCODE_CMN;
19410 value = negated;
19411 break;
19412
19413 case OPCODE_CMN:
19414 new_inst = OPCODE_CMP;
19415 value = negated;
19416 break;
19417
19418 /* Now Inverted ops. */
19419 case OPCODE_MOV: /* MOV <-> MVN */
19420 new_inst = OPCODE_MVN;
19421 value = inverted;
19422 break;
19423
19424 case OPCODE_MVN:
19425 new_inst = OPCODE_MOV;
19426 value = inverted;
19427 break;
19428
19429 case OPCODE_AND: /* AND <-> BIC */
19430 new_inst = OPCODE_BIC;
19431 value = inverted;
19432 break;
19433
19434 case OPCODE_BIC:
19435 new_inst = OPCODE_AND;
19436 value = inverted;
19437 break;
19438
19439 case OPCODE_ADC: /* ADC <-> SBC */
19440 new_inst = OPCODE_SBC;
19441 value = inverted;
19442 break;
19443
19444 case OPCODE_SBC:
19445 new_inst = OPCODE_ADC;
19446 value = inverted;
19447 break;
19448
19449 /* We cannot do anything. */
19450 default:
19451 return FAIL;
19452 }
19453
19454 if (value == (unsigned) FAIL)
19455 return FAIL;
19456
19457 *instruction &= OPCODE_MASK;
19458 *instruction |= new_inst << DATA_OP_SHIFT;
19459 return value;
19460 }
19461
19462 /* Like negate_data_op, but for Thumb-2. */
19463
19464 static unsigned int
19465 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
19466 {
19467 int op, new_inst;
19468 int rd;
19469 unsigned int negated, inverted;
19470
19471 negated = encode_thumb32_immediate (-value);
19472 inverted = encode_thumb32_immediate (~value);
19473
19474 rd = (*instruction >> 8) & 0xf;
19475 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
19476 switch (op)
19477 {
19478 /* ADD <-> SUB. Includes CMP <-> CMN. */
19479 case T2_OPCODE_SUB:
19480 new_inst = T2_OPCODE_ADD;
19481 value = negated;
19482 break;
19483
19484 case T2_OPCODE_ADD:
19485 new_inst = T2_OPCODE_SUB;
19486 value = negated;
19487 break;
19488
19489 /* ORR <-> ORN. Includes MOV <-> MVN. */
19490 case T2_OPCODE_ORR:
19491 new_inst = T2_OPCODE_ORN;
19492 value = inverted;
19493 break;
19494
19495 case T2_OPCODE_ORN:
19496 new_inst = T2_OPCODE_ORR;
19497 value = inverted;
19498 break;
19499
19500 /* AND <-> BIC. TST has no inverted equivalent. */
19501 case T2_OPCODE_AND:
19502 new_inst = T2_OPCODE_BIC;
19503 if (rd == 15)
19504 value = FAIL;
19505 else
19506 value = inverted;
19507 break;
19508
19509 case T2_OPCODE_BIC:
19510 new_inst = T2_OPCODE_AND;
19511 value = inverted;
19512 break;
19513
19514 /* ADC <-> SBC */
19515 case T2_OPCODE_ADC:
19516 new_inst = T2_OPCODE_SBC;
19517 value = inverted;
19518 break;
19519
19520 case T2_OPCODE_SBC:
19521 new_inst = T2_OPCODE_ADC;
19522 value = inverted;
19523 break;
19524
19525 /* We cannot do anything. */
19526 default:
19527 return FAIL;
19528 }
19529
19530 if (value == (unsigned int)FAIL)
19531 return FAIL;
19532
19533 *instruction &= T2_OPCODE_MASK;
19534 *instruction |= new_inst << T2_DATA_OP_SHIFT;
19535 return value;
19536 }
19537
19538 /* Read a 32-bit thumb instruction from buf. */
19539 static unsigned long
19540 get_thumb32_insn (char * buf)
19541 {
19542 unsigned long insn;
19543 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
19544 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19545
19546 return insn;
19547 }
19548
19549
19550 /* We usually want to set the low bit on the address of thumb function
19551 symbols. In particular .word foo - . should have the low bit set.
19552 Generic code tries to fold the difference of two symbols to
19553 a constant. Prevent this and force a relocation when the first symbols
19554 is a thumb function. */
19555
19556 bfd_boolean
19557 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
19558 {
19559 if (op == O_subtract
19560 && l->X_op == O_symbol
19561 && r->X_op == O_symbol
19562 && THUMB_IS_FUNC (l->X_add_symbol))
19563 {
19564 l->X_op = O_subtract;
19565 l->X_op_symbol = r->X_add_symbol;
19566 l->X_add_number -= r->X_add_number;
19567 return TRUE;
19568 }
19569
19570 /* Process as normal. */
19571 return FALSE;
19572 }
19573
19574 /* Encode Thumb2 unconditional branches and calls. The encoding
19575 for the 2 are identical for the immediate values. */
19576
19577 static void
19578 encode_thumb2_b_bl_offset (char * buf, offsetT value)
19579 {
19580 #define T2I1I2MASK ((1 << 13) | (1 << 11))
19581 offsetT newval;
19582 offsetT newval2;
19583 addressT S, I1, I2, lo, hi;
19584
19585 S = (value >> 24) & 0x01;
19586 I1 = (value >> 23) & 0x01;
19587 I2 = (value >> 22) & 0x01;
19588 hi = (value >> 12) & 0x3ff;
19589 lo = (value >> 1) & 0x7ff;
19590 newval = md_chars_to_number (buf, THUMB_SIZE);
19591 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19592 newval |= (S << 10) | hi;
19593 newval2 &= ~T2I1I2MASK;
19594 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
19595 md_number_to_chars (buf, newval, THUMB_SIZE);
19596 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
19597 }
19598
19599 void
19600 md_apply_fix (fixS * fixP,
19601 valueT * valP,
19602 segT seg)
19603 {
19604 offsetT value = * valP;
19605 offsetT newval;
19606 unsigned int newimm;
19607 unsigned long temp;
19608 int sign;
19609 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
19610
19611 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
19612
19613 /* Note whether this will delete the relocation. */
19614
19615 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
19616 fixP->fx_done = 1;
19617
19618 /* On a 64-bit host, silently truncate 'value' to 32 bits for
19619 consistency with the behaviour on 32-bit hosts. Remember value
19620 for emit_reloc. */
19621 value &= 0xffffffff;
19622 value ^= 0x80000000;
19623 value -= 0x80000000;
19624
19625 *valP = value;
19626 fixP->fx_addnumber = value;
19627
19628 /* Same treatment for fixP->fx_offset. */
19629 fixP->fx_offset &= 0xffffffff;
19630 fixP->fx_offset ^= 0x80000000;
19631 fixP->fx_offset -= 0x80000000;
19632
19633 switch (fixP->fx_r_type)
19634 {
19635 case BFD_RELOC_NONE:
19636 /* This will need to go in the object file. */
19637 fixP->fx_done = 0;
19638 break;
19639
19640 case BFD_RELOC_ARM_IMMEDIATE:
19641 /* We claim that this fixup has been processed here,
19642 even if in fact we generate an error because we do
19643 not have a reloc for it, so tc_gen_reloc will reject it. */
19644 fixP->fx_done = 1;
19645
19646 if (fixP->fx_addsy
19647 && ! S_IS_DEFINED (fixP->fx_addsy))
19648 {
19649 as_bad_where (fixP->fx_file, fixP->fx_line,
19650 _("undefined symbol %s used as an immediate value"),
19651 S_GET_NAME (fixP->fx_addsy));
19652 break;
19653 }
19654
19655 if (fixP->fx_addsy
19656 && S_GET_SEGMENT (fixP->fx_addsy) != seg)
19657 {
19658 as_bad_where (fixP->fx_file, fixP->fx_line,
19659 _("symbol %s is in a different section"),
19660 S_GET_NAME (fixP->fx_addsy));
19661 break;
19662 }
19663
19664 newimm = encode_arm_immediate (value);
19665 temp = md_chars_to_number (buf, INSN_SIZE);
19666
19667 /* If the instruction will fail, see if we can fix things up by
19668 changing the opcode. */
19669 if (newimm == (unsigned int) FAIL
19670 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
19671 {
19672 as_bad_where (fixP->fx_file, fixP->fx_line,
19673 _("invalid constant (%lx) after fixup"),
19674 (unsigned long) value);
19675 break;
19676 }
19677
19678 newimm |= (temp & 0xfffff000);
19679 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
19680 break;
19681
19682 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
19683 {
19684 unsigned int highpart = 0;
19685 unsigned int newinsn = 0xe1a00000; /* nop. */
19686
19687 if (fixP->fx_addsy
19688 && ! S_IS_DEFINED (fixP->fx_addsy))
19689 {
19690 as_bad_where (fixP->fx_file, fixP->fx_line,
19691 _("undefined symbol %s used as an immediate value"),
19692 S_GET_NAME (fixP->fx_addsy));
19693 break;
19694 }
19695
19696 if (fixP->fx_addsy
19697 && S_GET_SEGMENT (fixP->fx_addsy) != seg)
19698 {
19699 as_bad_where (fixP->fx_file, fixP->fx_line,
19700 _("symbol %s is in a different section"),
19701 S_GET_NAME (fixP->fx_addsy));
19702 break;
19703 }
19704
19705 newimm = encode_arm_immediate (value);
19706 temp = md_chars_to_number (buf, INSN_SIZE);
19707
19708 /* If the instruction will fail, see if we can fix things up by
19709 changing the opcode. */
19710 if (newimm == (unsigned int) FAIL
19711 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
19712 {
19713 /* No ? OK - try using two ADD instructions to generate
19714 the value. */
19715 newimm = validate_immediate_twopart (value, & highpart);
19716
19717 /* Yes - then make sure that the second instruction is
19718 also an add. */
19719 if (newimm != (unsigned int) FAIL)
19720 newinsn = temp;
19721 /* Still No ? Try using a negated value. */
19722 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
19723 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
19724 /* Otherwise - give up. */
19725 else
19726 {
19727 as_bad_where (fixP->fx_file, fixP->fx_line,
19728 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
19729 (long) value);
19730 break;
19731 }
19732
19733 /* Replace the first operand in the 2nd instruction (which
19734 is the PC) with the destination register. We have
19735 already added in the PC in the first instruction and we
19736 do not want to do it again. */
19737 newinsn &= ~ 0xf0000;
19738 newinsn |= ((newinsn & 0x0f000) << 4);
19739 }
19740
19741 newimm |= (temp & 0xfffff000);
19742 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
19743
19744 highpart |= (newinsn & 0xfffff000);
19745 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
19746 }
19747 break;
19748
19749 case BFD_RELOC_ARM_OFFSET_IMM:
19750 if (!fixP->fx_done && seg->use_rela_p)
19751 value = 0;
19752
19753 case BFD_RELOC_ARM_LITERAL:
19754 sign = value >= 0;
19755
19756 if (value < 0)
19757 value = - value;
19758
19759 if (validate_offset_imm (value, 0) == FAIL)
19760 {
19761 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
19762 as_bad_where (fixP->fx_file, fixP->fx_line,
19763 _("invalid literal constant: pool needs to be closer"));
19764 else
19765 as_bad_where (fixP->fx_file, fixP->fx_line,
19766 _("bad immediate value for offset (%ld)"),
19767 (long) value);
19768 break;
19769 }
19770
19771 newval = md_chars_to_number (buf, INSN_SIZE);
19772 newval &= 0xff7ff000;
19773 newval |= value | (sign ? INDEX_UP : 0);
19774 md_number_to_chars (buf, newval, INSN_SIZE);
19775 break;
19776
19777 case BFD_RELOC_ARM_OFFSET_IMM8:
19778 case BFD_RELOC_ARM_HWLITERAL:
19779 sign = value >= 0;
19780
19781 if (value < 0)
19782 value = - value;
19783
19784 if (validate_offset_imm (value, 1) == FAIL)
19785 {
19786 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
19787 as_bad_where (fixP->fx_file, fixP->fx_line,
19788 _("invalid literal constant: pool needs to be closer"));
19789 else
19790 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
19791 (long) value);
19792 break;
19793 }
19794
19795 newval = md_chars_to_number (buf, INSN_SIZE);
19796 newval &= 0xff7ff0f0;
19797 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
19798 md_number_to_chars (buf, newval, INSN_SIZE);
19799 break;
19800
19801 case BFD_RELOC_ARM_T32_OFFSET_U8:
19802 if (value < 0 || value > 1020 || value % 4 != 0)
19803 as_bad_where (fixP->fx_file, fixP->fx_line,
19804 _("bad immediate value for offset (%ld)"), (long) value);
19805 value /= 4;
19806
19807 newval = md_chars_to_number (buf+2, THUMB_SIZE);
19808 newval |= value;
19809 md_number_to_chars (buf+2, newval, THUMB_SIZE);
19810 break;
19811
19812 case BFD_RELOC_ARM_T32_OFFSET_IMM:
19813 /* This is a complicated relocation used for all varieties of Thumb32
19814 load/store instruction with immediate offset:
19815
19816 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
19817 *4, optional writeback(W)
19818 (doubleword load/store)
19819
19820 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
19821 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
19822 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
19823 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
19824 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
19825
19826 Uppercase letters indicate bits that are already encoded at
19827 this point. Lowercase letters are our problem. For the
19828 second block of instructions, the secondary opcode nybble
19829 (bits 8..11) is present, and bit 23 is zero, even if this is
19830 a PC-relative operation. */
19831 newval = md_chars_to_number (buf, THUMB_SIZE);
19832 newval <<= 16;
19833 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
19834
19835 if ((newval & 0xf0000000) == 0xe0000000)
19836 {
19837 /* Doubleword load/store: 8-bit offset, scaled by 4. */
19838 if (value >= 0)
19839 newval |= (1 << 23);
19840 else
19841 value = -value;
19842 if (value % 4 != 0)
19843 {
19844 as_bad_where (fixP->fx_file, fixP->fx_line,
19845 _("offset not a multiple of 4"));
19846 break;
19847 }
19848 value /= 4;
19849 if (value > 0xff)
19850 {
19851 as_bad_where (fixP->fx_file, fixP->fx_line,
19852 _("offset out of range"));
19853 break;
19854 }
19855 newval &= ~0xff;
19856 }
19857 else if ((newval & 0x000f0000) == 0x000f0000)
19858 {
19859 /* PC-relative, 12-bit offset. */
19860 if (value >= 0)
19861 newval |= (1 << 23);
19862 else
19863 value = -value;
19864 if (value > 0xfff)
19865 {
19866 as_bad_where (fixP->fx_file, fixP->fx_line,
19867 _("offset out of range"));
19868 break;
19869 }
19870 newval &= ~0xfff;
19871 }
19872 else if ((newval & 0x00000100) == 0x00000100)
19873 {
19874 /* Writeback: 8-bit, +/- offset. */
19875 if (value >= 0)
19876 newval |= (1 << 9);
19877 else
19878 value = -value;
19879 if (value > 0xff)
19880 {
19881 as_bad_where (fixP->fx_file, fixP->fx_line,
19882 _("offset out of range"));
19883 break;
19884 }
19885 newval &= ~0xff;
19886 }
19887 else if ((newval & 0x00000f00) == 0x00000e00)
19888 {
19889 /* T-instruction: positive 8-bit offset. */
19890 if (value < 0 || value > 0xff)
19891 {
19892 as_bad_where (fixP->fx_file, fixP->fx_line,
19893 _("offset out of range"));
19894 break;
19895 }
19896 newval &= ~0xff;
19897 newval |= value;
19898 }
19899 else
19900 {
19901 /* Positive 12-bit or negative 8-bit offset. */
19902 int limit;
19903 if (value >= 0)
19904 {
19905 newval |= (1 << 23);
19906 limit = 0xfff;
19907 }
19908 else
19909 {
19910 value = -value;
19911 limit = 0xff;
19912 }
19913 if (value > limit)
19914 {
19915 as_bad_where (fixP->fx_file, fixP->fx_line,
19916 _("offset out of range"));
19917 break;
19918 }
19919 newval &= ~limit;
19920 }
19921
19922 newval |= value;
19923 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
19924 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
19925 break;
19926
19927 case BFD_RELOC_ARM_SHIFT_IMM:
19928 newval = md_chars_to_number (buf, INSN_SIZE);
19929 if (((unsigned long) value) > 32
19930 || (value == 32
19931 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
19932 {
19933 as_bad_where (fixP->fx_file, fixP->fx_line,
19934 _("shift expression is too large"));
19935 break;
19936 }
19937
19938 if (value == 0)
19939 /* Shifts of zero must be done as lsl. */
19940 newval &= ~0x60;
19941 else if (value == 32)
19942 value = 0;
19943 newval &= 0xfffff07f;
19944 newval |= (value & 0x1f) << 7;
19945 md_number_to_chars (buf, newval, INSN_SIZE);
19946 break;
19947
19948 case BFD_RELOC_ARM_T32_IMMEDIATE:
19949 case BFD_RELOC_ARM_T32_ADD_IMM:
19950 case BFD_RELOC_ARM_T32_IMM12:
19951 case BFD_RELOC_ARM_T32_ADD_PC12:
19952 /* We claim that this fixup has been processed here,
19953 even if in fact we generate an error because we do
19954 not have a reloc for it, so tc_gen_reloc will reject it. */
19955 fixP->fx_done = 1;
19956
19957 if (fixP->fx_addsy
19958 && ! S_IS_DEFINED (fixP->fx_addsy))
19959 {
19960 as_bad_where (fixP->fx_file, fixP->fx_line,
19961 _("undefined symbol %s used as an immediate value"),
19962 S_GET_NAME (fixP->fx_addsy));
19963 break;
19964 }
19965
19966 newval = md_chars_to_number (buf, THUMB_SIZE);
19967 newval <<= 16;
19968 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
19969
19970 newimm = FAIL;
19971 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19972 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
19973 {
19974 newimm = encode_thumb32_immediate (value);
19975 if (newimm == (unsigned int) FAIL)
19976 newimm = thumb32_negate_data_op (&newval, value);
19977 }
19978 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
19979 && newimm == (unsigned int) FAIL)
19980 {
19981 /* Turn add/sum into addw/subw. */
19982 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
19983 newval = (newval & 0xfeffffff) | 0x02000000;
19984
19985 /* 12 bit immediate for addw/subw. */
19986 if (value < 0)
19987 {
19988 value = -value;
19989 newval ^= 0x00a00000;
19990 }
19991 if (value > 0xfff)
19992 newimm = (unsigned int) FAIL;
19993 else
19994 newimm = value;
19995 }
19996
19997 if (newimm == (unsigned int)FAIL)
19998 {
19999 as_bad_where (fixP->fx_file, fixP->fx_line,
20000 _("invalid constant (%lx) after fixup"),
20001 (unsigned long) value);
20002 break;
20003 }
20004
20005 newval |= (newimm & 0x800) << 15;
20006 newval |= (newimm & 0x700) << 4;
20007 newval |= (newimm & 0x0ff);
20008
20009 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
20010 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
20011 break;
20012
20013 case BFD_RELOC_ARM_SMC:
20014 if (((unsigned long) value) > 0xffff)
20015 as_bad_where (fixP->fx_file, fixP->fx_line,
20016 _("invalid smc expression"));
20017 newval = md_chars_to_number (buf, INSN_SIZE);
20018 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
20019 md_number_to_chars (buf, newval, INSN_SIZE);
20020 break;
20021
20022 case BFD_RELOC_ARM_SWI:
20023 if (fixP->tc_fix_data != 0)
20024 {
20025 if (((unsigned long) value) > 0xff)
20026 as_bad_where (fixP->fx_file, fixP->fx_line,
20027 _("invalid swi expression"));
20028 newval = md_chars_to_number (buf, THUMB_SIZE);
20029 newval |= value;
20030 md_number_to_chars (buf, newval, THUMB_SIZE);
20031 }
20032 else
20033 {
20034 if (((unsigned long) value) > 0x00ffffff)
20035 as_bad_where (fixP->fx_file, fixP->fx_line,
20036 _("invalid swi expression"));
20037 newval = md_chars_to_number (buf, INSN_SIZE);
20038 newval |= value;
20039 md_number_to_chars (buf, newval, INSN_SIZE);
20040 }
20041 break;
20042
20043 case BFD_RELOC_ARM_MULTI:
20044 if (((unsigned long) value) > 0xffff)
20045 as_bad_where (fixP->fx_file, fixP->fx_line,
20046 _("invalid expression in load/store multiple"));
20047 newval = value | md_chars_to_number (buf, INSN_SIZE);
20048 md_number_to_chars (buf, newval, INSN_SIZE);
20049 break;
20050
20051 #ifdef OBJ_ELF
20052 case BFD_RELOC_ARM_PCREL_CALL:
20053
20054 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20055 && fixP->fx_addsy
20056 && !S_IS_EXTERNAL (fixP->fx_addsy)
20057 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20058 && THUMB_IS_FUNC (fixP->fx_addsy))
20059 /* Flip the bl to blx. This is a simple flip
20060 bit here because we generate PCREL_CALL for
20061 unconditional bls. */
20062 {
20063 newval = md_chars_to_number (buf, INSN_SIZE);
20064 newval = newval | 0x10000000;
20065 md_number_to_chars (buf, newval, INSN_SIZE);
20066 temp = 1;
20067 fixP->fx_done = 1;
20068 }
20069 else
20070 temp = 3;
20071 goto arm_branch_common;
20072
20073 case BFD_RELOC_ARM_PCREL_JUMP:
20074 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20075 && fixP->fx_addsy
20076 && !S_IS_EXTERNAL (fixP->fx_addsy)
20077 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20078 && THUMB_IS_FUNC (fixP->fx_addsy))
20079 {
20080 /* This would map to a bl<cond>, b<cond>,
20081 b<always> to a Thumb function. We
20082 need to force a relocation for this particular
20083 case. */
20084 newval = md_chars_to_number (buf, INSN_SIZE);
20085 fixP->fx_done = 0;
20086 }
20087
20088 case BFD_RELOC_ARM_PLT32:
20089 #endif
20090 case BFD_RELOC_ARM_PCREL_BRANCH:
20091 temp = 3;
20092 goto arm_branch_common;
20093
20094 case BFD_RELOC_ARM_PCREL_BLX:
20095
20096 temp = 1;
20097 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
20098 && fixP->fx_addsy
20099 && !S_IS_EXTERNAL (fixP->fx_addsy)
20100 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20101 && ARM_IS_FUNC (fixP->fx_addsy))
20102 {
20103 /* Flip the blx to a bl and warn. */
20104 const char *name = S_GET_NAME (fixP->fx_addsy);
20105 newval = 0xeb000000;
20106 as_warn_where (fixP->fx_file, fixP->fx_line,
20107 _("blx to '%s' an ARM ISA state function changed to bl"),
20108 name);
20109 md_number_to_chars (buf, newval, INSN_SIZE);
20110 temp = 3;
20111 fixP->fx_done = 1;
20112 }
20113
20114 #ifdef OBJ_ELF
20115 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
20116 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
20117 #endif
20118
20119 arm_branch_common:
20120 /* We are going to store value (shifted right by two) in the
20121 instruction, in a 24 bit, signed field. Bits 26 through 32 either
20122 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
20123 also be be clear. */
20124 if (value & temp)
20125 as_bad_where (fixP->fx_file, fixP->fx_line,
20126 _("misaligned branch destination"));
20127 if ((value & (offsetT)0xfe000000) != (offsetT)0
20128 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
20129 as_bad_where (fixP->fx_file, fixP->fx_line,
20130 _("branch out of range"));
20131
20132 if (fixP->fx_done || !seg->use_rela_p)
20133 {
20134 newval = md_chars_to_number (buf, INSN_SIZE);
20135 newval |= (value >> 2) & 0x00ffffff;
20136 /* Set the H bit on BLX instructions. */
20137 if (temp == 1)
20138 {
20139 if (value & 2)
20140 newval |= 0x01000000;
20141 else
20142 newval &= ~0x01000000;
20143 }
20144 md_number_to_chars (buf, newval, INSN_SIZE);
20145 }
20146 break;
20147
20148 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
20149 /* CBZ can only branch forward. */
20150
20151 /* Attempts to use CBZ to branch to the next instruction
20152 (which, strictly speaking, are prohibited) will be turned into
20153 no-ops.
20154
20155 FIXME: It may be better to remove the instruction completely and
20156 perform relaxation. */
20157 if (value == -2)
20158 {
20159 newval = md_chars_to_number (buf, THUMB_SIZE);
20160 newval = 0xbf00; /* NOP encoding T1 */
20161 md_number_to_chars (buf, newval, THUMB_SIZE);
20162 }
20163 else
20164 {
20165 if (value & ~0x7e)
20166 as_bad_where (fixP->fx_file, fixP->fx_line,
20167 _("branch out of range"));
20168
20169 if (fixP->fx_done || !seg->use_rela_p)
20170 {
20171 newval = md_chars_to_number (buf, THUMB_SIZE);
20172 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
20173 md_number_to_chars (buf, newval, THUMB_SIZE);
20174 }
20175 }
20176 break;
20177
20178 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
20179 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
20180 as_bad_where (fixP->fx_file, fixP->fx_line,
20181 _("branch out of range"));
20182
20183 if (fixP->fx_done || !seg->use_rela_p)
20184 {
20185 newval = md_chars_to_number (buf, THUMB_SIZE);
20186 newval |= (value & 0x1ff) >> 1;
20187 md_number_to_chars (buf, newval, THUMB_SIZE);
20188 }
20189 break;
20190
20191 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
20192 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
20193 as_bad_where (fixP->fx_file, fixP->fx_line,
20194 _("branch out of range"));
20195
20196 if (fixP->fx_done || !seg->use_rela_p)
20197 {
20198 newval = md_chars_to_number (buf, THUMB_SIZE);
20199 newval |= (value & 0xfff) >> 1;
20200 md_number_to_chars (buf, newval, THUMB_SIZE);
20201 }
20202 break;
20203
20204 case BFD_RELOC_THUMB_PCREL_BRANCH20:
20205 if (fixP->fx_addsy
20206 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20207 && !S_IS_EXTERNAL (fixP->fx_addsy)
20208 && S_IS_DEFINED (fixP->fx_addsy)
20209 && ARM_IS_FUNC (fixP->fx_addsy)
20210 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20211 {
20212 /* Force a relocation for a branch 20 bits wide. */
20213 fixP->fx_done = 0;
20214 }
20215 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
20216 as_bad_where (fixP->fx_file, fixP->fx_line,
20217 _("conditional branch out of range"));
20218
20219 if (fixP->fx_done || !seg->use_rela_p)
20220 {
20221 offsetT newval2;
20222 addressT S, J1, J2, lo, hi;
20223
20224 S = (value & 0x00100000) >> 20;
20225 J2 = (value & 0x00080000) >> 19;
20226 J1 = (value & 0x00040000) >> 18;
20227 hi = (value & 0x0003f000) >> 12;
20228 lo = (value & 0x00000ffe) >> 1;
20229
20230 newval = md_chars_to_number (buf, THUMB_SIZE);
20231 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20232 newval |= (S << 10) | hi;
20233 newval2 |= (J1 << 13) | (J2 << 11) | lo;
20234 md_number_to_chars (buf, newval, THUMB_SIZE);
20235 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20236 }
20237 break;
20238
20239 case BFD_RELOC_THUMB_PCREL_BLX:
20240
20241 /* If there is a blx from a thumb state function to
20242 another thumb function flip this to a bl and warn
20243 about it. */
20244
20245 if (fixP->fx_addsy
20246 && S_IS_DEFINED (fixP->fx_addsy)
20247 && !S_IS_EXTERNAL (fixP->fx_addsy)
20248 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20249 && THUMB_IS_FUNC (fixP->fx_addsy))
20250 {
20251 const char *name = S_GET_NAME (fixP->fx_addsy);
20252 as_warn_where (fixP->fx_file, fixP->fx_line,
20253 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
20254 name);
20255 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20256 newval = newval | 0x1000;
20257 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
20258 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
20259 fixP->fx_done = 1;
20260 }
20261
20262
20263 goto thumb_bl_common;
20264
20265 case BFD_RELOC_THUMB_PCREL_BRANCH23:
20266
20267 /* A bl from Thumb state ISA to an internal ARM state function
20268 is converted to a blx. */
20269 if (fixP->fx_addsy
20270 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20271 && !S_IS_EXTERNAL (fixP->fx_addsy)
20272 && S_IS_DEFINED (fixP->fx_addsy)
20273 && ARM_IS_FUNC (fixP->fx_addsy)
20274 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20275 {
20276 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20277 newval = newval & ~0x1000;
20278 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
20279 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
20280 fixP->fx_done = 1;
20281 }
20282
20283 thumb_bl_common:
20284
20285 #ifdef OBJ_ELF
20286 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4 &&
20287 fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
20288 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
20289 #endif
20290
20291 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
20292 /* For a BLX instruction, make sure that the relocation is rounded up
20293 to a word boundary. This follows the semantics of the instruction
20294 which specifies that bit 1 of the target address will come from bit
20295 1 of the base address. */
20296 value = (value + 1) & ~ 1;
20297
20298
20299 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
20300 {
20301 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
20302 {
20303 as_bad_where (fixP->fx_file, fixP->fx_line,
20304 _("branch out of range"));
20305 }
20306 else if ((value & ~0x1ffffff)
20307 && ((value & ~0x1ffffff) != ~0x1ffffff))
20308 {
20309 as_bad_where (fixP->fx_file, fixP->fx_line,
20310 _("Thumb2 branch out of range"));
20311 }
20312 }
20313
20314 if (fixP->fx_done || !seg->use_rela_p)
20315 encode_thumb2_b_bl_offset (buf, value);
20316
20317 break;
20318
20319 case BFD_RELOC_THUMB_PCREL_BRANCH25:
20320 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
20321 as_bad_where (fixP->fx_file, fixP->fx_line,
20322 _("branch out of range"));
20323
20324 if (fixP->fx_done || !seg->use_rela_p)
20325 encode_thumb2_b_bl_offset (buf, value);
20326
20327 break;
20328
20329 case BFD_RELOC_8:
20330 if (fixP->fx_done || !seg->use_rela_p)
20331 md_number_to_chars (buf, value, 1);
20332 break;
20333
20334 case BFD_RELOC_16:
20335 if (fixP->fx_done || !seg->use_rela_p)
20336 md_number_to_chars (buf, value, 2);
20337 break;
20338
20339 #ifdef OBJ_ELF
20340 case BFD_RELOC_ARM_TLS_GD32:
20341 case BFD_RELOC_ARM_TLS_LE32:
20342 case BFD_RELOC_ARM_TLS_IE32:
20343 case BFD_RELOC_ARM_TLS_LDM32:
20344 case BFD_RELOC_ARM_TLS_LDO32:
20345 S_SET_THREAD_LOCAL (fixP->fx_addsy);
20346 /* fall through */
20347
20348 case BFD_RELOC_ARM_GOT32:
20349 case BFD_RELOC_ARM_GOTOFF:
20350 if (fixP->fx_done || !seg->use_rela_p)
20351 md_number_to_chars (buf, 0, 4);
20352 break;
20353
20354 case BFD_RELOC_ARM_TARGET2:
20355 /* TARGET2 is not partial-inplace, so we need to write the
20356 addend here for REL targets, because it won't be written out
20357 during reloc processing later. */
20358 if (fixP->fx_done || !seg->use_rela_p)
20359 md_number_to_chars (buf, fixP->fx_offset, 4);
20360 break;
20361 #endif
20362
20363 case BFD_RELOC_RVA:
20364 case BFD_RELOC_32:
20365 case BFD_RELOC_ARM_TARGET1:
20366 case BFD_RELOC_ARM_ROSEGREL32:
20367 case BFD_RELOC_ARM_SBREL32:
20368 case BFD_RELOC_32_PCREL:
20369 #ifdef TE_PE
20370 case BFD_RELOC_32_SECREL:
20371 #endif
20372 if (fixP->fx_done || !seg->use_rela_p)
20373 #ifdef TE_WINCE
20374 /* For WinCE we only do this for pcrel fixups. */
20375 if (fixP->fx_done || fixP->fx_pcrel)
20376 #endif
20377 md_number_to_chars (buf, value, 4);
20378 break;
20379
20380 #ifdef OBJ_ELF
20381 case BFD_RELOC_ARM_PREL31:
20382 if (fixP->fx_done || !seg->use_rela_p)
20383 {
20384 newval = md_chars_to_number (buf, 4) & 0x80000000;
20385 if ((value ^ (value >> 1)) & 0x40000000)
20386 {
20387 as_bad_where (fixP->fx_file, fixP->fx_line,
20388 _("rel31 relocation overflow"));
20389 }
20390 newval |= value & 0x7fffffff;
20391 md_number_to_chars (buf, newval, 4);
20392 }
20393 break;
20394 #endif
20395
20396 case BFD_RELOC_ARM_CP_OFF_IMM:
20397 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
20398 if (value < -1023 || value > 1023 || (value & 3))
20399 as_bad_where (fixP->fx_file, fixP->fx_line,
20400 _("co-processor offset out of range"));
20401 cp_off_common:
20402 sign = value >= 0;
20403 if (value < 0)
20404 value = -value;
20405 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
20406 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
20407 newval = md_chars_to_number (buf, INSN_SIZE);
20408 else
20409 newval = get_thumb32_insn (buf);
20410 newval &= 0xff7fff00;
20411 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
20412 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
20413 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
20414 md_number_to_chars (buf, newval, INSN_SIZE);
20415 else
20416 put_thumb32_insn (buf, newval);
20417 break;
20418
20419 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
20420 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
20421 if (value < -255 || value > 255)
20422 as_bad_where (fixP->fx_file, fixP->fx_line,
20423 _("co-processor offset out of range"));
20424 value *= 4;
20425 goto cp_off_common;
20426
20427 case BFD_RELOC_ARM_THUMB_OFFSET:
20428 newval = md_chars_to_number (buf, THUMB_SIZE);
20429 /* Exactly what ranges, and where the offset is inserted depends
20430 on the type of instruction, we can establish this from the
20431 top 4 bits. */
20432 switch (newval >> 12)
20433 {
20434 case 4: /* PC load. */
20435 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
20436 forced to zero for these loads; md_pcrel_from has already
20437 compensated for this. */
20438 if (value & 3)
20439 as_bad_where (fixP->fx_file, fixP->fx_line,
20440 _("invalid offset, target not word aligned (0x%08lX)"),
20441 (((unsigned long) fixP->fx_frag->fr_address
20442 + (unsigned long) fixP->fx_where) & ~3)
20443 + (unsigned long) value);
20444
20445 if (value & ~0x3fc)
20446 as_bad_where (fixP->fx_file, fixP->fx_line,
20447 _("invalid offset, value too big (0x%08lX)"),
20448 (long) value);
20449
20450 newval |= value >> 2;
20451 break;
20452
20453 case 9: /* SP load/store. */
20454 if (value & ~0x3fc)
20455 as_bad_where (fixP->fx_file, fixP->fx_line,
20456 _("invalid offset, value too big (0x%08lX)"),
20457 (long) value);
20458 newval |= value >> 2;
20459 break;
20460
20461 case 6: /* Word load/store. */
20462 if (value & ~0x7c)
20463 as_bad_where (fixP->fx_file, fixP->fx_line,
20464 _("invalid offset, value too big (0x%08lX)"),
20465 (long) value);
20466 newval |= value << 4; /* 6 - 2. */
20467 break;
20468
20469 case 7: /* Byte load/store. */
20470 if (value & ~0x1f)
20471 as_bad_where (fixP->fx_file, fixP->fx_line,
20472 _("invalid offset, value too big (0x%08lX)"),
20473 (long) value);
20474 newval |= value << 6;
20475 break;
20476
20477 case 8: /* Halfword load/store. */
20478 if (value & ~0x3e)
20479 as_bad_where (fixP->fx_file, fixP->fx_line,
20480 _("invalid offset, value too big (0x%08lX)"),
20481 (long) value);
20482 newval |= value << 5; /* 6 - 1. */
20483 break;
20484
20485 default:
20486 as_bad_where (fixP->fx_file, fixP->fx_line,
20487 "Unable to process relocation for thumb opcode: %lx",
20488 (unsigned long) newval);
20489 break;
20490 }
20491 md_number_to_chars (buf, newval, THUMB_SIZE);
20492 break;
20493
20494 case BFD_RELOC_ARM_THUMB_ADD:
20495 /* This is a complicated relocation, since we use it for all of
20496 the following immediate relocations:
20497
20498 3bit ADD/SUB
20499 8bit ADD/SUB
20500 9bit ADD/SUB SP word-aligned
20501 10bit ADD PC/SP word-aligned
20502
20503 The type of instruction being processed is encoded in the
20504 instruction field:
20505
20506 0x8000 SUB
20507 0x00F0 Rd
20508 0x000F Rs
20509 */
20510 newval = md_chars_to_number (buf, THUMB_SIZE);
20511 {
20512 int rd = (newval >> 4) & 0xf;
20513 int rs = newval & 0xf;
20514 int subtract = !!(newval & 0x8000);
20515
20516 /* Check for HI regs, only very restricted cases allowed:
20517 Adjusting SP, and using PC or SP to get an address. */
20518 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
20519 || (rs > 7 && rs != REG_SP && rs != REG_PC))
20520 as_bad_where (fixP->fx_file, fixP->fx_line,
20521 _("invalid Hi register with immediate"));
20522
20523 /* If value is negative, choose the opposite instruction. */
20524 if (value < 0)
20525 {
20526 value = -value;
20527 subtract = !subtract;
20528 if (value < 0)
20529 as_bad_where (fixP->fx_file, fixP->fx_line,
20530 _("immediate value out of range"));
20531 }
20532
20533 if (rd == REG_SP)
20534 {
20535 if (value & ~0x1fc)
20536 as_bad_where (fixP->fx_file, fixP->fx_line,
20537 _("invalid immediate for stack address calculation"));
20538 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
20539 newval |= value >> 2;
20540 }
20541 else if (rs == REG_PC || rs == REG_SP)
20542 {
20543 if (subtract || value & ~0x3fc)
20544 as_bad_where (fixP->fx_file, fixP->fx_line,
20545 _("invalid immediate for address calculation (value = 0x%08lX)"),
20546 (unsigned long) value);
20547 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
20548 newval |= rd << 8;
20549 newval |= value >> 2;
20550 }
20551 else if (rs == rd)
20552 {
20553 if (value & ~0xff)
20554 as_bad_where (fixP->fx_file, fixP->fx_line,
20555 _("immediate value out of range"));
20556 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
20557 newval |= (rd << 8) | value;
20558 }
20559 else
20560 {
20561 if (value & ~0x7)
20562 as_bad_where (fixP->fx_file, fixP->fx_line,
20563 _("immediate value out of range"));
20564 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
20565 newval |= rd | (rs << 3) | (value << 6);
20566 }
20567 }
20568 md_number_to_chars (buf, newval, THUMB_SIZE);
20569 break;
20570
20571 case BFD_RELOC_ARM_THUMB_IMM:
20572 newval = md_chars_to_number (buf, THUMB_SIZE);
20573 if (value < 0 || value > 255)
20574 as_bad_where (fixP->fx_file, fixP->fx_line,
20575 _("invalid immediate: %ld is out of range"),
20576 (long) value);
20577 newval |= value;
20578 md_number_to_chars (buf, newval, THUMB_SIZE);
20579 break;
20580
20581 case BFD_RELOC_ARM_THUMB_SHIFT:
20582 /* 5bit shift value (0..32). LSL cannot take 32. */
20583 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
20584 temp = newval & 0xf800;
20585 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
20586 as_bad_where (fixP->fx_file, fixP->fx_line,
20587 _("invalid shift value: %ld"), (long) value);
20588 /* Shifts of zero must be encoded as LSL. */
20589 if (value == 0)
20590 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
20591 /* Shifts of 32 are encoded as zero. */
20592 else if (value == 32)
20593 value = 0;
20594 newval |= value << 6;
20595 md_number_to_chars (buf, newval, THUMB_SIZE);
20596 break;
20597
20598 case BFD_RELOC_VTABLE_INHERIT:
20599 case BFD_RELOC_VTABLE_ENTRY:
20600 fixP->fx_done = 0;
20601 return;
20602
20603 case BFD_RELOC_ARM_MOVW:
20604 case BFD_RELOC_ARM_MOVT:
20605 case BFD_RELOC_ARM_THUMB_MOVW:
20606 case BFD_RELOC_ARM_THUMB_MOVT:
20607 if (fixP->fx_done || !seg->use_rela_p)
20608 {
20609 /* REL format relocations are limited to a 16-bit addend. */
20610 if (!fixP->fx_done)
20611 {
20612 if (value < -0x8000 || value > 0x7fff)
20613 as_bad_where (fixP->fx_file, fixP->fx_line,
20614 _("offset out of range"));
20615 }
20616 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
20617 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
20618 {
20619 value >>= 16;
20620 }
20621
20622 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
20623 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
20624 {
20625 newval = get_thumb32_insn (buf);
20626 newval &= 0xfbf08f00;
20627 newval |= (value & 0xf000) << 4;
20628 newval |= (value & 0x0800) << 15;
20629 newval |= (value & 0x0700) << 4;
20630 newval |= (value & 0x00ff);
20631 put_thumb32_insn (buf, newval);
20632 }
20633 else
20634 {
20635 newval = md_chars_to_number (buf, 4);
20636 newval &= 0xfff0f000;
20637 newval |= value & 0x0fff;
20638 newval |= (value & 0xf000) << 4;
20639 md_number_to_chars (buf, newval, 4);
20640 }
20641 }
20642 return;
20643
20644 case BFD_RELOC_ARM_ALU_PC_G0_NC:
20645 case BFD_RELOC_ARM_ALU_PC_G0:
20646 case BFD_RELOC_ARM_ALU_PC_G1_NC:
20647 case BFD_RELOC_ARM_ALU_PC_G1:
20648 case BFD_RELOC_ARM_ALU_PC_G2:
20649 case BFD_RELOC_ARM_ALU_SB_G0_NC:
20650 case BFD_RELOC_ARM_ALU_SB_G0:
20651 case BFD_RELOC_ARM_ALU_SB_G1_NC:
20652 case BFD_RELOC_ARM_ALU_SB_G1:
20653 case BFD_RELOC_ARM_ALU_SB_G2:
20654 gas_assert (!fixP->fx_done);
20655 if (!seg->use_rela_p)
20656 {
20657 bfd_vma insn;
20658 bfd_vma encoded_addend;
20659 bfd_vma addend_abs = abs (value);
20660
20661 /* Check that the absolute value of the addend can be
20662 expressed as an 8-bit constant plus a rotation. */
20663 encoded_addend = encode_arm_immediate (addend_abs);
20664 if (encoded_addend == (unsigned int) FAIL)
20665 as_bad_where (fixP->fx_file, fixP->fx_line,
20666 _("the offset 0x%08lX is not representable"),
20667 (unsigned long) addend_abs);
20668
20669 /* Extract the instruction. */
20670 insn = md_chars_to_number (buf, INSN_SIZE);
20671
20672 /* If the addend is positive, use an ADD instruction.
20673 Otherwise use a SUB. Take care not to destroy the S bit. */
20674 insn &= 0xff1fffff;
20675 if (value < 0)
20676 insn |= 1 << 22;
20677 else
20678 insn |= 1 << 23;
20679
20680 /* Place the encoded addend into the first 12 bits of the
20681 instruction. */
20682 insn &= 0xfffff000;
20683 insn |= encoded_addend;
20684
20685 /* Update the instruction. */
20686 md_number_to_chars (buf, insn, INSN_SIZE);
20687 }
20688 break;
20689
20690 case BFD_RELOC_ARM_LDR_PC_G0:
20691 case BFD_RELOC_ARM_LDR_PC_G1:
20692 case BFD_RELOC_ARM_LDR_PC_G2:
20693 case BFD_RELOC_ARM_LDR_SB_G0:
20694 case BFD_RELOC_ARM_LDR_SB_G1:
20695 case BFD_RELOC_ARM_LDR_SB_G2:
20696 gas_assert (!fixP->fx_done);
20697 if (!seg->use_rela_p)
20698 {
20699 bfd_vma insn;
20700 bfd_vma addend_abs = abs (value);
20701
20702 /* Check that the absolute value of the addend can be
20703 encoded in 12 bits. */
20704 if (addend_abs >= 0x1000)
20705 as_bad_where (fixP->fx_file, fixP->fx_line,
20706 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
20707 (unsigned long) addend_abs);
20708
20709 /* Extract the instruction. */
20710 insn = md_chars_to_number (buf, INSN_SIZE);
20711
20712 /* If the addend is negative, clear bit 23 of the instruction.
20713 Otherwise set it. */
20714 if (value < 0)
20715 insn &= ~(1 << 23);
20716 else
20717 insn |= 1 << 23;
20718
20719 /* Place the absolute value of the addend into the first 12 bits
20720 of the instruction. */
20721 insn &= 0xfffff000;
20722 insn |= addend_abs;
20723
20724 /* Update the instruction. */
20725 md_number_to_chars (buf, insn, INSN_SIZE);
20726 }
20727 break;
20728
20729 case BFD_RELOC_ARM_LDRS_PC_G0:
20730 case BFD_RELOC_ARM_LDRS_PC_G1:
20731 case BFD_RELOC_ARM_LDRS_PC_G2:
20732 case BFD_RELOC_ARM_LDRS_SB_G0:
20733 case BFD_RELOC_ARM_LDRS_SB_G1:
20734 case BFD_RELOC_ARM_LDRS_SB_G2:
20735 gas_assert (!fixP->fx_done);
20736 if (!seg->use_rela_p)
20737 {
20738 bfd_vma insn;
20739 bfd_vma addend_abs = abs (value);
20740
20741 /* Check that the absolute value of the addend can be
20742 encoded in 8 bits. */
20743 if (addend_abs >= 0x100)
20744 as_bad_where (fixP->fx_file, fixP->fx_line,
20745 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
20746 (unsigned long) addend_abs);
20747
20748 /* Extract the instruction. */
20749 insn = md_chars_to_number (buf, INSN_SIZE);
20750
20751 /* If the addend is negative, clear bit 23 of the instruction.
20752 Otherwise set it. */
20753 if (value < 0)
20754 insn &= ~(1 << 23);
20755 else
20756 insn |= 1 << 23;
20757
20758 /* Place the first four bits of the absolute value of the addend
20759 into the first 4 bits of the instruction, and the remaining
20760 four into bits 8 .. 11. */
20761 insn &= 0xfffff0f0;
20762 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
20763
20764 /* Update the instruction. */
20765 md_number_to_chars (buf, insn, INSN_SIZE);
20766 }
20767 break;
20768
20769 case BFD_RELOC_ARM_LDC_PC_G0:
20770 case BFD_RELOC_ARM_LDC_PC_G1:
20771 case BFD_RELOC_ARM_LDC_PC_G2:
20772 case BFD_RELOC_ARM_LDC_SB_G0:
20773 case BFD_RELOC_ARM_LDC_SB_G1:
20774 case BFD_RELOC_ARM_LDC_SB_G2:
20775 gas_assert (!fixP->fx_done);
20776 if (!seg->use_rela_p)
20777 {
20778 bfd_vma insn;
20779 bfd_vma addend_abs = abs (value);
20780
20781 /* Check that the absolute value of the addend is a multiple of
20782 four and, when divided by four, fits in 8 bits. */
20783 if (addend_abs & 0x3)
20784 as_bad_where (fixP->fx_file, fixP->fx_line,
20785 _("bad offset 0x%08lX (must be word-aligned)"),
20786 (unsigned long) addend_abs);
20787
20788 if ((addend_abs >> 2) > 0xff)
20789 as_bad_where (fixP->fx_file, fixP->fx_line,
20790 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
20791 (unsigned long) addend_abs);
20792
20793 /* Extract the instruction. */
20794 insn = md_chars_to_number (buf, INSN_SIZE);
20795
20796 /* If the addend is negative, clear bit 23 of the instruction.
20797 Otherwise set it. */
20798 if (value < 0)
20799 insn &= ~(1 << 23);
20800 else
20801 insn |= 1 << 23;
20802
20803 /* Place the addend (divided by four) into the first eight
20804 bits of the instruction. */
20805 insn &= 0xfffffff0;
20806 insn |= addend_abs >> 2;
20807
20808 /* Update the instruction. */
20809 md_number_to_chars (buf, insn, INSN_SIZE);
20810 }
20811 break;
20812
20813 case BFD_RELOC_ARM_V4BX:
20814 /* This will need to go in the object file. */
20815 fixP->fx_done = 0;
20816 break;
20817
20818 case BFD_RELOC_UNUSED:
20819 default:
20820 as_bad_where (fixP->fx_file, fixP->fx_line,
20821 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
20822 }
20823 }
20824
20825 /* Translate internal representation of relocation info to BFD target
20826 format. */
20827
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  /* The arelent and its symbol pointer are allocated here and handed to
     BFD; BFD owns them from this point on.  */
  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend depends on the relocation format:
     RELA sections carry an explicit addend adjusted for the PC bias,
     while REL sections store the reloc's own address as the offset.  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto the BFD relocation code that will
     be emitted into the object file.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through - non-pcrel cases drop to the common handler.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types pass straight through to BFD unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI v4 and later requires BLX to an external symbol to be
	 emitted as a plain BRANCH23; the linker fixes up the mode.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      /* Internal-only type: should have been resolved in md_apply_fix.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      /* ADRL expands to a multi-insn sequence that can only be resolved
	 locally; an unresolved one here means the target is external.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      /* RELA targets can represent this directly; otherwise it is an
	 internal type that must already have been applied.  */
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  /* A reference to an undefined local label: report the more
	     helpful diagnostic.  */
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* All remaining types are internal and cannot appear in the
	   object file; name them for a readable diagnostic.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to the GOT symbol itself becomes a GOTPC
     relocation whose addend is the reloc's own address.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
21076
21077 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
21078
21079 void
21080 cons_fix_new_arm (fragS * frag,
21081 int where,
21082 int size,
21083 expressionS * exp)
21084 {
21085 bfd_reloc_code_real_type type;
21086 int pcrel = 0;
21087
21088 /* Pick a reloc.
21089 FIXME: @@ Should look at CPU word size. */
21090 switch (size)
21091 {
21092 case 1:
21093 type = BFD_RELOC_8;
21094 break;
21095 case 2:
21096 type = BFD_RELOC_16;
21097 break;
21098 case 4:
21099 default:
21100 type = BFD_RELOC_32;
21101 break;
21102 case 8:
21103 type = BFD_RELOC_64;
21104 break;
21105 }
21106
21107 #ifdef TE_PE
21108 if (exp->X_op == O_secrel)
21109 {
21110 exp->X_op = O_symbol;
21111 type = BFD_RELOC_32_SECREL;
21112 }
21113 #endif
21114
21115 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
21116 }
21117
21118 #if defined (OBJ_COFF)
21119 void
21120 arm_validate_fix (fixS * fixP)
21121 {
21122 /* If the destination of the branch is a defined symbol which does not have
21123 the THUMB_FUNC attribute, then we must be calling a function which has
21124 the (interfacearm) attribute. We look for the Thumb entry point to that
21125 function and change the branch to refer to that function instead. */
21126 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
21127 && fixP->fx_addsy != NULL
21128 && S_IS_DEFINED (fixP->fx_addsy)
21129 && ! THUMB_IS_FUNC (fixP->fx_addsy))
21130 {
21131 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
21132 }
21133 }
21134 #endif
21135
21136
/* Decide whether the fixup FIXP must be kept as a relocation in the
   object file (return 1), may be fully resolved by the assembler
   (return 0), or should follow the generic policy.  */
int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  /* RVA relocations are always emitted for the PE format.  */
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* In case we have a call or a branch to a function in ARM ISA mode from
     a thumb function or vice-versa force the relocation.  These relocations
     are cleared off for some cores that might have blx and simple
     transformations are possible.  */

#ifdef OBJ_ELF
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* Target is a Thumb function: the linker may need to fix up the
	 instruction for interworking, so keep the relocation.  */
      if (THUMB_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* Likewise when the target is an ARM function.  */
      if (ARM_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    default:
      break;
    }
#endif

  /* Resolve these relocations even if the symbol is extern or weak.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
    return 0;

  /* Always leave these relocations for the linker.
     NOTE(review): LDR_PC_G0 is tested separately, presumably because it
     falls outside the contiguous ALU_PC_G0_NC..LDC_SB_G2 range of the
     BFD reloc enumeration -- confirm against bfd's reloc ordering.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
21197
21198 #if defined (OBJ_ELF) || defined (OBJ_COFF)
21199 /* Relocations against function names must be left unadjusted,
21200 so that the linker can use this information to generate interworking
21201 stubs. The MIPS version of this function
21202 also prevents relocations that are mips-16 specific, but I do not
21203 know why it does this.
21204
21205 FIXME:
21206 There is one other problem that ought to be addressed here, but
21207 which currently is not: Taking the address of a label (rather
21208 than a function) and then later jumping to that address. Such
21209 addresses also ought to have their bottom bit set (assuming that
21210 they reside in Thumb code), but at the moment they will not. */
21211
21212 bfd_boolean
21213 arm_fix_adjustable (fixS * fixP)
21214 {
21215 if (fixP->fx_addsy == NULL)
21216 return 1;
21217
21218 /* Preserve relocations against symbols with function type. */
21219 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
21220 return FALSE;
21221
21222 if (THUMB_IS_FUNC (fixP->fx_addsy)
21223 && fixP->fx_subsy == NULL)
21224 return FALSE;
21225
21226 /* We need the symbol name for the VTABLE entries. */
21227 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
21228 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
21229 return FALSE;
21230
21231 /* Don't allow symbols to be discarded on GOT related relocs. */
21232 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
21233 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
21234 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
21235 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
21236 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
21237 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
21238 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
21239 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
21240 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
21241 return FALSE;
21242
21243 /* Similarly for group relocations. */
21244 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
21245 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
21246 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
21247 return FALSE;
21248
21249 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
21250 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
21251 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
21252 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
21253 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
21254 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
21255 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
21256 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
21257 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
21258 return FALSE;
21259
21260 return TRUE;
21261 }
21262 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
21263
21264 #ifdef OBJ_ELF
21265
21266 const char *
21267 elf32_arm_target_format (void)
21268 {
21269 #ifdef TE_SYMBIAN
21270 return (target_big_endian
21271 ? "elf32-bigarm-symbian"
21272 : "elf32-littlearm-symbian");
21273 #elif defined (TE_VXWORKS)
21274 return (target_big_endian
21275 ? "elf32-bigarm-vxworks"
21276 : "elf32-littlearm-vxworks");
21277 #else
21278 if (target_big_endian)
21279 return "elf32-bigarm";
21280 else
21281 return "elf32-littlearm";
21282 #endif
21283 }
21284
/* Thin wrapper around the generic ELF symbol frobber; gives the ARM
   backend a hook point matching the obj-format interface.  */
void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
21291 #endif
21292
21293 /* MD interface: Finalization. */
21294
/* End-of-assembly cleanup: verify IT-block state and flush every
   outstanding literal pool to the end of its section.  */
void
arm_cleanup (void)
{
  literal_pool * pool;

  /* Ensure that all the IT blocks are properly closed.  */
  check_it_blocks_finished ();

  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
#ifdef OBJ_ELF
      /* Keep the mapping-symbol machinery in step with the section
	 switch.  */
      arm_elf_change_section ();
#endif
      /* Emit the pool contents, exactly as the .ltorg directive would.  */
      s_ltorg (0);
    }
}
21313
21314 #ifdef OBJ_ELF
21315 /* Remove any excess mapping symbols generated for alignment frags in
21316 SEC. We may have created a mapping symbol before a zero byte
21317 alignment; remove it if there's a mapping symbol after the
21318 alignment. */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  /* Walk every frag in SEC and drop mapping symbols that sit at the
     very end of a frag when an equivalent symbol (or the end of the
     section) follows, so no redundant $a/$t/$d symbols are emitted.  */
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Sections without frag chains (e.g. bss-style) need no scan.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly on the frag boundary; scan forward across any
	 empty frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
21379 #endif
21380
21381 /* Adjust the symbol table. This marks Thumb symbols as distinct from
21382 ARM ones. */
21383
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: rewrite storage classes so Thumb symbols get the dedicated
     C_THUMB* classes, and flag interworking symbols.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols are marked via a flag byte in the native
	 COFF symbol entry.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* ELF: tag Thumb symbols via st_info, preserving the original
     binding (local/global/weak).  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a, $t, $d, ...) must keep their generic
	     type, so they are skipped here.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
#endif
}
21460
21461 /* MD interface: Initialization. */
21462
21463 static void
21464 set_constant_flonums (void)
21465 {
21466 int i;
21467
21468 for (i = 0; i < NUM_FLOAT_VALS; i++)
21469 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
21470 abort ();
21471 }
21472
21473 /* Auto-select Thumb mode if it's the only available instruction set for the
21474 given architecture. */
21475
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* A CPU without the base ARM (v1) instruction set can only execute
     Thumb code, so switch the assembler into 16-bit mode up front.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
21482
21483 void
21484 md_begin (void)
21485 {
21486 unsigned mach;
21487 unsigned int i;
21488
21489 if ( (arm_ops_hsh = hash_new ()) == NULL
21490 || (arm_cond_hsh = hash_new ()) == NULL
21491 || (arm_shift_hsh = hash_new ()) == NULL
21492 || (arm_psr_hsh = hash_new ()) == NULL
21493 || (arm_v7m_psr_hsh = hash_new ()) == NULL
21494 || (arm_reg_hsh = hash_new ()) == NULL
21495 || (arm_reloc_hsh = hash_new ()) == NULL
21496 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
21497 as_fatal (_("virtual memory exhausted"));
21498
21499 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
21500 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
21501 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
21502 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
21503 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
21504 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
21505 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
21506 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
21507 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
21508 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
21509 (void *) (v7m_psrs + i));
21510 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
21511 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
21512 for (i = 0;
21513 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
21514 i++)
21515 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
21516 (void *) (barrier_opt_names + i));
21517 #ifdef OBJ_ELF
21518 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
21519 hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));
21520 #endif
21521
21522 set_constant_flonums ();
21523
21524 /* Set the cpu variant based on the command-line options. We prefer
21525 -mcpu= over -march= if both are set (as for GCC); and we prefer
21526 -mfpu= over any other way of setting the floating point unit.
21527 Use of legacy options with new options are faulted. */
21528 if (legacy_cpu)
21529 {
21530 if (mcpu_cpu_opt || march_cpu_opt)
21531 as_bad (_("use of old and new-style options to set CPU type"));
21532
21533 mcpu_cpu_opt = legacy_cpu;
21534 }
21535 else if (!mcpu_cpu_opt)
21536 mcpu_cpu_opt = march_cpu_opt;
21537
21538 if (legacy_fpu)
21539 {
21540 if (mfpu_opt)
21541 as_bad (_("use of old and new-style options to set FPU type"));
21542
21543 mfpu_opt = legacy_fpu;
21544 }
21545 else if (!mfpu_opt)
21546 {
21547 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
21548 || defined (TE_NetBSD) || defined (TE_VXWORKS))
21549 /* Some environments specify a default FPU. If they don't, infer it
21550 from the processor. */
21551 if (mcpu_fpu_opt)
21552 mfpu_opt = mcpu_fpu_opt;
21553 else
21554 mfpu_opt = march_fpu_opt;
21555 #else
21556 mfpu_opt = &fpu_default;
21557 #endif
21558 }
21559
21560 if (!mfpu_opt)
21561 {
21562 if (mcpu_cpu_opt != NULL)
21563 mfpu_opt = &fpu_default;
21564 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
21565 mfpu_opt = &fpu_arch_vfp_v2;
21566 else
21567 mfpu_opt = &fpu_arch_fpa;
21568 }
21569
21570 #ifdef CPU_DEFAULT
21571 if (!mcpu_cpu_opt)
21572 {
21573 mcpu_cpu_opt = &cpu_default;
21574 selected_cpu = cpu_default;
21575 }
21576 #else
21577 if (mcpu_cpu_opt)
21578 selected_cpu = *mcpu_cpu_opt;
21579 else
21580 mcpu_cpu_opt = &arm_arch_any;
21581 #endif
21582
21583 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
21584
21585 autoselect_thumb_from_cpu_variant ();
21586
21587 arm_arch_used = thumb_arch_used = arm_arch_none;
21588
21589 #if defined OBJ_COFF || defined OBJ_ELF
21590 {
21591 unsigned int flags = 0;
21592
21593 #if defined OBJ_ELF
21594 flags = meabi_flags;
21595
21596 switch (meabi_flags)
21597 {
21598 case EF_ARM_EABI_UNKNOWN:
21599 #endif
21600 /* Set the flags in the private structure. */
21601 if (uses_apcs_26) flags |= F_APCS26;
21602 if (support_interwork) flags |= F_INTERWORK;
21603 if (uses_apcs_float) flags |= F_APCS_FLOAT;
21604 if (pic_code) flags |= F_PIC;
21605 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
21606 flags |= F_SOFT_FLOAT;
21607
21608 switch (mfloat_abi_opt)
21609 {
21610 case ARM_FLOAT_ABI_SOFT:
21611 case ARM_FLOAT_ABI_SOFTFP:
21612 flags |= F_SOFT_FLOAT;
21613 break;
21614
21615 case ARM_FLOAT_ABI_HARD:
21616 if (flags & F_SOFT_FLOAT)
21617 as_bad (_("hard-float conflicts with specified fpu"));
21618 break;
21619 }
21620
21621 /* Using pure-endian doubles (even if soft-float). */
21622 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
21623 flags |= F_VFP_FLOAT;
21624
21625 #if defined OBJ_ELF
21626 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
21627 flags |= EF_ARM_MAVERICK_FLOAT;
21628 break;
21629
21630 case EF_ARM_EABI_VER4:
21631 case EF_ARM_EABI_VER5:
21632 /* No additional flags to set. */
21633 break;
21634
21635 default:
21636 abort ();
21637 }
21638 #endif
21639 bfd_set_private_flags (stdoutput, flags);
21640
21641 /* We have run out flags in the COFF header to encode the
21642 status of ATPCS support, so instead we create a dummy,
21643 empty, debug section called .arm.atpcs. */
21644 if (atpcs)
21645 {
21646 asection * sec;
21647
21648 sec = bfd_make_section (stdoutput, ".arm.atpcs");
21649
21650 if (sec != NULL)
21651 {
21652 bfd_set_section_flags
21653 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
21654 bfd_set_section_size (stdoutput, sec, 0);
21655 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
21656 }
21657 }
21658 }
21659 #endif
21660
21661 /* Record the CPU type as well. */
21662 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
21663 mach = bfd_mach_arm_iWMMXt2;
21664 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
21665 mach = bfd_mach_arm_iWMMXt;
21666 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
21667 mach = bfd_mach_arm_XScale;
21668 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
21669 mach = bfd_mach_arm_ep9312;
21670 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
21671 mach = bfd_mach_arm_5TE;
21672 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
21673 {
21674 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
21675 mach = bfd_mach_arm_5T;
21676 else
21677 mach = bfd_mach_arm_5;
21678 }
21679 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
21680 {
21681 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
21682 mach = bfd_mach_arm_4T;
21683 else
21684 mach = bfd_mach_arm_4;
21685 }
21686 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
21687 mach = bfd_mach_arm_3M;
21688 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
21689 mach = bfd_mach_arm_3;
21690 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
21691 mach = bfd_mach_arm_2a;
21692 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
21693 mach = bfd_mach_arm_2;
21694 else
21695 mach = bfd_mach_arm_unknown;
21696
21697 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
21698 }
21699
21700 /* Command line processing. */
21701
21702 /* md_parse_option
21703 Invocation line includes a switch not recognized by the base assembler.
21704 See if it's a processor-specific option.
21705
21706 This routine is somewhat complicated by the need for backwards
21707 compatibility (since older releases of gcc can't be changed).
21708 The new options try to make the interface as compatible as
21709 possible with GCC.
21710
21711 New options (supported) are:
21712
21713 -mcpu=<cpu name> Assemble for selected processor
21714 -march=<architecture name> Assemble for selected architecture
21715 -mfpu=<fpu architecture> Assemble for selected FPU.
21716 -EB/-mbig-endian Big-endian
21717 -EL/-mlittle-endian Little-endian
21718 -k Generate PIC code
21719 -mthumb Start in Thumb mode
21720 -mthumb-interwork Code supports ARM/Thumb interworking
21721
21722 -m[no-]warn-deprecated Warn about deprecated features
21723
21724 For now we will also provide support for:
21725
21726 -mapcs-32 32-bit Program counter
21727 -mapcs-26 26-bit Program counter
   -mapcs-float		Floats passed in FP registers
21729 -mapcs-reentrant Reentrant code
21730 -matpcs
   (someday these will probably be replaced with -mapcs=<list of options>
   and -matpcs=<list of options>)
21733
   The remaining options are only supported for backwards compatibility.
21735 Cpu variants, the arm part is optional:
21736 -m[arm]1 Currently not supported.
21737 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
21738 -m[arm]3 Arm 3 processor
21739 -m[arm]6[xx], Arm 6 processors
21740 -m[arm]7[xx][t][[d]m] Arm 7 processors
21741 -m[arm]8[10] Arm 8 processors
21742 -m[arm]9[20][tdmi] Arm 9 processors
21743 -mstrongarm[110[0]] StrongARM processors
21744 -mxscale XScale processors
21745 -m[arm]v[2345[t[e]]] Arm architectures
21746 -mall All (except the ARM1)
21747 FP variants:
21748 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
21749 -mfpe-old (No float load/store multiples)
21750 -mvfpxd VFP Single precision
21751 -mvfp All VFP
21752 -mno-fpu Disable all floating point instructions
21753
21754 The following CPU names are recognized:
21755 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
21756 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
21757 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
21758 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
21759 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
21760 arm10t arm10e, arm1020t, arm1020e, arm10200e,
21761 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
21762
21763 */
21764
/* Short options recognized by GAS for this target: "-m<arg>" (takes an
   argument) and "-k" (generate PIC code).  */
const char * md_shortopts = "m:k";

/* Endianness selection: on bi-endian targets both -EB and -EL exist;
   otherwise only the option matching the configured default is defined,
   which is why the table below tests each macro individually.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

/* Long options handled directly by md_parse_option's switch (as opposed
   to the arm_opts/arm_long_opts tables below).  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
21792
/* Simple on/off command-line options: when OPTION matches, *VAR is set
   to VALUE.  A NULL VAR means the option is accepted but ignored.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int *var;		/* Variable to change.  */
  int value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.  */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},

  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   &warn_on_deprecated, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
21827
/* Deprecated CPU/FPU options kept only for backwards compatibility.
   Matching OPTION points *VAR (legacy_cpu or legacy_fpu) at VALUE and,
   since DEPRECATED is non-null for every entry, prints the suggested
   modern replacement.  */
struct arm_legacy_option_table
{
  char *option;			/* Option name to match.  */
  const arm_feature_set **var;	/* Variable to change.  */
  const arm_feature_set value;	/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};

const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",   &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",  &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
21948
/* An entry in the -mcpu= table: the CPU name, its architectural feature
   set, and the FPU assumed when -mfpu= is not given.  */
struct arm_cpu_option_table
{
  char *name;
  const arm_feature_set	value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL},
  {"arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL},
  {"arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL},
  {"arm250",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL},
  {"arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL},
  {"arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm600",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm610",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm620",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL},
  {"arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7dm",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL},
  {"arm7di",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7dmi",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL},
  {"arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm700",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm700i",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm710",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm710t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm720",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm720t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm740t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm710c",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7100",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7500",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7500fe",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL},
  {"arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm7tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"arm810",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"strongarm",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL},
  {"arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm920",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"},
  {"arm920t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm922t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm940t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL},
  {"arm9tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  {"arm9e-r0",		ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm926ej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm10t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1020",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm1020e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1022e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"fa626te",		ARM_ARCH_V5TE,	 FPU_NONE,	  NULL},
  {"fa726te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1136js",		ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"},
  {"arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL},
  {"arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcore",		ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  NULL},
  {"arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL},
  {"arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL},
  {"arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL},
  {"cortex-a5",		ARM_ARCH_V7A,	 FPU_NONE,	  NULL},
  {"cortex-a8",		ARM_ARCH_V7A,	 ARM_FEATURE (0, FPU_VFP_V3
						      | FPU_NEON_EXT_V1),
							  NULL},
  {"cortex-a9",		ARM_ARCH_V7A,	 ARM_FEATURE (0, FPU_VFP_V3
						      | FPU_NEON_EXT_V1),
							  NULL},
  {"cortex-r4",		ARM_ARCH_V7R,	 FPU_NONE,	  NULL},
  {"cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,	NULL},
  {"cortex-m3",		ARM_ARCH_V7M,	 FPU_NONE,	  NULL},
  {"cortex-m1",		ARM_ARCH_V6M,	 FPU_NONE,	  NULL},
  {"cortex-m0",		ARM_ARCH_V6M,	 FPU_NONE,	  NULL},
  /* ??? XSCALE is really an architecture.  */
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
  {"i80200",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* Maverick */
  {"ep9312",	ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE,   NULL}
};
22068
/* An entry in the -march= table: architecture name, its feature set,
   and the FPU assumed when -mfpu= is not given.  */
struct arm_arch_option_table
{
  char *name;
  const arm_feature_set	value;
  const arm_feature_set	default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA},
  {"armv1",		ARM_ARCH_V1,	 FPU_ARCH_FPA},
  {"armv2",		ARM_ARCH_V2,	 FPU_ARCH_FPA},
  {"armv2a",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv2s",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv3",		ARM_ARCH_V3,	 FPU_ARCH_FPA},
  {"armv3m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA},
  {"armv4",		ARM_ARCH_V4,	 FPU_ARCH_FPA},
  {"armv4xm",		ARM_ARCH_V4xM,	 FPU_ARCH_FPA},
  {"armv4t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA},
  {"armv4txm",		ARM_ARCH_V4TxM,	 FPU_ARCH_FPA},
  {"armv5",		ARM_ARCH_V5,	 FPU_ARCH_VFP},
  {"armv5t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP},
  {"armv5txm",		ARM_ARCH_V5TxM,	 FPU_ARCH_VFP},
  {"armv5te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP},
  {"armv5texp",		ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP},
  {"armv6",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6j",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6k",		ARM_ARCH_V6K,	 FPU_ARCH_VFP},
  {"armv6z",		ARM_ARCH_V6Z,	 FPU_ARCH_VFP},
  {"armv6zk",		ARM_ARCH_V6ZK,	 FPU_ARCH_VFP},
  {"armv6t2",		ARM_ARCH_V6T2,	 FPU_ARCH_VFP},
  {"armv6kt2",		ARM_ARCH_V6KT2,	 FPU_ARCH_VFP},
  {"armv6zt2",		ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP},
  {"armv6zkt2",		ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv6-m",		ARM_ARCH_V6M,	 FPU_ARCH_VFP},
  {"armv7",		ARM_ARCH_V7,	 FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  {"armv7a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"armv7-a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7-r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7-m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"armv7e-m",		ARM_ARCH_V7EM,	 FPU_ARCH_VFP},
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE}
};
22122
/* ISA extensions in the co-processor space.  These are the "+ext"
   suffixes accepted by -mcpu= and -march= (see arm_parse_extension).  */
struct arm_option_cpu_value_table
{
  char *name;
  const arm_feature_set	value;
};

static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick",		ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale",		ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt",		ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2",		ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL,		ARM_ARCH_NONE}
};
22138
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Looked up by arm_parse_fpu for -mfpu=.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {NULL,		ARM_ARCH_NONE}
};
22179
/* Generic name -> integer value mapping, used for the float-ABI and
   EABI version option tables.  */
struct arm_option_value_table
{
  char *name;
  long value;
};

/* Arguments accepted by -mfloat-abi= (see arm_parse_float_abi).  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
22193
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.
   Arguments accepted by -meabi= (see arm_parse_eabi).  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
22204
/* A long option taking a sub-argument: OPTION (including the trailing
   '=') is matched as a prefix and FUNC decodes the text after it.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.	*/
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
22212
22213 static bfd_boolean
22214 arm_parse_extension (char * str, const arm_feature_set **opt_p)
22215 {
22216 arm_feature_set *ext_set = (arm_feature_set *)
22217 xmalloc (sizeof (arm_feature_set));
22218
22219 /* Copy the feature set, so that we can modify it. */
22220 *ext_set = **opt_p;
22221 *opt_p = ext_set;
22222
22223 while (str != NULL && *str != 0)
22224 {
22225 const struct arm_option_cpu_value_table * opt;
22226 char * ext;
22227 int optlen;
22228
22229 if (*str != '+')
22230 {
22231 as_bad (_("invalid architectural extension"));
22232 return FALSE;
22233 }
22234
22235 str++;
22236 ext = strchr (str, '+');
22237
22238 if (ext != NULL)
22239 optlen = ext - str;
22240 else
22241 optlen = strlen (str);
22242
22243 if (optlen == 0)
22244 {
22245 as_bad (_("missing architectural extension"));
22246 return FALSE;
22247 }
22248
22249 for (opt = arm_extensions; opt->name != NULL; opt++)
22250 if (strncmp (opt->name, str, optlen) == 0)
22251 {
22252 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
22253 break;
22254 }
22255
22256 if (opt->name == NULL)
22257 {
22258 as_bad (_("unknown architectural extension `%s'"), str);
22259 return FALSE;
22260 }
22261
22262 str = ext;
22263 };
22264
22265 return TRUE;
22266 }
22267
22268 static bfd_boolean
22269 arm_parse_cpu (char * str)
22270 {
22271 const struct arm_cpu_option_table * opt;
22272 char * ext = strchr (str, '+');
22273 int optlen;
22274
22275 if (ext != NULL)
22276 optlen = ext - str;
22277 else
22278 optlen = strlen (str);
22279
22280 if (optlen == 0)
22281 {
22282 as_bad (_("missing cpu name `%s'"), str);
22283 return FALSE;
22284 }
22285
22286 for (opt = arm_cpus; opt->name != NULL; opt++)
22287 if (strncmp (opt->name, str, optlen) == 0)
22288 {
22289 mcpu_cpu_opt = &opt->value;
22290 mcpu_fpu_opt = &opt->default_fpu;
22291 if (opt->canonical_name)
22292 strcpy (selected_cpu_name, opt->canonical_name);
22293 else
22294 {
22295 int i;
22296
22297 for (i = 0; i < optlen; i++)
22298 selected_cpu_name[i] = TOUPPER (opt->name[i]);
22299 selected_cpu_name[i] = 0;
22300 }
22301
22302 if (ext != NULL)
22303 return arm_parse_extension (ext, &mcpu_cpu_opt);
22304
22305 return TRUE;
22306 }
22307
22308 as_bad (_("unknown cpu `%s'"), str);
22309 return FALSE;
22310 }
22311
22312 static bfd_boolean
22313 arm_parse_arch (char * str)
22314 {
22315 const struct arm_arch_option_table *opt;
22316 char *ext = strchr (str, '+');
22317 int optlen;
22318
22319 if (ext != NULL)
22320 optlen = ext - str;
22321 else
22322 optlen = strlen (str);
22323
22324 if (optlen == 0)
22325 {
22326 as_bad (_("missing architecture name `%s'"), str);
22327 return FALSE;
22328 }
22329
22330 for (opt = arm_archs; opt->name != NULL; opt++)
22331 if (streq (opt->name, str))
22332 {
22333 march_cpu_opt = &opt->value;
22334 march_fpu_opt = &opt->default_fpu;
22335 strcpy (selected_cpu_name, opt->name);
22336
22337 if (ext != NULL)
22338 return arm_parse_extension (ext, &march_cpu_opt);
22339
22340 return TRUE;
22341 }
22342
22343 as_bad (_("unknown architecture `%s'\n"), str);
22344 return FALSE;
22345 }
22346
22347 static bfd_boolean
22348 arm_parse_fpu (char * str)
22349 {
22350 const struct arm_option_cpu_value_table * opt;
22351
22352 for (opt = arm_fpus; opt->name != NULL; opt++)
22353 if (streq (opt->name, str))
22354 {
22355 mfpu_opt = &opt->value;
22356 return TRUE;
22357 }
22358
22359 as_bad (_("unknown floating point format `%s'\n"), str);
22360 return FALSE;
22361 }
22362
22363 static bfd_boolean
22364 arm_parse_float_abi (char * str)
22365 {
22366 const struct arm_option_value_table * opt;
22367
22368 for (opt = arm_float_abis; opt->name != NULL; opt++)
22369 if (streq (opt->name, str))
22370 {
22371 mfloat_abi_opt = opt->value;
22372 return TRUE;
22373 }
22374
22375 as_bad (_("unknown floating point abi `%s'\n"), str);
22376 return FALSE;
22377 }
22378
22379 #ifdef OBJ_ELF
22380 static bfd_boolean
22381 arm_parse_eabi (char * str)
22382 {
22383 const struct arm_option_value_table *opt;
22384
22385 for (opt = arm_eabis; opt->name != NULL; opt++)
22386 if (streq (opt->name, str))
22387 {
22388 meabi_flags = opt->value;
22389 return TRUE;
22390 }
22391 as_bad (_("unknown EABI `%s'\n"), str);
22392 return FALSE;
22393 }
22394 #endif
22395
22396 static bfd_boolean
22397 arm_parse_it_mode (char * str)
22398 {
22399 bfd_boolean ret = TRUE;
22400
22401 if (streq ("arm", str))
22402 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
22403 else if (streq ("thumb", str))
22404 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
22405 else if (streq ("always", str))
22406 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
22407 else if (streq ("never", str))
22408 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
22409 else
22410 {
22411 as_bad (_("unknown implicit IT mode `%s', should be "\
22412 "arm, thumb, always, or never."), str);
22413 ret = FALSE;
22414 }
22415
22416 return ret;
22417 }
22418
/* Long-form command line options that take a "sub-argument", e.g.
   -mcpu=<name>.  Each entry gives the option prefix (without the
   leading '-'), translatable help text for md_show_usage, and the
   parser callback invoked with the text after the '='.  The table is
   scanned by md_parse_option and terminated by a NULL entry.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {NULL, NULL, 0, NULL}
};
22437
/* Target hook: handle one command line option.  C is the option
   character and ARG its argument text (or NULL).  Returns non-zero when
   the option was recognised and consumed, zero otherwise.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Simple table-driven options: exact match of ARG against the
	 option text after the leading character.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: same matching rule, but the stored value is the
	 address of a feature set rather than a plain integer.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      /* Long options such as -mcpu=: matched on a prefix of ARG, with
	 the remainder passed on to a dedicated parser.  */
      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG lacks the leading
		 option character, hence the "- 1" when skipping past
		 the matched prefix.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
22528
/* Target hook: print the ARM-specific command line options to FP.
   Invoked by the generic --help handling.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Simple options; the field width pads the option text so the help
     strings line up in a column.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  /* Long options such as -mcpu= (see arm_long_opts).  */
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx Allow BX in ARMv4 code\n"));
}
22558
22559
22560 #ifdef OBJ_ELF
typedef struct
{
  /* EABI Tag_CPU_arch value reported for this architecture.  */
  int val;
  /* The feature bits that architecture provides.  */
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  */
/* NOTE(review): v6-M (11) is listed before v6T2 (8), apparently so that
   a v6T2/v7 feature set is not mis-classified as v6-M -- the scan in
   aeabi_set_public_attributes keeps the LAST entry that contributes a
   new feature.  Confirm before reordering.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
  {1, ARM_ARCH_V4},
  {2, ARM_ARCH_V4T},
  {3, ARM_ARCH_V5},
  {3, ARM_ARCH_V5T},
  {4, ARM_ARCH_V5TE},
  {5, ARM_ARCH_V5TEJ},
  {6, ARM_ARCH_V6},
  {7, ARM_ARCH_V6Z},
  {9, ARM_ARCH_V6K},
  {11, ARM_ARCH_V6M},
  {8, ARM_ARCH_V6T2},
  {10, ARM_ARCH_V7A},
  {10, ARM_ARCH_V7R},
  {10, ARM_ARCH_V7M},
  {0, ARM_ARCH_NONE}
};
22587
22588 /* Set an attribute if it has not already been set by the user. */
22589 static void
22590 aeabi_set_attribute_int (int tag, int value)
22591 {
22592 if (tag < 1
22593 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
22594 || !attributes_set_explicitly[tag])
22595 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
22596 }
22597
22598 static void
22599 aeabi_set_attribute_string (int tag, const char *value)
22600 {
22601 if (tag < 1
22602 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
22603 || !attributes_set_explicitly[tag])
22604 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
22605 }
22606
/* Set the public EABI object attributes.  */

/* Compute and record the public EABI build attributes describing the
   architecture, profile and FPU/SIMD level the assembled code actually
   requires.  Values the user has set explicitly are left untouched
   (see aeabi_set_attribute_int/string).  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* Scan the version table, keeping the last entry that contributes a
     feature we use; clearing as we go means each entry only "wins" for
     features no earlier entry supplied.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == 10
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = 13;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* For generic "armv..." names, drop the "armv" prefix and
	 upper-case the remainder before emitting it.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
			     ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);

  /* Tag_VFP_arch.  */
  /* NOTE(review): the numeric values are assumed to follow the EABI
     build-attribute encoding for Tag_VFP_arch (e.g. 5/6 for the FMA
     variants) -- verify against the ABI addenda before changing.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    aeabi_set_attribute_int (Tag_VFP_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    aeabi_set_attribute_int
      (Tag_Advanced_SIMD_arch, (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma)
				? 2 : 1));

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16))
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
}
22723
22724 /* Add the default contents for the .ARM.attributes section. */
22725 void
22726 arm_md_end (void)
22727 {
22728 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
22729 return;
22730
22731 aeabi_set_public_attributes ();
22732 }
22733 #endif /* OBJ_ELF */
22734
22735
22736 /* Parse a .cpu directive. */
22737
22738 static void
22739 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
22740 {
22741 const struct arm_cpu_option_table *opt;
22742 char *name;
22743 char saved_char;
22744
22745 name = input_line_pointer;
22746 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22747 input_line_pointer++;
22748 saved_char = *input_line_pointer;
22749 *input_line_pointer = 0;
22750
22751 /* Skip the first "all" entry. */
22752 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
22753 if (streq (opt->name, name))
22754 {
22755 mcpu_cpu_opt = &opt->value;
22756 selected_cpu = opt->value;
22757 if (opt->canonical_name)
22758 strcpy (selected_cpu_name, opt->canonical_name);
22759 else
22760 {
22761 int i;
22762 for (i = 0; opt->name[i]; i++)
22763 selected_cpu_name[i] = TOUPPER (opt->name[i]);
22764 selected_cpu_name[i] = 0;
22765 }
22766 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22767 *input_line_pointer = saved_char;
22768 demand_empty_rest_of_line ();
22769 return;
22770 }
22771 as_bad (_("unknown cpu `%s'"), name);
22772 *input_line_pointer = saved_char;
22773 ignore_rest_of_line ();
22774 }
22775
22776
22777 /* Parse a .arch directive. */
22778
22779 static void
22780 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
22781 {
22782 const struct arm_arch_option_table *opt;
22783 char saved_char;
22784 char *name;
22785
22786 name = input_line_pointer;
22787 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22788 input_line_pointer++;
22789 saved_char = *input_line_pointer;
22790 *input_line_pointer = 0;
22791
22792 /* Skip the first "all" entry. */
22793 for (opt = arm_archs + 1; opt->name != NULL; opt++)
22794 if (streq (opt->name, name))
22795 {
22796 mcpu_cpu_opt = &opt->value;
22797 selected_cpu = opt->value;
22798 strcpy (selected_cpu_name, opt->name);
22799 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22800 *input_line_pointer = saved_char;
22801 demand_empty_rest_of_line ();
22802 return;
22803 }
22804
22805 as_bad (_("unknown architecture `%s'\n"), name);
22806 *input_line_pointer = saved_char;
22807 ignore_rest_of_line ();
22808 }
22809
22810
22811 /* Parse a .object_arch directive. */
22812
22813 static void
22814 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
22815 {
22816 const struct arm_arch_option_table *opt;
22817 char saved_char;
22818 char *name;
22819
22820 name = input_line_pointer;
22821 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22822 input_line_pointer++;
22823 saved_char = *input_line_pointer;
22824 *input_line_pointer = 0;
22825
22826 /* Skip the first "all" entry. */
22827 for (opt = arm_archs + 1; opt->name != NULL; opt++)
22828 if (streq (opt->name, name))
22829 {
22830 object_arch = &opt->value;
22831 *input_line_pointer = saved_char;
22832 demand_empty_rest_of_line ();
22833 return;
22834 }
22835
22836 as_bad (_("unknown architecture `%s'\n"), name);
22837 *input_line_pointer = saved_char;
22838 ignore_rest_of_line ();
22839 }
22840
22841 /* Parse a .fpu directive. */
22842
22843 static void
22844 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
22845 {
22846 const struct arm_option_cpu_value_table *opt;
22847 char saved_char;
22848 char *name;
22849
22850 name = input_line_pointer;
22851 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
22852 input_line_pointer++;
22853 saved_char = *input_line_pointer;
22854 *input_line_pointer = 0;
22855
22856 for (opt = arm_fpus; opt->name != NULL; opt++)
22857 if (streq (opt->name, name))
22858 {
22859 mfpu_opt = &opt->value;
22860 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22861 *input_line_pointer = saved_char;
22862 demand_empty_rest_of_line ();
22863 return;
22864 }
22865
22866 as_bad (_("unknown floating point format `%s'\n"), name);
22867 *input_line_pointer = saved_char;
22868 ignore_rest_of_line ();
22869 }
22870
/* Copy symbol information.  */

/* Propagate the ARM-specific per-symbol flag word (accessed via
   ARM_GET_FLAG, which carries e.g. the Thumb-function marking) from
   SRC to DEST.  Presumably invoked through the target's
   copy-symbol-attributes hook when one symbol is equated to another --
   confirm against tc-arm.h.  */
void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
22878
22879 #ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  /* Table pairing each symbolic tag name with its numeric value,
     generated from the Tag_* constants themselves so the two can
     never disagree.  */
  static const struct
  {
    const char * name;
    const int tag;
  }
  attribute_table[] =
  {
    /* When you modify this table you should
       also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
    T (Tag_CPU_raw_name),
    T (Tag_CPU_name),
    T (Tag_CPU_arch),
    T (Tag_CPU_arch_profile),
    T (Tag_ARM_ISA_use),
    T (Tag_THUMB_ISA_use),
    T (Tag_VFP_arch),
    T (Tag_WMMX_arch),
    T (Tag_Advanced_SIMD_arch),
    T (Tag_PCS_config),
    T (Tag_ABI_PCS_R9_use),
    T (Tag_ABI_PCS_RW_data),
    T (Tag_ABI_PCS_RO_data),
    T (Tag_ABI_PCS_GOT_use),
    T (Tag_ABI_PCS_wchar_t),
    T (Tag_ABI_FP_rounding),
    T (Tag_ABI_FP_denormal),
    T (Tag_ABI_FP_exceptions),
    T (Tag_ABI_FP_user_exceptions),
    T (Tag_ABI_FP_number_model),
    T (Tag_ABI_align8_needed),
    T (Tag_ABI_align8_preserved),
    T (Tag_ABI_enum_size),
    T (Tag_ABI_HardFP_use),
    T (Tag_ABI_VFP_args),
    T (Tag_ABI_WMMX_args),
    T (Tag_ABI_optimization_goals),
    T (Tag_ABI_FP_optimization_goals),
    T (Tag_compatibility),
    T (Tag_CPU_unaligned_access),
    T (Tag_VFP_HP_extension),
    T (Tag_ABI_FP_16bit_format),
    T (Tag_nodefaults),
    T (Tag_also_compatible_with),
    T (Tag_conformance),
    T (Tag_T2EE_use),
    T (Tag_Virtualization_use),
    T (Tag_MPextension_use)
#undef T
  };
  unsigned int i;

  /* Reject a NULL name rather than crashing in the comparison below.  */
  if (name == NULL)
    return -1;

  /* Linear search is fine here: this only runs while parsing
     directives, not per instruction.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
22947
22948
22949 /* Apply sym value for relocations only in the case that
22950 they are for local symbols and you have the respective
22951 architectural feature for blx and simple switches. */
22952 int
22953 arm_apply_sym_value (struct fix * fixP)
22954 {
22955 if (fixP->fx_addsy
22956 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22957 && !S_IS_EXTERNAL (fixP->fx_addsy))
22958 {
22959 switch (fixP->fx_r_type)
22960 {
22961 case BFD_RELOC_ARM_PCREL_BLX:
22962 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22963 if (ARM_IS_FUNC (fixP->fx_addsy))
22964 return 1;
22965 break;
22966
22967 case BFD_RELOC_ARM_PCREL_CALL:
22968 case BFD_RELOC_THUMB_PCREL_BLX:
22969 if (THUMB_IS_FUNC (fixP->fx_addsy))
22970 return 1;
22971 break;
22972
22973 default:
22974 break;
22975 }
22976
22977 }
22978 return 0;
22979 }
22980 #endif /* OBJ_ELF */
This page took 1.356911 seconds and 5 git commands to generate.