/* Source: binutils-gdb, gas/config/tc-arm.c — chunk taken from the commit
   "Fix an internal error in GAS when assembling a bogus piece of source
   code".  */
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2015 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
47 /* This structure holds the unwinding state. */
48
static struct
{
  /* Label emitted at the start of the current function (presumably set
     by .fnstart — TODO confirm against the directive handlers).  */
  symbolS * proc_start;
  /* Symbol for the function's exception-table entry, if one is built.  */
  symbolS * table_entry;
  /* Explicit personality routine, when one was named.  */
  symbolS * personality_routine;
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
77
78 #endif /* OBJ_ELF */
79
80 /* Results from operand parsing worker functions. */
81
82 typedef enum
83 {
84 PARSE_OPERAND_SUCCESS,
85 PARSE_OPERAND_FAIL,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
88
89 enum arm_float_abi
90 {
91 ARM_FLOAT_ABI_HARD,
92 ARM_FLOAT_ABI_SOFTFP,
93 ARM_FLOAT_ABI_SOFT
94 };
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170
static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
/* NOTE(review): arm_ext_v2 is initialised with ARM_EXT_V1, not ARM_EXT_V2.
   This matches the upstream source as shipped, but looks like a possible
   copy/paste slip — confirm against include/opcode/arm.h before changing.  */
static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
189 static const arm_feature_set arm_ext_v6_notm =
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
191 static const arm_feature_set arm_ext_v6_dsp =
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
193 static const arm_feature_set arm_ext_barrier =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
195 static const arm_feature_set arm_ext_msr =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
197 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
198 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
199 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
200 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
201 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
202 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
203 static const arm_feature_set arm_ext_m =
204 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M);
205 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
206 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
207 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
208 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
209 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
210
211 static const arm_feature_set arm_arch_any = ARM_ANY;
212 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1, -1);
213 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
214 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
215 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
216
217 static const arm_feature_set arm_cext_iwmmxt2 =
218 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
219 static const arm_feature_set arm_cext_iwmmxt =
220 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
221 static const arm_feature_set arm_cext_xscale =
222 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
223 static const arm_feature_set arm_cext_maverick =
224 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
225 static const arm_feature_set fpu_fpa_ext_v1 =
226 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
227 static const arm_feature_set fpu_fpa_ext_v2 =
228 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
229 static const arm_feature_set fpu_vfp_ext_v1xd =
230 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
231 static const arm_feature_set fpu_vfp_ext_v1 =
232 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
233 static const arm_feature_set fpu_vfp_ext_v2 =
234 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
235 static const arm_feature_set fpu_vfp_ext_v3xd =
236 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
237 static const arm_feature_set fpu_vfp_ext_v3 =
238 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
239 static const arm_feature_set fpu_vfp_ext_d32 =
240 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
241 static const arm_feature_set fpu_neon_ext_v1 =
242 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
243 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
244 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
245 static const arm_feature_set fpu_vfp_fp16 =
246 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
247 static const arm_feature_set fpu_neon_ext_fma =
248 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
249 static const arm_feature_set fpu_vfp_ext_fma =
250 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
251 static const arm_feature_set fpu_vfp_ext_armv8 =
252 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
253 static const arm_feature_set fpu_vfp_ext_armv8xd =
254 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
255 static const arm_feature_set fpu_neon_ext_armv8 =
256 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
257 static const arm_feature_set fpu_crypto_ext_armv8 =
258 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
259 static const arm_feature_set crc_ext_armv8 =
260 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
261
262 static int mfloat_abi_opt = -1;
263 /* Record user cpu selection for object attributes. */
264 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
265 /* Must be long enough to hold any of the names in arm_cpus. */
266 static char selected_cpu_name[16];
267
268 extern FLONUM_TYPE generic_floating_point_number;
269
270 /* Return if no cpu was selected on command-line. */
271 static bfd_boolean
272 no_cpu_selected (void)
273 {
274 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
275 }
276
277 #ifdef OBJ_ELF
278 # ifdef EABI_DEFAULT
279 static int meabi_flags = EABI_DEFAULT;
280 # else
281 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
282 # endif
283
284 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
285
286 bfd_boolean
287 arm_is_eabi (void)
288 {
289 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
290 }
291 #endif
292
293 #ifdef OBJ_ELF
294 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
295 symbolS * GOT_symbol;
296 #endif
297
298 /* 0: assemble for ARM,
299 1: assemble for Thumb,
300 2: assemble for Thumb even though target CPU does not support thumb
301 instructions. */
302 static int thumb_mode = 0;
303 /* A value distinct from the possible values for thumb_mode that we
304 can use to record whether thumb_mode has been copied into the
305 tc_frag_data field of a frag. */
306 #define MODE_RECORDED (1 << 4)
307
308 /* Specifies the intrinsic IT insn behavior mode. */
309 enum implicit_it_mode
310 {
311 IMPLICIT_IT_MODE_NEVER = 0x00,
312 IMPLICIT_IT_MODE_ARM = 0x01,
313 IMPLICIT_IT_MODE_THUMB = 0x02,
314 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
315 };
316 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
317
318 /* If unified_syntax is true, we are processing the new unified
319 ARM/Thumb syntax. Important differences from the old ARM mode:
320
321 - Immediate operands do not require a # prefix.
322 - Conditional affixes always appear at the end of the
323 instruction. (For backward compatibility, those instructions
324 that formerly had them in the middle, continue to accept them
325 there.)
326 - The IT instruction may appear, and if it does is validated
327 against subsequent conditional affixes. It does not generate
328 machine code.
329
330 Important differences from the old Thumb mode:
331
332 - Immediate operands do not require a # prefix.
333 - Most of the V6T2 instructions are only available in unified mode.
334 - The .N and .W suffixes are recognized and honored (it is an error
335 if they cannot be honored).
336 - All instructions set the flags if and only if they have an 's' affix.
337 - Conditional affixes may be used. They are validated against
338 preceding IT instructions. Unlike ARM mode, you cannot use a
339 conditional affix except in the scope of an IT instruction. */
340
341 static bfd_boolean unified_syntax = FALSE;
342
343 /* An immediate operand can start with #, and ld*, st*, pld operands
344 can contain [ and ]. We need to tell APP not to elide whitespace
345 before a [, which can appear as the first operand for pld.
346 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
347 const char arm_symbol_chars[] = "#[]{}";
348
349 enum neon_el_type
350 {
351 NT_invtype,
352 NT_untyped,
353 NT_integer,
354 NT_float,
355 NT_poly,
356 NT_signed,
357 NT_unsigned
358 };
359
360 struct neon_type_el
361 {
362 enum neon_el_type type;
363 unsigned size;
364 };
365
366 #define NEON_MAX_TYPE_ELS 4
367
368 struct neon_type
369 {
370 struct neon_type_el el[NEON_MAX_TYPE_ELS];
371 unsigned elems;
372 };
373
374 enum it_instruction_type
375 {
376 OUTSIDE_IT_INSN,
377 INSIDE_IT_INSN,
378 INSIDE_IT_LAST_INSN,
379 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
380 if inside, should be the last one. */
381 NEUTRAL_IT_INSN, /* This could be either inside or outside,
382 i.e. BKPT and NOP. */
383 IT_INSN /* The IT insn has been parsed. */
384 };
385
386 /* The maximum number of operands we need. */
387 #define ARM_IT_MAX_OPERANDS 6
388
389 struct arm_it
390 {
391 const char * error;
392 unsigned long instruction;
393 int size;
394 int size_req;
395 int cond;
396 /* "uncond_value" is set to the value in place of the conditional field in
397 unconditional versions of the instruction, or -1 if nothing is
398 appropriate. */
399 int uncond_value;
400 struct neon_type vectype;
401 /* This does not indicate an actual NEON instruction, only that
402 the mnemonic accepts neon-style type suffixes. */
403 int is_neon;
404 /* Set to the opcode if the instruction needs relaxation.
405 Zero if the instruction is not relaxed. */
406 unsigned long relax;
407 struct
408 {
409 bfd_reloc_code_real_type type;
410 expressionS exp;
411 int pc_rel;
412 } reloc;
413
414 enum it_instruction_type it_insn_type;
415
416 struct
417 {
418 unsigned reg;
419 signed int imm;
420 struct neon_type_el vectype;
421 unsigned present : 1; /* Operand present. */
422 unsigned isreg : 1; /* Operand was a register. */
423 unsigned immisreg : 1; /* .imm field is a second register. */
424 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
425 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
426 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
427 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
428 instructions. This allows us to disambiguate ARM <-> vector insns. */
429 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
430 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
431 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
432 unsigned issingle : 1; /* Operand is VFP single-precision register. */
433 unsigned hasreloc : 1; /* Operand has relocation suffix. */
434 unsigned writeback : 1; /* Operand has trailing ! */
435 unsigned preind : 1; /* Preindexed address. */
436 unsigned postind : 1; /* Postindexed address. */
437 unsigned negative : 1; /* Index register was negated. */
438 unsigned shifted : 1; /* Shift applied to operation. */
439 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
440 } operands[ARM_IT_MAX_OPERANDS];
441 };
442
443 static struct arm_it inst;
444
445 #define NUM_FLOAT_VALS 8
446
447 const char * fp_const[] =
448 {
449 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
450 };
451
452 /* Number of littlenums required to hold an extended precision number. */
453 #define MAX_LITTLENUMS 6
454
455 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
456
457 #define FAIL (-1)
458 #define SUCCESS (0)
459
460 #define SUFF_S 1
461 #define SUFF_D 2
462 #define SUFF_E 3
463 #define SUFF_P 4
464
465 #define CP_T_X 0x00008000
466 #define CP_T_Y 0x00400000
467
468 #define CONDS_BIT 0x00100000
469 #define LOAD_BIT 0x00100000
470
471 #define DOUBLE_LOAD_FLAG 0x00000001
472
473 struct asm_cond
474 {
475 const char * template_name;
476 unsigned long value;
477 };
478
479 #define COND_ALWAYS 0xE
480
481 struct asm_psr
482 {
483 const char * template_name;
484 unsigned long field;
485 };
486
487 struct asm_barrier_opt
488 {
489 const char * template_name;
490 unsigned long value;
491 const arm_feature_set arch;
492 };
493
494 /* The bit that distinguishes CPSR and SPSR. */
495 #define SPSR_BIT (1 << 22)
496
497 /* The individual PSR flag bits. */
498 #define PSR_c (1 << 16)
499 #define PSR_x (1 << 17)
500 #define PSR_s (1 << 18)
501 #define PSR_f (1 << 19)
502
503 struct reloc_entry
504 {
505 char * name;
506 bfd_reloc_code_real_type reloc;
507 };
508
509 enum vfp_reg_pos
510 {
511 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
512 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
513 };
514
515 enum vfp_ldstm_type
516 {
517 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
518 };
519
520 /* Bits for DEFINED field in neon_typed_alias. */
521 #define NTA_HASTYPE 1
522 #define NTA_HASINDEX 2
523
524 struct neon_typed_alias
525 {
526 unsigned char defined;
527 unsigned char index;
528 struct neon_type_el eltype;
529 };
530
531 /* ARM register categories. This includes coprocessor numbers and various
532 architecture extensions' registers. */
533 enum arm_reg_type
534 {
535 REG_TYPE_RN,
536 REG_TYPE_CP,
537 REG_TYPE_CN,
538 REG_TYPE_FN,
539 REG_TYPE_VFS,
540 REG_TYPE_VFD,
541 REG_TYPE_NQ,
542 REG_TYPE_VFSD,
543 REG_TYPE_NDQ,
544 REG_TYPE_NSDQ,
545 REG_TYPE_VFC,
546 REG_TYPE_MVF,
547 REG_TYPE_MVD,
548 REG_TYPE_MVFX,
549 REG_TYPE_MVDX,
550 REG_TYPE_MVAX,
551 REG_TYPE_DSPSC,
552 REG_TYPE_MMXWR,
553 REG_TYPE_MMXWC,
554 REG_TYPE_MMXWCG,
555 REG_TYPE_XSCALE,
556 REG_TYPE_RNB
557 };
558
559 /* Structure for a hash table entry for a register.
560 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
561 information which states whether a vector type or index is specified (for a
562 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
563 struct reg_entry
564 {
565 const char * name;
566 unsigned int number;
567 unsigned char type;
568 unsigned char builtin;
569 struct neon_typed_alias * neon;
570 };
571
572 /* Diagnostics used when we don't get a register of the expected type. */
/* Diagnostics used when we don't get a register of the expected type.
   NOTE: this table is indexed positionally by enum arm_reg_type, so the
   order here must mirror that enum exactly.  REG_TYPE_RNB (the last
   enumerator) appears to have no message — presumably it is never
   reported directly; confirm at the lookup site before relying on it.  */
const char * const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};
597
598 /* Some well known registers that we refer to directly elsewhere. */
599 #define REG_R12 12
600 #define REG_SP 13
601 #define REG_LR 14
602 #define REG_PC 15
603
604 /* ARM instructions take 4bytes in the object file, Thumb instructions
605 take 2: */
606 #define INSN_SIZE 4
607
608 struct asm_opcode
609 {
610 /* Basic string to match. */
611 const char * template_name;
612
613 /* Parameters to instruction. */
614 unsigned int operands[8];
615
616 /* Conditional tag - see opcode_lookup. */
617 unsigned int tag : 4;
618
619 /* Basic instruction code. */
620 unsigned int avalue : 28;
621
622 /* Thumb-format instruction code. */
623 unsigned int tvalue;
624
625 /* Which architecture variant provides this instruction. */
626 const arm_feature_set * avariant;
627 const arm_feature_set * tvariant;
628
629 /* Function to call to encode instruction in ARM format. */
630 void (* aencode) (void);
631
632 /* Function to call to encode instruction in Thumb format. */
633 void (* tencode) (void);
634 };
635
636 /* Defines for various bits that we will want to toggle. */
637 #define INST_IMMEDIATE 0x02000000
638 #define OFFSET_REG 0x02000000
639 #define HWOFFSET_IMM 0x00400000
640 #define SHIFT_BY_REG 0x00000010
641 #define PRE_INDEX 0x01000000
642 #define INDEX_UP 0x00800000
643 #define WRITE_BACK 0x00200000
644 #define LDM_TYPE_2_OR_3 0x00400000
645 #define CPSI_MMOD 0x00020000
646
647 #define LITERAL_MASK 0xf000f000
648 #define OPCODE_MASK 0xfe1fffff
649 #define V4_STR_BIT 0x00000020
650 #define VLDR_VMOV_SAME 0x0040f000
651
652 #define T2_SUBS_PC_LR 0xf3de8f00
653
654 #define DATA_OP_SHIFT 21
655
656 #define T2_OPCODE_MASK 0xfe1fffff
657 #define T2_DATA_OP_SHIFT 21
658
659 #define A_COND_MASK 0xf0000000
660 #define A_PUSH_POP_OP_MASK 0x0fff0000
661
662 /* Opcodes for pushing/poping registers to/from the stack. */
663 #define A1_OPCODE_PUSH 0x092d0000
664 #define A2_OPCODE_PUSH 0x052d0004
665 #define A2_OPCODE_POP 0x049d0004
666
667 /* Codes to distinguish the arithmetic instructions. */
668 #define OPCODE_AND 0
669 #define OPCODE_EOR 1
670 #define OPCODE_SUB 2
671 #define OPCODE_RSB 3
672 #define OPCODE_ADD 4
673 #define OPCODE_ADC 5
674 #define OPCODE_SBC 6
675 #define OPCODE_RSC 7
676 #define OPCODE_TST 8
677 #define OPCODE_TEQ 9
678 #define OPCODE_CMP 10
679 #define OPCODE_CMN 11
680 #define OPCODE_ORR 12
681 #define OPCODE_MOV 13
682 #define OPCODE_BIC 14
683 #define OPCODE_MVN 15
684
685 #define T2_OPCODE_AND 0
686 #define T2_OPCODE_BIC 1
687 #define T2_OPCODE_ORR 2
688 #define T2_OPCODE_ORN 3
689 #define T2_OPCODE_EOR 4
690 #define T2_OPCODE_ADD 8
691 #define T2_OPCODE_ADC 10
692 #define T2_OPCODE_SBC 11
693 #define T2_OPCODE_SUB 13
694 #define T2_OPCODE_RSB 14
695
696 #define T_OPCODE_MUL 0x4340
697 #define T_OPCODE_TST 0x4200
698 #define T_OPCODE_CMN 0x42c0
699 #define T_OPCODE_NEG 0x4240
700 #define T_OPCODE_MVN 0x43c0
701
702 #define T_OPCODE_ADD_R3 0x1800
703 #define T_OPCODE_SUB_R3 0x1a00
704 #define T_OPCODE_ADD_HI 0x4400
705 #define T_OPCODE_ADD_ST 0xb000
706 #define T_OPCODE_SUB_ST 0xb080
707 #define T_OPCODE_ADD_SP 0xa800
708 #define T_OPCODE_ADD_PC 0xa000
709 #define T_OPCODE_ADD_I8 0x3000
710 #define T_OPCODE_SUB_I8 0x3800
711 #define T_OPCODE_ADD_I3 0x1c00
712 #define T_OPCODE_SUB_I3 0x1e00
713
714 #define T_OPCODE_ASR_R 0x4100
715 #define T_OPCODE_LSL_R 0x4080
716 #define T_OPCODE_LSR_R 0x40c0
717 #define T_OPCODE_ROR_R 0x41c0
718 #define T_OPCODE_ASR_I 0x1000
719 #define T_OPCODE_LSL_I 0x0000
720 #define T_OPCODE_LSR_I 0x0800
721
722 #define T_OPCODE_MOV_I8 0x2000
723 #define T_OPCODE_CMP_I8 0x2800
724 #define T_OPCODE_CMP_LR 0x4280
725 #define T_OPCODE_MOV_HR 0x4600
726 #define T_OPCODE_CMP_HR 0x4500
727
728 #define T_OPCODE_LDR_PC 0x4800
729 #define T_OPCODE_LDR_SP 0x9800
730 #define T_OPCODE_STR_SP 0x9000
731 #define T_OPCODE_LDR_IW 0x6800
732 #define T_OPCODE_STR_IW 0x6000
733 #define T_OPCODE_LDR_IH 0x8800
734 #define T_OPCODE_STR_IH 0x8000
735 #define T_OPCODE_LDR_IB 0x7800
736 #define T_OPCODE_STR_IB 0x7000
737 #define T_OPCODE_LDR_RW 0x5800
738 #define T_OPCODE_STR_RW 0x5000
739 #define T_OPCODE_LDR_RH 0x5a00
740 #define T_OPCODE_STR_RH 0x5200
741 #define T_OPCODE_LDR_RB 0x5c00
742 #define T_OPCODE_STR_RB 0x5400
743
744 #define T_OPCODE_PUSH 0xb400
745 #define T_OPCODE_POP 0xbc00
746
747 #define T_OPCODE_BRANCH 0xe000
748
749 #define THUMB_SIZE 2 /* Size of thumb instruction. */
750 #define THUMB_PP_PC_LR 0x0100
751 #define THUMB_LOAD_BIT 0x0800
752 #define THUMB2_LOAD_BIT 0x00100000
753
/* Diagnostic strings for operand/syntax errors.  Each macro expands to a
   translated string expression; none may carry a trailing semicolon,
   because they are used in expression context (e.g. assigned to
   inst.error, or selected by ?:).  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Fix: removed a stray trailing semicolon from the expansion below; with
   the semicolon present, any use of BAD_ADDR_MODE inside an expression
   (conditional operator, function argument, ...) failed to compile.  */
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
775
776 static struct hash_control * arm_ops_hsh;
777 static struct hash_control * arm_cond_hsh;
778 static struct hash_control * arm_shift_hsh;
779 static struct hash_control * arm_psr_hsh;
780 static struct hash_control * arm_v7m_psr_hsh;
781 static struct hash_control * arm_reg_hsh;
782 static struct hash_control * arm_reloc_hsh;
783 static struct hash_control * arm_barrier_opt_hsh;
784
785 /* Stuff needed to resolve the label ambiguity
786 As:
787 ...
788 label: <insn>
789 may differ from:
790 ...
791 label:
792 <insn> */
793
794 symbolS * last_label_seen;
795 static int label_is_thumb_function_name = FALSE;
796
797 /* Literal pool structure. Held on a per-section
798 and per-sub-section basis. */
799
800 #define MAX_LITERAL_POOL_SIZE 1024
801 typedef struct literal_pool
802 {
803 expressionS literals [MAX_LITERAL_POOL_SIZE];
804 unsigned int next_free_entry;
805 unsigned int id;
806 symbolS * symbol;
807 segT section;
808 subsegT sub_section;
809 #ifdef OBJ_ELF
810 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
811 #endif
812 struct literal_pool * next;
813 unsigned int alignment;
814 } literal_pool;
815
816 /* Pointer to a linked list of literal pools. */
817 literal_pool * list_of_pools = NULL;
818
819 typedef enum asmfunc_states
820 {
821 OUTSIDE_ASMFUNC,
822 WAITING_ASMFUNC_NAME,
823 WAITING_ENDASMFUNC
824 } asmfunc_states;
825
826 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
827
828 #ifdef OBJ_ELF
829 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
830 #else
831 static struct current_it now_it;
832 #endif
833
834 static inline int
835 now_it_compatible (int cond)
836 {
837 return (cond & ~1) == (now_it.cc & ~1);
838 }
839
840 static inline int
841 conditional_insn (void)
842 {
843 return inst.cond != COND_ALWAYS;
844 }
845
846 static int in_it_block (void);
847
848 static int handle_it_state (void);
849
850 static void force_automatic_it_block_close (void);
851
852 static void it_fsm_post_encode (void);
853
854 #define set_it_insn_type(type) \
855 do \
856 { \
857 inst.it_insn_type = type; \
858 if (handle_it_state () == FAIL) \
859 return; \
860 } \
861 while (0)
862
863 #define set_it_insn_type_nonvoid(type, failret) \
864 do \
865 { \
866 inst.it_insn_type = type; \
867 if (handle_it_state () == FAIL) \
868 return failret; \
869 } \
870 while(0)
871
872 #define set_it_insn_type_last() \
873 do \
874 { \
875 if (inst.cond == COND_ALWAYS) \
876 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
877 else \
878 set_it_insn_type (INSIDE_IT_LAST_INSN); \
879 } \
880 while (0)
881
882 /* Pure syntax. */
883
884 /* This array holds the chars that always start a comment. If the
885 pre-processor is disabled, these aren't very useful. */
886 char arm_comment_chars[] = "@";
887
888 /* This array holds the chars that only start a comment at the beginning of
889 a line. If the line seems to have the form '# 123 filename'
890 .line and .file directives will appear in the pre-processed output. */
891 /* Note that input_file.c hand checks for '#' at the beginning of the
892 first line of the input file. This is because the compiler outputs
893 #NO_APP at the beginning of its output. */
894 /* Also note that comments like this one will always work. */
895 const char line_comment_chars[] = "#";
896
897 char arm_line_separator_chars[] = ";";
898
899 /* Chars that can be used to separate mant
900 from exp in floating point numbers. */
901 const char EXP_CHARS[] = "eE";
902
903 /* Chars that mean this number is a floating point constant. */
904 /* As in 0f12.456 */
905 /* or 0d1.2345e12 */
906
907 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
908
909 /* Prefix characters that indicate the start of an immediate
910 value. */
911 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
912
913 /* Separator character handling. */
914
915 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
916
917 static inline int
918 skip_past_char (char ** str, char c)
919 {
920 /* PR gas/14987: Allow for whitespace before the expected character. */
921 skip_whitespace (*str);
922
923 if (**str == c)
924 {
925 (*str)++;
926 return SUCCESS;
927 }
928 else
929 return FAIL;
930 }
931
932 #define skip_past_comma(str) skip_past_char (str, ',')
933
934 /* Arithmetic expressions (possibly involving symbols). */
935
936 /* Return TRUE if anything in the expression is a bignum. */
937
938 static int
939 walk_no_bignums (symbolS * sp)
940 {
941 if (symbol_get_value_expression (sp)->X_op == O_big)
942 return 1;
943
944 if (symbol_get_value_expression (sp)->X_add_symbol)
945 {
946 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
947 || (symbol_get_value_expression (sp)->X_op_symbol
948 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
949 }
950
951 return 0;
952 }
953
/* Non-zero while my_get_expression is executing; md_operand uses this to
   mark bad operands as O_illegal instead of ignoring them.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0   /* No '#'/'$' prefix permitted.  */
#define GE_IMM_PREFIX 1  /* A '#'/'$' prefix is required.  */
#define GE_OPT_PREFIX 2  /* A '#'/'$' prefix is optional.  */
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
963
/* Parse an expression starting at *STR into EP, enforcing the
   immediate-prefix rules selected by PREFIX_MODE (one of the GE_* values
   above).  On success, advance *STR past the expression and return 0.
   On failure, set inst.error (if not already set) and return non-zero.
   NOTE(review): one failure path returns FAIL while the others return 1;
   callers appear to test only for non-zero, so this is harmless -- verify
   before relying on the exact value.  */

static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser reads from input_line_pointer, so
     temporarily point it at our string, then restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1049
1050 /* Turn a string in input_line_pointer into a floating point constant
1051 of type TYPE, and store the appropriate bytes in *LITP. The number
1052 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1053 returned, or NULL on OK.
1054
1055 Note that fp constants aren't represent in the normal way on the ARM.
1056 In big endian mode, things are as expected. However, in little endian
1057 mode fp constants are big-endian word-wise, and little-endian byte-wise
1058 within the words. For example, (double) 1.1 in big endian mode is
1059 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1060 the byte sequence 99 99 f1 3f 9a 99 99 99.
1061
1062 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1063
1064 char *
1065 md_atof (int type, char * litP, int * sizeP)
1066 {
1067 int prec;
1068 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1069 char *t;
1070 int i;
1071
1072 switch (type)
1073 {
1074 case 'f':
1075 case 'F':
1076 case 's':
1077 case 'S':
1078 prec = 2;
1079 break;
1080
1081 case 'd':
1082 case 'D':
1083 case 'r':
1084 case 'R':
1085 prec = 4;
1086 break;
1087
1088 case 'x':
1089 case 'X':
1090 prec = 5;
1091 break;
1092
1093 case 'p':
1094 case 'P':
1095 prec = 5;
1096 break;
1097
1098 default:
1099 *sizeP = 0;
1100 return _("Unrecognized or unsupported floating point constant");
1101 }
1102
1103 t = atof_ieee (input_line_pointer, type, words);
1104 if (t)
1105 input_line_pointer = t;
1106 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1107
1108 if (target_big_endian)
1109 {
1110 for (i = 0; i < prec; i++)
1111 {
1112 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1113 litP += sizeof (LITTLENUM_TYPE);
1114 }
1115 }
1116 else
1117 {
1118 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1119 for (i = prec - 1; i >= 0; i--)
1120 {
1121 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1122 litP += sizeof (LITTLENUM_TYPE);
1123 }
1124 else
1125 /* For a 4 byte float the order of elements in `words' is 1 0.
1126 For an 8 byte float the order is 1 0 3 2. */
1127 for (i = 0; i < prec; i += 2)
1128 {
1129 md_number_to_chars (litP, (valueT) words[i + 1],
1130 sizeof (LITTLENUM_TYPE));
1131 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1132 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1133 litP += 2 * sizeof (LITTLENUM_TYPE);
1134 }
1135 }
1136
1137 return NULL;
1138 }
1139
1140 /* We handle all bad expressions here, so that we can report the faulty
1141 instruction in the error message. */
1142 void
1143 md_operand (expressionS * exp)
1144 {
1145 if (in_my_get_expression)
1146 exp->X_op = O_illegal;
1147 }
1148
1149 /* Immediate values. */
1150
1151 /* Generic immediate-value read function for use in directives.
1152 Accepts anything that 'expression' can fold to a constant.
1153 *val receives the number. */
1154 #ifdef OBJ_ELF
1155 static int
1156 immediate_for_directive (int *val)
1157 {
1158 expressionS exp;
1159 exp.X_op = O_illegal;
1160
1161 if (is_immediate_prefix (*input_line_pointer))
1162 {
1163 input_line_pointer++;
1164 expression (&exp);
1165 }
1166
1167 if (exp.X_op != O_constant)
1168 {
1169 as_bad (_("expected #constant"));
1170 ignore_rest_of_line ();
1171 return FAIL;
1172 }
1173 *val = exp.X_add_number;
1174 return SUCCESS;
1175 }
1176 #endif
1177
1178 /* Register parsing. */
1179
1180 /* Generic register parser. CCP points to what should be the
1181 beginning of a register name. If it is indeed a valid register
1182 name, advance CCP over it and return the reg_entry structure;
1183 otherwise return NULL. Does not issue diagnostics. */
1184
1185 static struct reg_entry *
1186 arm_reg_parse_multi (char **ccp)
1187 {
1188 char *start = *ccp;
1189 char *p;
1190 struct reg_entry *reg;
1191
1192 skip_whitespace (start);
1193
1194 #ifdef REGISTER_PREFIX
1195 if (*start != REGISTER_PREFIX)
1196 return NULL;
1197 start++;
1198 #endif
1199 #ifdef OPTIONAL_REGISTER_PREFIX
1200 if (*start == OPTIONAL_REGISTER_PREFIX)
1201 start++;
1202 #endif
1203
1204 p = start;
1205 if (!ISALPHA (*p) || !is_name_beginner (*p))
1206 return NULL;
1207
1208 do
1209 p++;
1210 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1211
1212 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1213
1214 if (!reg)
1215 return NULL;
1216
1217 *ccp = p;
1218 return reg;
1219 }
1220
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  NOTE(review): there is no break here, so when the
	 bare-number parse fails we drop into the REG_TYPE_MMXWC case below
	 (with *ccp possibly advanced past any digits consumed by strtoul).
	 Presumably intentional -- confirm before adding a break.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1258
1259 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1260 return value is the register number or FAIL. */
1261
1262 static int
1263 arm_reg_parse (char **ccp, enum arm_reg_type type)
1264 {
1265 char *start = *ccp;
1266 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1267 int ret;
1268
1269 /* Do not allow a scalar (reg+index) to parse as a register. */
1270 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1271 return FAIL;
1272
1273 if (reg && reg->type == type)
1274 return reg->number;
1275
1276 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1277 return ret;
1278
1279 *ccp = start;
1280 return FAIL;
1281 }
1282
1283 /* Parse a Neon type specifier. *STR should point at the leading '.'
1284 character. Does no verification at this stage that the type fits the opcode
1285 properly. E.g.,
1286
1287 .i32.i32.s16
1288 .s32.f32
1289 .u16
1290
1291 Can all be legally parsed by this function.
1292
1293 Fills in neon_type struct pointer with parsed information, and updates STR
1294 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1295 type, FAIL if not. */
1296
static int
parse_neon_type (struct neon_type *type, char **str)
{
  char *ptr = *str;

  /* NOTE(review): TYPE is dereferenced unconditionally in the loop
     condition below, so despite this guard a NULL TYPE is not actually
     supported; all visible callers pass a valid pointer.  */
  if (type)
    type->elems = 0;

  while (type->elems < NEON_MAX_TYPE_ELS)
    {
      enum neon_el_type thistype = NT_untyped;
      unsigned thissize = -1u;	/* Marker: size not yet determined.  */

      if (*ptr != '.')
	break;

      ptr++;

      /* Just a size without an explicit type.  */
      if (ISDIGIT (*ptr))
	goto parsesize;

      switch (TOLOWER (*ptr))
	{
	case 'i': thistype = NT_integer; break;
	case 'f': thistype = NT_float; break;
	case 'p': thistype = NT_poly; break;
	case 's': thistype = NT_signed; break;
	case 'u': thistype = NT_unsigned; break;
	case 'd':
	  /* ".d" is shorthand for a 64-bit float.  */
	  thistype = NT_float;
	  thissize = 64;
	  ptr++;
	  goto done;
	default:
	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
	  return FAIL;
	}

      ptr++;

      /* .f is an abbreviation for .f32.  */
      if (thistype == NT_float && !ISDIGIT (*ptr))
	thissize = 32;
      else
	{
	parsesize:
	  thissize = strtoul (ptr, &ptr, 10);

	  if (thissize != 8 && thissize != 16 && thissize != 32
	      && thissize != 64)
	    {
	      as_bad (_("bad size %d in type specifier"), thissize);
	      return FAIL;
	    }
	}

    done:
      if (type)
	{
	  /* Append the parsed element to the result.  */
	  type->el[type->elems].type = thistype;
	  type->el[type->elems].size = thissize;
	  type->elems++;
	}
    }

  /* Empty/missing type is not a successful parse.  */
  if (type->elems == 0)
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
1371
1372 /* Errors may be set multiple times during parsing or bit encoding
1373 (particularly in the Neon bits), but usually the earliest error which is set
1374 will be the most meaningful. Avoid overwriting it with later (cascading)
1375 errors by calling this function. */
1376
1377 static void
1378 first_error (const char *err)
1379 {
1380 if (!inst.error)
1381 inst.error = err;
1382 }
1383
1384 /* Parse a single type, e.g. ".s32", leading period included. */
1385 static int
1386 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1387 {
1388 char *str = *ccp;
1389 struct neon_type optype;
1390
1391 if (*str == '.')
1392 {
1393 if (parse_neon_type (&optype, &str) == SUCCESS)
1394 {
1395 if (optype.elems == 1)
1396 *vectype = optype.el[0];
1397 else
1398 {
1399 first_error (_("only one type should be specified for operand"));
1400 return FAIL;
1401 }
1402 }
1403 else
1404 {
1405 first_error (_("vector type expected"));
1406 return FAIL;
1407 }
1408 }
1409 else
1410 return FAIL;
1411
1412 *ccp = str;
1413
1414 return SUCCESS;
1415 }
1416
1417 /* Special meanings for indices (which have a range of 0-7), which will fit into
1418 a 4-bit integer. */
1419
#define NEON_ALL_LANES		15	/* Written "[]": broadcast to all lanes.  */
#define NEON_INTERLEAVE_LANES	14	/* No index: interleaved structure access.  */
1422
1423 /* Parse either a register or a scalar, with an optional type. Return the
1424 register number, and optionally fill in the actual type of the register
1425 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1426 type/index information in *TYPEINFO. */
1427
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with a completely untyped, unindexed alias.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* Inherit any type/index information attached by a .dn/.qn alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix must not conflict with an alias type.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;	/* "[]" selects all lanes.  */
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1531
1532 /* Like arm_reg_parse, but allow allow the following extra features:
1533 - If RTYPE is non-zero, return the (possibly restricted) type of the
1534 register (e.g. Neon double or quad reg when either has been requested).
1535 - If this is a Neon vector type with additional type information, fill
1536 in the struct pointed to by VECTYPE (if non-NULL).
1537 This function will fault on encountering a scalar. */
1538
1539 static int
1540 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1541 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1542 {
1543 struct neon_typed_alias atype;
1544 char *str = *ccp;
1545 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1546
1547 if (reg == FAIL)
1548 return FAIL;
1549
1550 /* Do not allow regname(... to parse as a register. */
1551 if (*str == '(')
1552 return FAIL;
1553
1554 /* Do not allow a scalar (reg+index) to parse as a register. */
1555 if ((atype.defined & NTA_HASINDEX) != 0)
1556 {
1557 first_error (_("register operand expected, but got scalar"));
1558 return FAIL;
1559 }
1560
1561 if (vectype)
1562 *vectype = atype.eltype;
1563
1564 *ccp = str;
1565
1566 return reg;
1567 }
1568
/* Unpack the D-register number and lane index from a parse_scalar
   return value (encoded as reg * 16 + index).  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1571
1572 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1573 have enough information to be able to do a good job bounds-checking. So, we
1574 just do easy checks here, and do further checks later. */
1575
1576 static int
1577 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1578 {
1579 int reg;
1580 char *str = *ccp;
1581 struct neon_typed_alias atype;
1582
1583 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1584
1585 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1586 return FAIL;
1587
1588 if (atype.index == NEON_ALL_LANES)
1589 {
1590 first_error (_("scalar must have an index"));
1591 return FAIL;
1592 }
1593 else if (atype.index >= 64 / elsize)
1594 {
1595 first_error (_("scalar index out of range"));
1596 return FAIL;
1597 }
1598
1599 if (type)
1600 *type = atype.eltype;
1601
1602 *ccp = str;
1603
1604 return reg * 16 + atype.index;
1605 }
1606
1607 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1608
static long
parse_reg_list (char ** strp)
{
  char * str = * strp;
  long   range = 0;
  int    another_range;

  /* We come back here if we get ranges concatenated by '+' or '|'.  */
  do
    {
      skip_whitespace (str);

      another_range = 0;

      if (*str == '{')
	{
	  int in_range = 0;
	  int cur_reg = -1;

	  str++;
	  do
	    {
	      int reg;

	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
		  return FAIL;
		}

	      if (in_range)
		{
		  int i;

		  if (reg <= cur_reg)
		    {
		      first_error (_("bad range in register list"));
		      return FAIL;
		    }

		  /* Fill in all registers between the range endpoints,
		     warning about any that were already listed.  */
		  for (i = cur_reg + 1; i < reg; i++)
		    {
		      if (range & (1 << i))
			as_tsktsk
			  (_("Warning: duplicated register (r%d) in register list"),
			   i);
		      else
			range |= 1 << i;
		    }
		  in_range = 0;
		}

	      if (range & (1 << reg))
		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
			   reg);
	      else if (reg <= cur_reg)
		as_tsktsk (_("Warning: register range not in ascending order"));

	      range |= 1 << reg;
	      cur_reg = reg;
	    }
	  /* Continue after a comma; alternatively a '-' starts a range.
	     Note the comma operator: in_range is set and str is advanced
	     past the character tested, whether or not it was a '-'.  */
	  while (skip_past_comma (&str) != FAIL
		 || (in_range = 1, *str++ == '-'));
	  /* Undo the unconditional post-increment from the '-' test.  */
	  str--;

	  if (skip_past_char (&str, '}') == FAIL)
	    {
	      first_error (_("missing `}'"));
	      return FAIL;
	    }
	}
      else
	{
	  /* Not a brace list: accept a bare 16-bit mask expression.  */
	  expressionS exp;

	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
	    return FAIL;

	  if (exp.X_op == O_constant)
	    {
	      if (exp.X_add_number
		  != (exp.X_add_number & 0x0000ffff))
		{
		  inst.error = _("invalid register mask");
		  return FAIL;
		}

	      if ((range & exp.X_add_number) != 0)
		{
		  int regno = range & exp.X_add_number;

		  /* NOTE(review): this computes (1 << lowest-set-bit) - 1,
		     which looks odd as a way of recovering a register
		     number; it only affects the warning text -- verify.  */
		  regno &= -regno;
		  regno = (1 << regno) - 1;
		  as_tsktsk
		    (_("Warning: duplicated register (r%d) in register list"),
		     regno);
		}

	      range |= exp.X_add_number;
	    }
	  else
	    {
	      /* A non-constant mask: defer to a relocation.  */
	      if (inst.reloc.type != 0)
		{
		  inst.error = _("expression too complex");
		  return FAIL;
		}

	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
	      inst.reloc.pc_rel = 0;
	    }
	}

      if (*str == '|' || *str == '+')
	{
	  str++;
	  another_range = 1;
	}
    }
  while (another_range);

  *strp = str;
  return range;
}
1734
1735 /* Types of registers in a list. */
1736
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision VFP registers (S0-S31).  */
  REGLIST_VFP_D,	/* Double-precision VFP registers.  */
  REGLIST_NEON_D	/* Neon D registers, with Q-register pair syntax.  */
};
1743
1744 /* Parse a VFP register list. If the string is invalid return FAIL.
1745 Otherwise return the number of registers, and set PBASE to the first
1746 register. Parses registers of type ETYPE.
1747 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1748 - Q registers can be used to specify pairs of D registers
1749 - { } can be omitted from around a singleton register list
1750 FIXME: This is not implemented, as it would require backtracking in
1751 some cases, e.g.:
1752 vtbl.8 d3,d4,d5
1753 This could be done (the meaning isn't really ambiguous), but doesn't
1754 fit in well with the current parsing framework.
1755 - 32 D registers may be used (also true for VFPv3).
1756 FIXME: Types are ignored in these register lists, which is probably a
1757 bug. */
1758
1759 static int
1760 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1761 {
1762 char *str = *ccp;
1763 int base_reg;
1764 int new_base;
1765 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1766 int max_regs = 0;
1767 int count = 0;
1768 int warned = 0;
1769 unsigned long mask = 0;
1770 int i;
1771
1772 if (skip_past_char (&str, '{') == FAIL)
1773 {
1774 inst.error = _("expecting {");
1775 return FAIL;
1776 }
1777
1778 switch (etype)
1779 {
1780 case REGLIST_VFP_S:
1781 regtype = REG_TYPE_VFS;
1782 max_regs = 32;
1783 break;
1784
1785 case REGLIST_VFP_D:
1786 regtype = REG_TYPE_VFD;
1787 break;
1788
1789 case REGLIST_NEON_D:
1790 regtype = REG_TYPE_NDQ;
1791 break;
1792 }
1793
1794 if (etype != REGLIST_VFP_S)
1795 {
1796 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1797 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1798 {
1799 max_regs = 32;
1800 if (thumb_mode)
1801 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1802 fpu_vfp_ext_d32);
1803 else
1804 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1805 fpu_vfp_ext_d32);
1806 }
1807 else
1808 max_regs = 16;
1809 }
1810
1811 base_reg = max_regs;
1812
1813 do
1814 {
1815 int setmask = 1, addregs = 1;
1816
1817 new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1818
1819 if (new_base == FAIL)
1820 {
1821 first_error (_(reg_expected_msgs[regtype]));
1822 return FAIL;
1823 }
1824
1825 if (new_base >= max_regs)
1826 {
1827 first_error (_("register out of range in list"));
1828 return FAIL;
1829 }
1830
1831 /* Note: a value of 2 * n is returned for the register Q<n>. */
1832 if (regtype == REG_TYPE_NQ)
1833 {
1834 setmask = 3;
1835 addregs = 2;
1836 }
1837
1838 if (new_base < base_reg)
1839 base_reg = new_base;
1840
1841 if (mask & (setmask << new_base))
1842 {
1843 first_error (_("invalid register list"));
1844 return FAIL;
1845 }
1846
1847 if ((mask >> new_base) != 0 && ! warned)
1848 {
1849 as_tsktsk (_("register list not in ascending order"));
1850 warned = 1;
1851 }
1852
1853 mask |= setmask << new_base;
1854 count += addregs;
1855
1856 if (*str == '-') /* We have the start of a range expression */
1857 {
1858 int high_range;
1859
1860 str++;
1861
1862 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1863 == FAIL)
1864 {
1865 inst.error = gettext (reg_expected_msgs[regtype]);
1866 return FAIL;
1867 }
1868
1869 if (high_range >= max_regs)
1870 {
1871 first_error (_("register out of range in list"));
1872 return FAIL;
1873 }
1874
1875 if (regtype == REG_TYPE_NQ)
1876 high_range = high_range + 1;
1877
1878 if (high_range <= new_base)
1879 {
1880 inst.error = _("register range not in ascending order");
1881 return FAIL;
1882 }
1883
1884 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1885 {
1886 if (mask & (setmask << new_base))
1887 {
1888 inst.error = _("invalid register list");
1889 return FAIL;
1890 }
1891
1892 mask |= setmask << new_base;
1893 count += addregs;
1894 }
1895 }
1896 }
1897 while (skip_past_comma (&str) != FAIL);
1898
1899 str++;
1900
1901 /* Sanity check -- should have raised a parse error above. */
1902 if (count == 0 || count > max_regs)
1903 abort ();
1904
1905 *pbase = base_reg;
1906
1907 /* Final test -- the registers must be consecutive. */
1908 mask >>= base_reg;
1909 for (i = 0; i < count; i++)
1910 {
1911 if ((mask & (1u << i)) == 0)
1912 {
1913 inst.error = _("non-contiguous register range");
1914 return FAIL;
1915 }
1916 }
1917
1918 *ccp = str;
1919
1920 return count;
1921 }
1922
1923 /* True if two alias types are the same. */
1924
1925 static bfd_boolean
1926 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1927 {
1928 if (!a && !b)
1929 return TRUE;
1930
1931 if (!a || !b)
1932 return FALSE;
1933
1934 if (a->defined != b->defined)
1935 return FALSE;
1936
1937 if ((a->defined & NTA_HASTYPE) != 0
1938 && (a->eltype.type != b->eltype.type
1939 || a->eltype.size != b->eltype.size))
1940 return FALSE;
1941
1942 if ((a->defined & NTA_HASINDEX) != 0
1943 && (a->index != b->index))
1944 return FALSE;
1945
1946 return TRUE;
1947 }
1948
1949 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1950 The base register is put in *PBASE.
1951 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1952 the return value.
1953 The register stride (minus one) is put in bit 4 of the return value.
1954 Bits [6:5] encode the list length (minus one).
1955 The type of the list elements is put in *ELTYPE, if non-NULL. */
1956
#define NEON_LANE(X)		((X) & 0xf)		/* Lane or NEON_*_LANES.  */
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)	/* Stride: 1 or 2.  */
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)	/* List length: 1-4.  */
1960
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  /* The braces may be omitted around a single-register list.  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: fixes the base and the expected type.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: determines the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent registers must follow the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count the D registers spanned by the range (a Q register
	     accounts for two D registers).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Every element must name the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  /* A singleton list implies unit stride.  */
  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as documented above the function.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2113
2114 /* Parse an explicit relocation suffix on an expression. This is
2115 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2116 arm_reloc_hsh contains no entries, so this function can only
2117 succeed if there is no () after the word. Returns -1 on error,
2118 BFD_RELOC_UNUSED if there wasn't any suffix. */
2119
2120 static int
2121 parse_reloc (char **str)
2122 {
2123 struct reloc_entry *r;
2124 char *p, *q;
2125
2126 if (**str != '(')
2127 return BFD_RELOC_UNUSED;
2128
2129 p = *str + 1;
2130 q = p;
2131
2132 while (*q && *q != ')' && *q != ',')
2133 q++;
2134 if (*q != ')')
2135 return -1;
2136
2137 if ((r = (struct reloc_entry *)
2138 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2139 return -1;
2140
2141 *str = q + 1;
2142 return r->reloc;
2143 }
2144
2145 /* Directives: register aliases. */
2146
2147 static struct reg_entry *
2148 insert_reg_alias (char *str, unsigned number, int type)
2149 {
2150 struct reg_entry *new_reg;
2151 const char *name;
2152
2153 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2154 {
2155 if (new_reg->builtin)
2156 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2157
2158 /* Only warn about a redefinition if it's not defined as the
2159 same register. */
2160 else if (new_reg->number != number || new_reg->type != type)
2161 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2162
2163 return NULL;
2164 }
2165
2166 name = xstrdup (str);
2167 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2168
2169 new_reg->name = name;
2170 new_reg->number = number;
2171 new_reg->type = type;
2172 new_reg->builtin = FALSE;
2173 new_reg->neon = NULL;
2174
2175 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2176 abort ();
2177
2178 return new_reg;
2179 }
2180
2181 static void
2182 insert_neon_reg_alias (char *str, int number, int type,
2183 struct neon_typed_alias *atype)
2184 {
2185 struct reg_entry *reg = insert_reg_alias (str, number, type);
2186
2187 if (!reg)
2188 {
2189 first_error (_("attempt to redefine typed alias"));
2190 return;
2191 }
2192
2193 if (atype)
2194 {
2195 reg->neon = (struct neon_typed_alias *)
2196 xmalloc (sizeof (struct neon_typed_alias));
2197 *reg->neon = *atype;
2198 }
2199 }
2200
2201 /* Look for the .req directive. This is of the form:
2202
2203 new_register_name .req existing_register_name
2204
2205 If we find one, or if it looks sufficiently like one that we want to
2206 handle any error here, return TRUE. Otherwise return FALSE. */
2207
static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  /* NOTE(review): alloca puts a name of unchecked length on the stack;
     a pathologically long alias name could overflow it.  Consider
     xmalloc/free instead.  */
  nbuf = (char *) alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);
  nbuf[nlen] = '\0';

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    return TRUE;
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  return TRUE;
}
2278
2279 /* Create a Neon typed/indexed register alias using directives, e.g.:
2280 X .dn d5.s32[1]
2281 Y .qn 6.s16
2282 Z .dn d7
2283 T .dn Z[0]
2284 These typed registers can be used instead of the types specified after the
2285 Neon mnemonic, so long as all operands given have types. Types can also be
2286 specified directly, e.g.:
2287 vadd d0.s32, d1.s32, d2.s32 */
2288
static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index recorded.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* The directive selects the base register kind: .dn for D (VFP
     double) registers, .qn for Q (Neon quad) registers.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  /* A named base register must match the directive's kind.  */
  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      basereg = &mybasereg;
      /* Q registers are numbered in units of D registers internally,
	 hence the doubling.  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any typed-alias information already attached to the base
     register (e.g. "T .dn Z[0]" where Z is itself a typed alias).  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  /* Stack-allocated NUL-terminated working copy of the alias name.  */
  namebuf = (char *) alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  return TRUE;
}
2428
2429 /* Should never be called, as .req goes between the alias and the
2430 register name, not at the beginning of the line. */
2431
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* A well-formed .req appears between the alias and the register
     name, so this handler only runs when .req starts a line -- that
     is always a syntax error.  */
  as_bad (_("invalid syntax for .req directive"));
}
2437
static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  /* .dn must follow its alias name (see create_neon_reg_alias), so
     reaching this handler means it began the line -- a syntax error.  */
  as_bad (_("invalid syntax for .dn directive"));
}
2443
static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  /* .qn must follow its alias name (see create_neon_reg_alias), so
     reaching this handler means it began the line -- a syntax error.  */
  as_bad (_("invalid syntax for .qn directive"));
}
2449
2450 /* The .unreq directive deletes an alias which was previously defined
2451 by .req. For example:
2452
2453 my_alias .req r11
2454 .unreq my_alias */
2455
2456 static void
2457 s_unreq (int a ATTRIBUTE_UNUSED)
2458 {
2459 char * name;
2460 char saved_char;
2461
2462 name = input_line_pointer;
2463
2464 while (*input_line_pointer != 0
2465 && *input_line_pointer != ' '
2466 && *input_line_pointer != '\n')
2467 ++input_line_pointer;
2468
2469 saved_char = *input_line_pointer;
2470 *input_line_pointer = 0;
2471
2472 if (!*name)
2473 as_bad (_("invalid syntax for .unreq directive"));
2474 else
2475 {
2476 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2477 name);
2478
2479 if (!reg)
2480 as_bad (_("unknown register alias '%s'"), name);
2481 else if (reg->builtin)
2482 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2483 name);
2484 else
2485 {
2486 char * p;
2487 char * nbuf;
2488
2489 hash_delete (arm_reg_hsh, name, FALSE);
2490 free ((char *) reg->name);
2491 if (reg->neon)
2492 free (reg->neon);
2493 free (reg);
2494
2495 /* Also locate the all upper case and all lower case versions.
2496 Do not complain if we cannot find one or the other as it
2497 was probably deleted above. */
2498
2499 nbuf = strdup (name);
2500 for (p = nbuf; *p; p++)
2501 *p = TOUPPER (*p);
2502 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2503 if (reg)
2504 {
2505 hash_delete (arm_reg_hsh, nbuf, FALSE);
2506 free ((char *) reg->name);
2507 if (reg->neon)
2508 free (reg->neon);
2509 free (reg);
2510 }
2511
2512 for (p = nbuf; *p; p++)
2513 *p = TOLOWER (*p);
2514 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2515 if (reg)
2516 {
2517 hash_delete (arm_reg_hsh, nbuf, FALSE);
2518 free ((char *) reg->name);
2519 if (reg->neon)
2520 free (reg->neon);
2521 free (reg);
2522 }
2523
2524 free (nbuf);
2525 }
2526 }
2527
2528 *input_line_pointer = saved_char;
2529 demand_empty_rest_of_line ();
2530 }
2531
2532 /* Directives: Instruction set selection. */
2533
2534 #ifdef OBJ_ELF
2535 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2536 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2537 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2538 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2539
2540 /* Create a new mapping symbol for the transition to STATE. */
2541
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the AAELF mapping-symbol name for the requested state.
     All three kinds are untyped (BSF_NO_FLAGS) -- see the comment
     block above.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Record the ARM/Thumb-ness (and interworking) of the region on the
     new symbol itself.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same offset as the previous mapping symbol: the new one
	 supersedes it.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2615
2616 /* We must sometimes convert a region marked as code to data during
2617 code alignment, if an odd number of bytes have to be padded. The
2618 code mapping symbol is pushed to an aligned address. */
2619
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  /* At offset 0 the last and first mapping symbols coincide;
	     clear both fields.  */
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padded region as data, then resume STATE (code) after
     the BYTES of padding.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2642
2643 static void mapping_state_2 (enum mstate state, int max_chars);
2644
2645 /* Set the mapping state to STATE. Only call this when about to
2646 emit some STATE bytes to the file. */
2647
2648 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /*  PR gas/12931
	All ARM instructions require 4-byte alignment.
	(Almost) all Thumb instructions require 2-byte alignment.

	When emitting instructions into any section, mark the section
	appropriately.

	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
	but themselves require 2-byte alignment; this applies to some
	PC- relative forms.  However, these cases will invovle implicit
	literal pool generation or an explicit .align >=2, both of
	which will cause the section to me marked with sufficient
	alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Delegate to the worker; 0 means the symbol goes at the current
     frag position.  */
  mapping_state_2 (state, 0);
}
2681
2682 /* Same as mapping_state, but MAX_CHARS bytes have already been
2683 allocated. Put the mapping symbol that far back. */
2684
static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only normal (relocatable, loadable) sections get mapping
     symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
      || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      /* Something (e.g. data) was emitted before the first code
	 symbol: retroactively mark the start of the section as
	 data.  */
      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  /* Place the symbol MAX_CHARS bytes back, i.e. at the start of the
     already-allocated output.  */
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
2711 #undef TRANSITION
2712 #else
2713 #define mapping_state(x) ((void)0)
2714 #define mapping_state_2(x, y) ((void)0)
2715 #endif
2716
2717 /* Find the real, Thumb encoded start of a Thumb function. */
2718
2719 #ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  char * real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS * new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  /* Look up ".real_start_of<name>"; fall back to the original symbol
     (with a warning) if it does not exist.  */
  real_start = ACONCAT ((STUB_NAME, name, NULL));
  new_target = symbol_find (real_start);

  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
2752 #endif
2753
2754 static void
2755 opcode_select (int width)
2756 {
2757 switch (width)
2758 {
2759 case 16:
2760 if (! thumb_mode)
2761 {
2762 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2763 as_bad (_("selected processor does not support THUMB opcodes"));
2764
2765 thumb_mode = 1;
2766 /* No need to force the alignment, since we will have been
2767 coming from ARM mode, which is word-aligned. */
2768 record_alignment (now_seg, 1);
2769 }
2770 break;
2771
2772 case 32:
2773 if (thumb_mode)
2774 {
2775 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2776 as_bad (_("selected processor does not support ARM opcodes"));
2777
2778 thumb_mode = 0;
2779
2780 if (!need_pass_2)
2781 frag_align (2, 0, 0);
2782
2783 record_alignment (now_seg, 1);
2784 }
2785 break;
2786
2787 default:
2788 as_bad (_("invalid instruction size selected (%d)"), width);
2789 }
2790 }
2791
/* Handle the .arm directive: select 32-bit (ARM) instruction mode.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2798
/* Handle the .thumb directive: select 16-bit (Thumb) instruction mode.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2805
2806 static void
2807 s_code (int unused ATTRIBUTE_UNUSED)
2808 {
2809 int temp;
2810
2811 temp = get_absolute_expression ();
2812 switch (temp)
2813 {
2814 case 16:
2815 case 32:
2816 opcode_select (temp);
2817 break;
2818
2819 default:
2820 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2821 }
2822 }
2823
/* Handle the .force_thumb directive.  */

static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* thumb_mode == 2 distinguishes "forced" Thumb from Thumb
	 selected via opcode_select (which sets it to 1).  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2840
/* Handle the .thumb_func directive: switch to Thumb mode and flag the
   next label as naming a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2850
2851 /* Perform a .set directive, but also mark the alias as
2852 being a thumb function. */
2853
2854 static void
2855 s_thumb_set (int equiv)
2856 {
2857 /* XXX the following is a duplicate of the code for s_set() in read.c
2858 We cannot just call that code as we need to get at the symbol that
2859 is created. */
2860 char * name;
2861 char delim;
2862 char * end_name;
2863 symbolS * symbolP;
2864
2865 /* Especial apologies for the random logic:
2866 This just grew, and could be parsed much more simply!
2867 Dean - in haste. */
2868 name = input_line_pointer;
2869 delim = get_symbol_end ();
2870 end_name = input_line_pointer;
2871 *end_name = delim;
2872
2873 if (*input_line_pointer != ',')
2874 {
2875 *end_name = 0;
2876 as_bad (_("expected comma after name \"%s\""), name);
2877 *end_name = delim;
2878 ignore_rest_of_line ();
2879 return;
2880 }
2881
2882 input_line_pointer++;
2883 *end_name = 0;
2884
2885 if (name[0] == '.' && name[1] == '\0')
2886 {
2887 /* XXX - this should not happen to .thumb_set. */
2888 abort ();
2889 }
2890
2891 if ((symbolP = symbol_find (name)) == NULL
2892 && (symbolP = md_undefined_symbol (name)) == NULL)
2893 {
2894 #ifndef NO_LISTING
2895 /* When doing symbol listings, play games with dummy fragments living
2896 outside the normal fragment chain to record the file and line info
2897 for this symbol. */
2898 if (listing & LISTING_SYMBOLS)
2899 {
2900 extern struct list_info_struct * listing_tail;
2901 fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2902
2903 memset (dummy_frag, 0, sizeof (fragS));
2904 dummy_frag->fr_type = rs_fill;
2905 dummy_frag->line = listing_tail;
2906 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2907 dummy_frag->fr_symbol = symbolP;
2908 }
2909 else
2910 #endif
2911 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2912
2913 #ifdef OBJ_COFF
2914 /* "set" symbols are local unless otherwise specified. */
2915 SF_SET_LOCAL (symbolP);
2916 #endif /* OBJ_COFF */
2917 } /* Make a new symbol. */
2918
2919 symbol_table_insert (symbolP);
2920
2921 * end_name = delim;
2922
2923 if (equiv
2924 && S_IS_DEFINED (symbolP)
2925 && S_GET_SEGMENT (symbolP) != reg_section)
2926 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2927
2928 pseudo_set (symbolP);
2929
2930 demand_empty_rest_of_line ();
2931
2932 /* XXX Now we come to the Thumb specific bit of code. */
2933
2934 THUMB_SET_FUNC (symbolP, 1);
2935 ARM_SET_THUMB (symbolP, 1);
2936 #if defined OBJ_ELF || defined OBJ_COFF
2937 ARM_SET_INTERWORK (symbolP, support_interwork);
2938 #endif
2939 }
2940
2941 /* Directives: Mode selection. */
2942
2943 /* .syntax [unified|divided] - choose the new unified syntax
2944 (same for Arm and Thumb encoding, modulo slight differences in what
2945 can be represented) or the old divergent syntax for each mode. */
2946 static void
2947 s_syntax (int unused ATTRIBUTE_UNUSED)
2948 {
2949 char *name, delim;
2950
2951 name = input_line_pointer;
2952 delim = get_symbol_end ();
2953
2954 if (!strcasecmp (name, "unified"))
2955 unified_syntax = TRUE;
2956 else if (!strcasecmp (name, "divided"))
2957 unified_syntax = FALSE;
2958 else
2959 {
2960 as_bad (_("unrecognized syntax mode \"%s\""), name);
2961 return;
2962 }
2963 *input_line_pointer = delim;
2964 demand_empty_rest_of_line ();
2965 }
2966
2967 /* Directives: sectioning and alignment. */
2968
2969 /* Same as s_align_ptwo but align 0 => align 2. */
2970
static void
s_align (int unused ATTRIBUTE_UNUSED)
{
  int temp;
  bfd_boolean fill_p;
  long temp_fill;
  long max_alignment = 15;

  /* Note: the as_bad argument both reports and clamps the value.  */
  temp = get_absolute_expression ();
  if (temp > max_alignment)
    as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
  else if (temp < 0)
    {
      as_bad (_("alignment negative. 0 assumed."));
      temp = 0;
    }

  /* An optional second operand gives an explicit fill value.  */
  if (*input_line_pointer == ',')
    {
      input_line_pointer++;
      temp_fill = get_absolute_expression ();
      fill_p = TRUE;
    }
  else
    {
      fill_p = FALSE;
      temp_fill = 0;
    }

  /* .align 0 means align to a word boundary (2 ** 2).  */
  if (!temp)
    temp = 2;

  /* Only make a frag if we HAVE to.  */
  if (temp && !need_pass_2)
    {
      /* Without an explicit fill, code sections are padded with
	 no-ops rather than zeros.  */
      if (!fill_p && subseg_text_p (now_seg))
	frag_align_code (temp, 0);
      else
	frag_align (temp, (int) temp_fill, 0);
    }
  demand_empty_rest_of_line ();

  record_alignment (now_seg, temp);
}
3015
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3028
/* Handle the .even directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3040
3041 /* Directives: CodeComposer Studio. */
3042
3043 /* .ref (for CodeComposer Studio syntax only). */
/* .ref (for CodeComposer Studio syntax only).  Accepted and ignored
   when -mccs is in effect; an error otherwise.  */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3052
3053 /* If name is not NULL, then it is used for marking the beginning of a
3054 function, wherease if it is NULL then it means the function end. */
3055 static void
3056 asmfunc_debug (const char * name)
3057 {
3058 static const char * last_name = NULL;
3059
3060 if (name != NULL)
3061 {
3062 gas_assert (last_name == NULL);
3063 last_name = name;
3064
3065 if (debug_type == DEBUG_STABS)
3066 stabs_generate_asm_func (name, name);
3067 }
3068 else
3069 {
3070 gas_assert (last_name != NULL);
3071
3072 if (debug_type == DEBUG_STABS)
3073 stabs_generate_asm_endfunc (last_name, last_name);
3074
3075 last_name = NULL;
3076 }
3077 }
3078
/* Handle the .asmfunc directive (CodeComposer Studio syntax only):
   advance the asmfunc state machine to "waiting for the function-name
   label".  */

static void
s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  asmfunc_state = WAITING_ASMFUNC_NAME;
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".asmfunc repeated."));
	  break;

	case WAITING_ENDASMFUNC:
	  as_bad (_(".asmfunc without function."));
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
}
3103
/* Handle the .endasmfunc directive (CodeComposer Studio syntax only):
   close the current asm function and emit its end marker.  */

static void
s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
{
  if (codecomposer_syntax)
    {
      switch (asmfunc_state)
	{
	case OUTSIDE_ASMFUNC:
	  as_bad (_(".endasmfunc without a .asmfunc."));
	  break;

	case WAITING_ASMFUNC_NAME:
	  as_bad (_(".endasmfunc without function."));
	  break;

	case WAITING_ENDASMFUNC:
	  asmfunc_state = OUTSIDE_ASMFUNC;
	  /* NULL marks the end of the function -- see asmfunc_debug.  */
	  asmfunc_debug (NULL);
	  break;
	}
      demand_empty_rest_of_line ();
    }
  else
    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
}
3129
/* Handle the .def directive (CodeComposer Studio syntax only):
   equivalent to .global.  */

static void
s_ccs_def (int name)
{
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3138
3139 /* Directives: Literal pools. */
3140
3141 static literal_pool *
3142 find_literal_pool (void)
3143 {
3144 literal_pool * pool;
3145
3146 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3147 {
3148 if (pool->section == now_seg
3149 && pool->sub_section == now_subseg)
3150 break;
3151 }
3152
3153 return pool;
3154 }
3155
static literal_pool *
find_or_make_literal_pool (void)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool * pool;

  pool = find_literal_pool ();

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = (literal_pool *) xmalloc (sizeof (* pool));
      if (! pool)
	return NULL;

      pool->next_free_entry = 0;
      pool->section	    = now_seg;
      pool->sub_section	    = now_subseg;
      pool->next	    = list_of_pools;
      pool->symbol	    = NULL;
      /* Default alignment: 2 ** 2 = word.  */
      pool->alignment	    = 2;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      /* The symbol marks the pool's eventual position; its value is
	 filled in when the pool is dumped (s_ltorg).  */
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
    }

  /* Done.  */
  return pool;
}
3194
3195 /* Add the literal in the global 'inst'
3196 structure to the relevant literal pool. */
3197
static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* For an 8-byte literal, split the value into two 32-bit halves,
     swapped for big-endian targets.  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.reloc.exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing 4-byte constant entry ...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ... or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte value occupies two consecutive, 8-byte-aligned
	 4-byte entries.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A 4-byte literal can reuse a padding slot left by a previous
	 8-byte alignment.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
	        || inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Pool not 8-byte aligned: emit a marked 4-byte padding
		 slot first (may be reclaimed by a later 4-byte
		 literal -- see PADDING_SLOT handling above).  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  /* The pool now needs 8-byte (2 ** 3) alignment.  */
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's reloc to reference the pool symbol plus
     the literal's byte offset within the pool.  */
  inst.reloc.exp.X_op = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3358
/* Called when a label without a trailing colon starts a line (CCS
   syntax).  If we are waiting for an .asmfunc name, validate the
   label and use it as the function name.  UNUSED1 is the first
   character; REST points just past it.  Returns FALSE on an invalid
   label, TRUE otherwise.  */

bfd_boolean
tc_start_label_without_colon (char unused1 ATTRIBUTE_UNUSED, const char * rest)
{
  bfd_boolean ret = TRUE;

  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
    {
      const char *label = rest;

      /* Back up to the start of the line to recover the full label.  */
      while (!is_end_of_line[(int) label[-1]])
	--label;

      if (*label == '.')
	{
	  as_bad (_("Invalid label '%s'"), label);
	  ret = FALSE;
	}

      asmfunc_debug (label);

      asmfunc_state = WAITING_ENDASMFUNC;
    }

  return ret;
}
3384
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */
3387
static void
symbol_locate (symbolS * symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT segment,	/* Segment identifier (SEG_<something>).  */
	       valueT valu,	/* Symbol value.  */
	       fragS * frag)	/* Associated fragment.  */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy NAME into the notes obstack so the caller's buffer may be
     reused or freed afterwards.  */
  name_length = strlen (name) + 1; /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* Appending to a frozen symbol table would corrupt it.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
3435
/* Implement the .ltorg / .pool directives: dump the pending literal
   pool at the current location, give the pool's symbol its final name
   and value, and emit each literal with its recorded entry size.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The embedded \002 keeps the generated name out of the user's
     namespace — NOTE(review): assumed; confirm against symbol code.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3487
3488 #ifdef OBJ_ELF
3489 /* Forward declarations for functions below, in the MD interface
3490 section. */
3491 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3492 static valueT create_unwind_entry (int);
3493 static void start_unwind_section (const segT, int);
3494 static void add_unwind_opcode (valueT, int);
3495 static void flush_pending_unwind (void);
3496
3497 /* Directives: Data. */
3498
/* Implement the .word / .long directives for ELF targets: emit NBYTES
   of data per comma-separated expression, honouring ARM relocation
   suffixes (parsed by parse_reloc) attached to symbolic operands.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  /* Splice the relocation suffix out of the input buffer
		     so the whole operand can be re-parsed in one go.  */
		  char *save_buf = (char *) alloca (input_line_pointer - base);
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  /* Restore the original buffer contents.  */
		  memcpy (base, save_buf, p - base);

		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3589
3590 /* Emit an expression containing a 32-bit thumb instruction.
3591 Implementation based on put_thumb32_insn. */
3592
3593 static void
3594 emit_thumb32_expr (expressionS * exp)
3595 {
3596 expressionS exp_high = *exp;
3597
3598 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3599 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3600 exp->X_add_number &= 0xffff;
3601 emit_expr (exp, (unsigned int) THUMB_SIZE);
3602 }
3603
/* Guess the size in bytes of a Thumb instruction from its encoding:
   a value below 0xe800 is a 16-bit encoding, a full word at or above
   0xe8000000 is a 32-bit encoding, and anything in between is
   ambiguous (return 0).  */

static int
thumb_insn_size (int opcode)
{
  unsigned int raw = (unsigned int) opcode;

  if (raw < 0xe800u)
    return 2;
  if (raw >= 0xe8000000u)
    return 4;
  return 0;
}
3616
/* Emit the constant expression *EXP as one instruction of NBYTES bytes
   (NBYTES == 0 means deduce the Thumb size from the opcode value).
   Keeps the IT-block state machine informed.  Returns TRUE on success,
   FALSE after reporting an error.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* On little-endian Thumb, a 32-bit insn is stored as two
		 halfwords, high halfword first.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3661
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  Implements
   .inst (NBYTES == 0), .inst.n (2) and .inst.w (4).  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* ARM instructions are always four bytes; .inst.n/.inst.w make
	 no sense here.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3711
/* Parse a .rel31 directive: ".rel31 <bit>, <expression>".  BIT (0 or 1)
   supplies the top bit of the emitted word; the expression fills the
   low 31 bits via a BFD_RELOC_ARM_PREL31 relocation.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  p = frag_more (4);
  /* Pre-set the high bit; the fixup resolves the remaining 31 bits.  */
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
3751
3752 /* Directives: AEABI stack-unwind tables. */
3753
/* Parse an unwind_fnstart directive.  Simply records the current location
   and resets the per-function unwind state to its defaults.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;	/* -1: no personality chosen yet.  */
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;	/* Frame pointer defaults to sp.  */
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
3780
3781
/* Parse a handlerdata directive.  Creates the exception handling table entry
   for the function; data following the directive is the handler's
   private data.  */

static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  /* Force the table entry out now (argument 1 = have handler data).  */
  create_unwind_entry (1);
}
3797
/* Parse an unwind_fnend directive.  Generates the index table entry
   (two words: function offset, table offset or inline data) and
   restores the section that was current at .fnstart.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  /* WHERE is the offset of the two-word entry within the frag.  */
  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size fix just records the reference for the linker.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3867
3868
/* Parse an unwind_cantunwind directive: mark the function as having no
   unwind information at all.  */

static void
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  /* -2 is the internal marker for a cantunwind frame.  */
  unwind.personality_index = -2;
}
3883
3884
/* Parse a personalityindex directive: select one of the standard
   personality routines by number.  */

static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  /* Only a constant index in [0, 15] is accepted.  */
  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
3912
3913
/* Parse a personality directive: name a custom personality routine for
   the current function.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_end temporarily NUL-terminates the name in the input
     buffer; the delimiter character C is restored below.  */
  name = input_line_pointer;
  c = get_symbol_end ();
  p = input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
3934
3935
/* Parse a directive saving core registers.  RANGE is a bit mask of the
   registers in the list; emit the shortest unwind opcodes that pop it
   and account for the stack space the registers occupy.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4011
4012
/* Parse a directive saving FPA registers.  REG is the register number
   already parsed by the caller; a following ", <count>" gives the
   number of registers (1..4) to save.  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  /* FPA registers occupy 12 bytes each on the stack.  */
  unwind.frame_size += num_regs * 12;
}
4060
4061
/* Parse a directive saving VFP registers for ARMv6 and above.  Splits
   the list at d16: VFPv3 registers (d16-d31) and the classic bank
   (d0-d15) use different opcodes.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies eight bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4110
4111
/* Parse a directive saving VFP registers for pre-ARMv6.  */

static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;
  valueT op;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  /* Eight bytes per D register plus four extra — presumably the FSTMX
     format word; see the ARMv6 variant's comment.  */
  unwind.frame_size += count * 8 + 4;
}
4145
4146
4147 /* Parse a directive saving iWMMXt data registers. */
4148
4149 static void
4150 s_arm_unwind_save_mmxwr (void)
4151 {
4152 int reg;
4153 int hi_reg;
4154 int i;
4155 unsigned mask = 0;
4156 valueT op;
4157
4158 if (*input_line_pointer == '{')
4159 input_line_pointer++;
4160
4161 do
4162 {
4163 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4164
4165 if (reg == FAIL)
4166 {
4167 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4168 goto error;
4169 }
4170
4171 if (mask >> reg)
4172 as_tsktsk (_("register list not in ascending order"));
4173 mask |= 1 << reg;
4174
4175 if (*input_line_pointer == '-')
4176 {
4177 input_line_pointer++;
4178 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4179 if (hi_reg == FAIL)
4180 {
4181 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4182 goto error;
4183 }
4184 else if (reg >= hi_reg)
4185 {
4186 as_bad (_("bad register range"));
4187 goto error;
4188 }
4189 for (; reg < hi_reg; reg++)
4190 mask |= 1 << reg;
4191 }
4192 }
4193 while (skip_past_comma (&input_line_pointer) != FAIL);
4194
4195 skip_past_char (&input_line_pointer, '}');
4196
4197 demand_empty_rest_of_line ();
4198
4199 /* Generate any deferred opcodes because we're going to be looking at
4200 the list. */
4201 flush_pending_unwind ();
4202
4203 for (i = 0; i < 16; i++)
4204 {
4205 if (mask & (1 << i))
4206 unwind.frame_size += 8;
4207 }
4208
4209 /* Attempt to combine with a previous opcode. We do this because gcc
4210 likes to output separate unwind directives for a single block of
4211 registers. */
4212 if (unwind.opcode_count > 0)
4213 {
4214 i = unwind.opcodes[unwind.opcode_count - 1];
4215 if ((i & 0xf8) == 0xc0)
4216 {
4217 i &= 7;
4218 /* Only merge if the blocks are contiguous. */
4219 if (i < 6)
4220 {
4221 if ((mask & 0xfe00) == (1 << 9))
4222 {
4223 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4224 unwind.opcode_count--;
4225 }
4226 }
4227 else if (i == 6 && unwind.opcode_count >= 2)
4228 {
4229 i = unwind.opcodes[unwind.opcode_count - 2];
4230 reg = i >> 4;
4231 i &= 0xf;
4232
4233 op = 0xffff << (reg - 1);
4234 if (reg > 0
4235 && ((mask & op) == (1u << (reg - 1))))
4236 {
4237 op = (1 << (reg + i + 1)) - 1;
4238 op &= ~((1 << reg) - 1);
4239 mask |= op;
4240 unwind.opcode_count -= 2;
4241 }
4242 }
4243 }
4244 }
4245
4246 hi_reg = 15;
4247 /* We want to generate opcodes in the order the registers have been
4248 saved, ie. descending order. */
4249 for (reg = 15; reg >= -1; reg--)
4250 {
4251 /* Save registers in blocks. */
4252 if (reg < 0
4253 || !(mask & (1 << reg)))
4254 {
4255 /* We found an unsaved reg. Generate opcodes to save the
4256 preceding block. */
4257 if (reg != hi_reg)
4258 {
4259 if (reg == 9)
4260 {
4261 /* Short form. */
4262 op = 0xc0 | (hi_reg - 10);
4263 add_unwind_opcode (op, 1);
4264 }
4265 else
4266 {
4267 /* Long form. */
4268 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4269 add_unwind_opcode (op, 2);
4270 }
4271 }
4272 hi_reg = reg - 1;
4273 }
4274 }
4275
4276 return;
4277 error:
4278 ignore_rest_of_line ();
4279 }
4280
/* Parse a directive saving iWMMXt control registers (wCGR).  */

static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  skip_whitespace (input_line_pointer);

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* wCGR register numbers start at 8; map them to bits 0-7.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  /* Each saved control register occupies four bytes.  */
  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
error:
  ignore_rest_of_line ();
}
4348
4349
/* Parse an unwind_save directive.
   If the argument is non-zero, this is a .vsave directive.
   Dispatches to the appropriate handler based on the type of the first
   register in the list.  */

static void
s_arm_unwind_save (int arch_v6)
{
  char *peek;
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Figure out what sort of save we have.  */
  peek = input_line_pointer;

  if (*peek == '{')
    {
      had_brace = TRUE;
      peek++;
    }

  /* Peek at the first register only; the chosen handler re-parses the
     full list itself.  */
  reg = arm_reg_parse_multi (&peek);

  if (!reg)
    {
      as_bad (_("register expected"));
      ignore_rest_of_line ();
      return;
    }

  switch (reg->type)
    {
    case REG_TYPE_FN:
      if (had_brace)
	{
	  as_bad (_("FPA .unwind_save does not take a register list"));
	  ignore_rest_of_line ();
	  return;
	}
      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);
      return;

    case REG_TYPE_RN:
      s_arm_unwind_save_core ();
      return;

    case REG_TYPE_VFD:
      if (arch_v6)
	s_arm_unwind_save_vfp_armv6 ();
      else
	s_arm_unwind_save_vfp ();
      return;

    case REG_TYPE_MMXWR:
      s_arm_unwind_save_mmxwr ();
      return;

    case REG_TYPE_MMXWCG:
      s_arm_unwind_save_mmxwcg ();
      return;

    default:
      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
    }
}
4418
4419
/* Parse an unwind_movsp directive: the named register now holds the
   old stack pointer value (optionally offset by a constant).  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  /* A frame pointer was already established — movsp makes no sense.  */
  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4469
/* Parse an unwind_pad directive: record OFFSET extra bytes of stack
   adjustment (must be a multiple of four).  */

static void
s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
{
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (immediate_for_directive (&offset) == FAIL)
    return;

  if (offset & 3)
    {
      as_bad (_("stack increment must be multiple of 4"));
      ignore_rest_of_line ();
      return;
    }

  /* Don't generate any opcodes, just record the details for later.  */
  unwind.frame_size += offset;
  unwind.pending_offset += offset;

  demand_empty_rest_of_line ();
}
4496
4497 /* Parse an unwind_setfp directive. */
4498
4499 static void
4500 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4501 {
4502 int sp_reg;
4503 int fp_reg;
4504 int offset;
4505
4506 if (!unwind.proc_start)
4507 as_bad (MISSING_FNSTART);
4508
4509 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4510 if (skip_past_comma (&input_line_pointer) == FAIL)
4511 sp_reg = FAIL;
4512 else
4513 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4514
4515 if (fp_reg == FAIL || sp_reg == FAIL)
4516 {
4517 as_bad (_("expected <reg>, <reg>"));
4518 ignore_rest_of_line ();
4519 return;
4520 }
4521
4522 /* Optional constant. */
4523 if (skip_past_comma (&input_line_pointer) != FAIL)
4524 {
4525 if (immediate_for_directive (&offset) == FAIL)
4526 return;
4527 }
4528 else
4529 offset = 0;
4530
4531 demand_empty_rest_of_line ();
4532
4533 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4534 {
4535 as_bad (_("register must be either sp or set by a previous"
4536 "unwind_movsp directive"));
4537 return;
4538 }
4539
4540 /* Don't generate any opcodes, just record the information for later. */
4541 unwind.fp_reg = fp_reg;
4542 unwind.fp_used = 1;
4543 if (sp_reg == REG_SP)
4544 unwind.fp_offset = unwind.frame_size - offset;
4545 else
4546 unwind.fp_offset -= offset;
4547 }
4548
4549 /* Parse an unwind_raw directive. */
4550
4551 static void
4552 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4553 {
4554 expressionS exp;
4555 /* This is an arbitrary limit. */
4556 unsigned char op[16];
4557 int count;
4558
4559 if (!unwind.proc_start)
4560 as_bad (MISSING_FNSTART);
4561
4562 expression (&exp);
4563 if (exp.X_op == O_constant
4564 && skip_past_comma (&input_line_pointer) != FAIL)
4565 {
4566 unwind.frame_size += exp.X_add_number;
4567 expression (&exp);
4568 }
4569 else
4570 exp.X_op = O_illegal;
4571
4572 if (exp.X_op != O_constant)
4573 {
4574 as_bad (_("expected <offset>, <opcode>"));
4575 ignore_rest_of_line ();
4576 return;
4577 }
4578
4579 count = 0;
4580
4581 /* Parse the opcode. */
4582 for (;;)
4583 {
4584 if (count >= 16)
4585 {
4586 as_bad (_("unwind opcode too long"));
4587 ignore_rest_of_line ();
4588 }
4589 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4590 {
4591 as_bad (_("invalid unwind opcode"));
4592 ignore_rest_of_line ();
4593 return;
4594 }
4595 op[count++] = exp.X_add_number;
4596
4597 /* Parse the next byte. */
4598 if (skip_past_comma (&input_line_pointer) == FAIL)
4599 break;
4600
4601 expression (&exp);
4602 }
4603
4604 /* Add the opcode bytes in reverse order. */
4605 while (count--)
4606 add_unwind_opcode (op[count], 1);
4607
4608 demand_empty_rest_of_line ();
4609 }
4610
4611
/* Parse a .eabi_attribute directive.  */

static void
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
{
  int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);

  /* Remember which attributes the user set explicitly, so that code
     elsewhere can distinguish them from inferred defaults.  */
  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
    attributes_set_explicitly[tag] = 1;
}
4622
/* Emit a tls fix for the symbol: implements .tlsdescseq, which marks
   the current location with a TLS descriptor-sequence relocation.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* The fix covers the next four bytes of the current frag.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4646 #endif /* OBJ_ELF */
4647
4648 static void s_arm_arch (int);
4649 static void s_arm_object_arch (int);
4650 static void s_arm_cpu (int);
4651 static void s_arm_fpu (int);
4652 static void s_arm_arch_extension (int);
4653
4654 #ifdef TE_PE
4655
/* PE section-relative data directive: emit each comma-separated
   expression as a 32-bit section-relative value.  NOTE(review): the
   directive name is registered in a table outside this view.  */

static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      /* Force section-relative treatment of symbolic operands.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
}
4674 #endif /* TE_PE */
4675
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req", s_req, 0 },
  /* Following two are likewise never called.  */
  { "dn", s_dn, 0 },
  { "qn", s_qn, 0 },
  { "unreq", s_unreq, 0 },
  { "bss", s_bss, 0 },
  { "align", s_align, 0 },
  /* ARM/Thumb state and syntax selection.  */
  { "arm", s_arm, 0 },
  { "thumb", s_thumb, 0 },
  { "code", s_code, 0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func", s_thumb_func, 0 },
  { "thumb_set", s_thumb_set, 0 },
  { "even", s_even, 0 },
  { "ltorg", s_ltorg, 0 },
  { "pool", s_ltorg, 0 },
  { "syntax", s_syntax, 0 },
  /* Target selection directives.  */
  { "cpu", s_arm_cpu, 0 },
  { "arch", s_arm_arch, 0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu", s_arm_fpu, 0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* Data directives which may need ARM-specific relocation handling.  */
  { "word", s_arm_elf_cons, 4 },
  { "long", s_arm_elf_cons, 4 },
  { "inst.n", s_arm_elf_inst, 2 },
  { "inst.w", s_arm_elf_inst, 4 },
  { "inst", s_arm_elf_inst, 0 },
  { "rel31", s_arm_rel31, 0 },
  /* ARM EHABI stack-unwinding directives.  */
  { "fnstart", s_arm_unwind_fnstart, 0 },
  { "fnend", s_arm_unwind_fnend, 0 },
  { "cantunwind", s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save", s_arm_unwind_save, 0 },
  { "vsave", s_arm_unwind_save, 1 },
  { "movsp", s_arm_unwind_movsp, 0 },
  { "pad", s_arm_unwind_pad, 0 },
  { "setfp", s_arm_unwind_setfp, 0 },
  { "unwind_raw", s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },
  { "tlsdescseq", s_arm_tls_descseq, 0 },
#else
  { "word", cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc", dwarf2_directive_loc, 0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend", float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed", float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref", s_ccs_ref, 0},
  {"def", s_ccs_def, 0},
  {"asmfunc", s_ccs_asmfunc, 0},
  {"endasmfunc", s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4755 \f
4756 /* Parser functions used exclusively in instruction operands. */
4757
4758 /* Generic immediate-value read function for use in insn parsing.
4759 STR points to the beginning of the immediate (the leading #);
4760 VAL receives the value; if the value is outside [MIN, MAX]
4761 issue an error. PREFIX_OPT is true if the immediate prefix is
4762 optional. */
4763
4764 static int
4765 parse_immediate (char **str, int *val, int min, int max,
4766 bfd_boolean prefix_opt)
4767 {
4768 expressionS exp;
4769 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4770 if (exp.X_op != O_constant)
4771 {
4772 inst.error = _("constant expression required");
4773 return FAIL;
4774 }
4775
4776 if (exp.X_add_number < min || exp.X_add_number > max)
4777 {
4778 inst.error = _("immediate value out of range");
4779 return FAIL;
4780 }
4781
4782 *val = exp.X_add_number;
4783 return SUCCESS;
4784 }
4785
4786 /* Less-generic immediate-value read function with the possibility of loading a
4787 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4788 instructions. Puts the result directly in inst.operands[i]. */
4789
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* Parse into the caller-supplied expression when one is given.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits of the value go in .imm ...  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  /* ... and the high 32 bits go in .reg, flagged by .regisimm.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number holds the littlenum count of the bignum
	 stored in generic_bignum; PARTS is the count per 32 bits.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0]. Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm and the next 32 into .reg.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4858
4859 /* Returns the pseudo-register number of an FPA immediate constant,
4860 or FAIL if there isn't a valid constant here. */
4861
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char * save_in;
  expressionS exp;
  int i;
  int j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    /* Matched constant I maps to pseudo-register I + 8.  */
	    return i + 8;
	  /* Not at end of line - undo the consume and keep trying.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA value.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  /* expression () works on input_line_pointer, so temporarily redirect
     it at *str and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4949
/* Return non-zero if IMM, viewed as the bit pattern of a single
   precision float, has the "quarter-precision" form
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  /* The low 19 bits must all be clear.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 30-25 must be 0b011111 (when bit 29 is set) or 0b100000
     (when bit 29 is clear).  */
  if (imm & 0x20000000)
    return (imm & 0x7e000000) == 0x3e000000;

  return (imm & 0x7e000000) == 0x40000000;
}
4959
4960
4961 /* Detect the presence of a floating point or integer zero constant,
4962 i.e. #0.0 or #0. */
4963
4964 static bfd_boolean
4965 parse_ifimm_zero (char **in)
4966 {
4967 int error_code;
4968
4969 if (!is_immediate_prefix (**in))
4970 return FALSE;
4971
4972 ++*in;
4973
4974 /* Accept #0x0 as a synonym for #0. */
4975 if (strncmp (*in, "0x", 2) == 0)
4976 {
4977 int val;
4978 if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
4979 return FALSE;
4980 return TRUE;
4981 }
4982
4983 error_code = atof_generic (in, ".", EXP_CHARS,
4984 &generic_floating_point_number);
4985
4986 if (!error_code
4987 && generic_floating_point_number.sign == '+'
4988 && (generic_floating_point_number.low
4989 > generic_floating_point_number.leader))
4990 return TRUE;
4991
4992 return FALSE;
4993 }
4994
4995 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4996 0baBbbbbbc defgh000 00000000 00000000.
4997 The zero and minus-zero cases need special handling, since they can't be
4998 encoded in the "quarter-precision" float format, but can nonetheless be
4999 loaded as integer constants. */
5000
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan up to the first space, newline or NUL for a character that
	 can only appear in a floating-point literal.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  /* words[0] ends up in the most significant bits of fpword.  */
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Zero and minus-zero are accepted even though they cannot be
	 encoded in the quarter-precision format (see the comment at the
	 head of this function).  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5058
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Entry in the table (arm_shift_hsh) mapping a shift mnemonic, or an
   alias such as ASL, onto its shift_kind.  */
struct asm_shift_name
{
  const char *name;
  enum shift_kind kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
};
5080
5081 /* Parse a <shift> specifier on an ARM data processing instruction.
5082 This has three forms:
5083
5084 (LSL|LSR|ASL|ASR|ROR) Rs
5085 (LSL|LSR|ASL|ASR|ROR) #imm
5086 RRX
5087
5088 Note that ASL is assimilated to LSL in the instruction encoding, and
5089 RRX to ROR #0 (which cannot be written as such). */
5090
5091 static int
5092 parse_shift (char **str, int i, enum parse_shift_mode mode)
5093 {
5094 const struct asm_shift_name *shift_name;
5095 enum shift_kind shift;
5096 char *s = *str;
5097 char *p = s;
5098 int reg;
5099
5100 for (p = *str; ISALPHA (*p); p++)
5101 ;
5102
5103 if (p == *str)
5104 {
5105 inst.error = _("shift expression expected");
5106 return FAIL;
5107 }
5108
5109 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5110 p - *str);
5111
5112 if (shift_name == NULL)
5113 {
5114 inst.error = _("shift expression expected");
5115 return FAIL;
5116 }
5117
5118 shift = shift_name->kind;
5119
5120 switch (mode)
5121 {
5122 case NO_SHIFT_RESTRICT:
5123 case SHIFT_IMMEDIATE: break;
5124
5125 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5126 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5127 {
5128 inst.error = _("'LSL' or 'ASR' required");
5129 return FAIL;
5130 }
5131 break;
5132
5133 case SHIFT_LSL_IMMEDIATE:
5134 if (shift != SHIFT_LSL)
5135 {
5136 inst.error = _("'LSL' required");
5137 return FAIL;
5138 }
5139 break;
5140
5141 case SHIFT_ASR_IMMEDIATE:
5142 if (shift != SHIFT_ASR)
5143 {
5144 inst.error = _("'ASR' required");
5145 return FAIL;
5146 }
5147 break;
5148
5149 default: abort ();
5150 }
5151
5152 if (shift != SHIFT_RRX)
5153 {
5154 /* Whitespace can appear here if the next thing is a bare digit. */
5155 skip_whitespace (p);
5156
5157 if (mode == NO_SHIFT_RESTRICT
5158 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5159 {
5160 inst.operands[i].imm = reg;
5161 inst.operands[i].immisreg = 1;
5162 }
5163 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5164 return FAIL;
5165 }
5166 inst.operands[i].shift_kind = shift;
5167 inst.operands[i].shifted = 1;
5168 *str = p;
5169 return SUCCESS;
5170 }
5171
5172 /* Parse a <shifter_operand> for an ARM data processing instruction:
5173
5174 #<immediate>
5175 #<immediate>, <rotate>
5176 <Rm>
5177 <Rm>, <shift>
5178
5179 where <shift> is defined by parse_shift above, and <rotate> is a
5180 multiple of 2 between 0 and 30. Validation of immediate operands
5181 is deferred to md_apply_fix. */
5182
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Not a register: expect an immediate expression.  */
  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even number in [0, 30] and the base
	 constant must fit in 8 bits.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  The 8-bit constant occupies bits 0..7 and
	 the halved rotation amount bits 8..11, hence value << 7.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* No explicit rotation: leave the encoding to md_apply_fix.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
5241
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;	/* Name as written in source, without the colon.  */
  int alu_code;		/* Reloc for ADD/SUB, or 0 if not permitted.  */
  int ldr_code;		/* Reloc for LDR class, or 0 if not permitted.  */
  int ldrs_code;	/* Reloc for LDRS class, or 0 if not permitted.  */
  int ldc_code;		/* Reloc for LDC class, or 0 if not permitted.  */
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;

static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 } };	/* LDC */
5320
5321 /* Given the address of a pointer pointing to the textual name of a group
5322 relocation as may appear in assembler source, attempt to find its details
5323 in group_reloc_table. The pointer will be updated to the character after
5324 the trailing colon. On failure, FAIL will be returned; SUCCESS
5325 otherwise. On success, *entry will be updated to point at the relevant
5326 group_reloc_table entry. */
5327
5328 static int
5329 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5330 {
5331 unsigned int i;
5332 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5333 {
5334 int length = strlen (group_reloc_table[i].name);
5335
5336 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5337 && (*str)[length] == ':')
5338 {
5339 *out = &group_reloc_table[i];
5340 *str += (length + 1);
5341 return SUCCESS;
5342 }
5343 }
5344
5345 return FAIL;
5346 }
5347
5348 /* Parse a <shifter_operand> for an ARM data processing instruction
5349 (as for parse_shifter_operand) where group relocations are allowed:
5350
5351 #<immediate>
5352 #<immediate>, <rotate>
5353 #:<group_reloc>:<expression>
5354 <Rm>
5355 <Rm>, <shift>
5356
5357 where <group_reloc> is one of the strings defined in group_reloc_table.
5358 The hashes are optional.
5359
5360 Everything else is as for parse_shifter_operand. */
5361
static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip the '#:' (two characters) or the bare ':' (one).  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).
	 Every entry in group_reloc_table has a non-zero alu_code, so
	 this assertion cannot fire for table-driven input.  */
      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
5403
5404 /* Parse a Neon alignment expression. Information is written to
5405 inst.operands[i]. We assume the initial ':' has been skipped.
5406
5407 align .imm = align << 8, .immisalign=1, .preind=0 */
5408 static parse_operand_result
5409 parse_neon_alignment (char **str, int i)
5410 {
5411 char *p = *str;
5412 expressionS exp;
5413
5414 my_get_expression (&exp, &p, GE_NO_PREFIX);
5415
5416 if (exp.X_op != O_constant)
5417 {
5418 inst.error = _("alignment must be constant");
5419 return PARSE_OPERAND_FAIL;
5420 }
5421
5422 inst.operands[i].imm = exp.X_add_number << 8;
5423 inst.operands[i].immisalign = 1;
5424 /* Alignments are not pre-indexes. */
5425 inst.operands[i].preind = 0;
5426
5427 *str = p;
5428 return PARSE_OPERAND_SUCCESS;
5429 }
5430
5431 /* Parse all forms of an ARM address expression. Information is written
5432 to inst.operands[i] and/or inst.reloc.
5433
5434 Preindexed addressing (.preind=1):
5435
5436 [Rn, #offset] .reg=Rn .reloc.exp=offset
5437 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5438 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5439 .shift_kind=shift .reloc.exp=shift_imm
5440
5441 These three may have a trailing ! which causes .writeback to be set also.
5442
5443 Postindexed addressing (.postind=1, .writeback=1):
5444
5445 [Rn], #offset .reg=Rn .reloc.exp=offset
5446 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5447 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5448 .shift_kind=shift .reloc.exp=shift_imm
5449
5450 Unindexed addressing (.preind=0, .postind=0):
5451
5452 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5453
5454 Other:
5455
5456 [Rn]{!} shorthand for [Rn,#0]{!}
5457 =immediate .isreg=0 .reloc.exp=immediate
5458 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5459
5460 It is the caller's responsibility to check for addressing modes not
5461 supported by the instruction, and to set inst.reloc.type. */
5462
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': the operand is either '=immediate' or a bare label.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* '[Rn,' - a pre-indexed form follows.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Register offset, optionally shifted.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Neither register nor alignment: back up over any '-' so the
	     expression parser sees the sign itself.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero code in the table means this group relocation is
		 not defined for this class of instruction.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* After ']', a comma introduces a post-indexed or unindexed form.  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5714
5715 static int
5716 parse_address (char **str, int i)
5717 {
5718 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5719 ? SUCCESS : FAIL;
5720 }
5721
5722 static parse_operand_result
5723 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5724 {
5725 return parse_address_main (str, i, 1, type);
5726 }
5727
5728 /* Parse an operand for a MOVW or MOVT instruction. */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  /* An explicit :lower16: / :upper16: prefix selects the relocation
     variant; otherwise the operand must be an in-range constant.  */
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  /* NOTE(review): this assumes inst.reloc.type was BFD_RELOC_UNUSED on
     entry (presumably reset by the caller before operand parsing).  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      /* Step over the 9-character prefix matched above.  */
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* No relocation prefix: the value must be a constant that fits in
	 a 16-bit halfword.  */
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5767
/* Miscellaneous.  */

/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   *STR is advanced past the operand on success.  LHS is TRUE when the
   PSR is the write target of an MSR (as opposed to being read by MRS);
   it controls the implicit field bits added below.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile special registers (e.g. IPSR, PRIMASK, ...) are looked up
	 in arm_v7m_psr_hsh rather than being hard-coded here.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *PSR names, cut the lookup key just past the first 'r'/'R'
	 so that any "_<suffix>" is excluded from the hash lookup and left
	 for check_suffix below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the 4-character register name matched above.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* A repeated bit letter sets the 0x20 (for nzcvq) or 0x2 (for g)
	     marker, which is rejected as a bad bitmask further down.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v and q must be present to select PSR_f.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits, a partial nzcvq set, or a duplicated
	     'g' bit.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR field suffix (e.g. "_cxsf"): look it up whole.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5966
5967 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5968 value suitable for splatting into the AIF field of the instruction. */
5969
5970 static int
5971 parse_cps_flags (char **str)
5972 {
5973 int val = 0;
5974 int saw_a_flag = 0;
5975 char *s = *str;
5976
5977 for (;;)
5978 switch (*s++)
5979 {
5980 case '\0': case ',':
5981 goto done;
5982
5983 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5984 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5985 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5986
5987 default:
5988 inst.error = _("unrecognized CPS flag");
5989 return FAIL;
5990 }
5991
5992 done:
5993 if (saw_a_flag == 0)
5994 {
5995 inst.error = _("missing CPS flags");
5996 return FAIL;
5997 }
5998
5999 *str = s - 1;
6000 return val;
6001 }
6002
6003 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6004 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6005
6006 static int
6007 parse_endian_specifier (char **str)
6008 {
6009 int little_endian;
6010 char *s = *str;
6011
6012 if (strncasecmp (s, "BE", 2))
6013 little_endian = 0;
6014 else if (strncasecmp (s, "LE", 2))
6015 little_endian = 1;
6016 else
6017 {
6018 inst.error = _("valid endian specifiers are be or le");
6019 return FAIL;
6020 }
6021
6022 if (ISALNUM (s[2]) || s[2] == '_')
6023 {
6024 inst.error = _("valid endian specifiers are be or le");
6025 return FAIL;
6026 }
6027
6028 *str = s + 2;
6029 return little_endian;
6030 }
6031
6032 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6033 value suitable for poking into the rotate field of an sxt or sxta
6034 instruction, or FAIL on error. */
6035
6036 static int
6037 parse_ror (char **str)
6038 {
6039 int rot;
6040 char *s = *str;
6041
6042 if (strncasecmp (s, "ROR", 3) == 0)
6043 s += 3;
6044 else
6045 {
6046 inst.error = _("missing rotation field after comma");
6047 return FAIL;
6048 }
6049
6050 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6051 return FAIL;
6052
6053 switch (rot)
6054 {
6055 case 0: *str = s; return 0x0;
6056 case 8: *str = s; return 0x1;
6057 case 16: *str = s; return 0x2;
6058 case 24: *str = s; return 0x3;
6059
6060 default:
6061 inst.error = _("rotation can only be 0, 8, 16, or 24");
6062 return FAIL;
6063 }
6064 }
6065
6066 /* Parse a conditional code (from conds[] below). The value returned is in the
6067 range 0 .. 14, or FAIL. */
6068 static int
6069 parse_cond (char **str)
6070 {
6071 char *q;
6072 const struct asm_cond *c;
6073 int n;
6074 /* Condition codes are always 2 characters, so matching up to
6075 3 characters is sufficient. */
6076 char cond[3];
6077
6078 q = *str;
6079 n = 0;
6080 while (ISALPHA (*q) && n < 3)
6081 {
6082 cond[n] = TOLOWER (*q);
6083 q++;
6084 n++;
6085 }
6086
6087 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6088 if (!c)
6089 {
6090 inst.error = _("condition required");
6091 return FAIL;
6092 }
6093
6094 *str = q;
6095 return c->value;
6096 }
6097
6098 /* If the given feature available in the selected CPU, mark it as used.
6099 Returns TRUE iff feature is available. */
6100 static bfd_boolean
6101 mark_feature_used (const arm_feature_set *feature)
6102 {
6103 /* Ensure the option is valid on the current architecture. */
6104 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6105 return FALSE;
6106
6107 /* Add the appropriate architecture feature for the barrier option used.
6108 */
6109 if (thumb_mode)
6110 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6111 else
6112 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6113
6114 return TRUE;
6115 }
6116
6117 /* Parse an option for a barrier instruction. Returns the encoding for the
6118 option, or FAIL. */
6119 static int
6120 parse_barrier (char **str)
6121 {
6122 char *p, *q;
6123 const struct asm_barrier_opt *o;
6124
6125 p = q = *str;
6126 while (ISALPHA (*q))
6127 q++;
6128
6129 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6130 q - p);
6131 if (!o)
6132 return FAIL;
6133
6134 if (!mark_feature_used (&o->arch))
6135 return FAIL;
6136
6137 *str = q;
6138 return o->value;
6139 }
6140
6141 /* Parse the operands of a table branch instruction. Similar to a memory
6142 operand. */
6143 static int
6144 parse_tb (char **str)
6145 {
6146 char * p = *str;
6147 int reg;
6148
6149 if (skip_past_char (&p, '[') == FAIL)
6150 {
6151 inst.error = _("'[' expected");
6152 return FAIL;
6153 }
6154
6155 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6156 {
6157 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6158 return FAIL;
6159 }
6160 inst.operands[0].reg = reg;
6161
6162 if (skip_past_comma (&p) == FAIL)
6163 {
6164 inst.error = _("',' expected");
6165 return FAIL;
6166 }
6167
6168 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6169 {
6170 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6171 return FAIL;
6172 }
6173 inst.operands[0].imm = reg;
6174
6175 if (skip_past_comma (&p) == SUCCESS)
6176 {
6177 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6178 return FAIL;
6179 if (inst.reloc.exp.X_add_number != 1)
6180 {
6181 inst.error = _("invalid shift");
6182 return FAIL;
6183 }
6184 inst.operands[0].shifted = 1;
6185 }
6186
6187 if (skip_past_char (&p, ']') == FAIL)
6188 {
6189 inst.error = _("']' expected");
6190 return FAIL;
6191 }
6192 *str = p;
6193 return SUCCESS;
6194 }
6195
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.

   The local counter I starts at *WHICH_OPERAND and is bumped as operands
   are recorded; on failure *WHICH_OPERAND is deliberately left unchanged.
   The "Case N" comments below refer to the VMOV variants enumerated in
   do_neon_mov.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  First operand is an S, D or Q
	 register; what follows decides which case applies.  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      /* A Q register cannot be moved to/from core registers.  */
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register destination: a second core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow the S-register pair.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>
	     parse_big_immediate fills in the operand itself.  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7: first operand is a core register.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: VMOV <Rd>, <Rn>, <Sm>, <Se> — a second
		 single-precision register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13: VMOV <Rd>, <Sm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6418
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code is kept in the
   low 16 bits and the Thumb code in the high 16 bits; parse_operands
   picks one half based on the instruction set being assembled.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
6423
/* Matcher codes for parse_operands.

   Naming conventions: an "OP_o..." code marks an optional operand; all
   optional codes must come after OP_FIRST_OPTIONAL (see the comparison in
   parse_operands), so the relative order of the enumerators below is
   significant.  The MIX_ARM_THUMB_OPERANDS entries at the end pack two
   codes into one value for instructions whose constraints differ between
   ARM and Thumb.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,	 /*				1 .. 32 */
  OP_oI32z,	 /*				0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,	 /* Optional Neon double precision register */
  OP_oRNQ,	 /* Optional Neon quad precision register */
  OP_oRNDQ,	 /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6554
6555 /* Generic instruction operand parser. This does no encoding and no
6556 semantic validation; it merely squirrels values away in the inst
6557 structure. Returns SUCCESS or FAIL depending on whether the
6558 specified grammar matched. */
6559 static int
6560 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6561 {
6562 unsigned const int *upat = pattern;
6563 char *backtrack_pos = 0;
6564 const char *backtrack_error = 0;
6565 int i, val = 0, backtrack_index = 0;
6566 enum arm_reg_type rtype;
6567 parse_operand_result result;
6568 unsigned int op_parse_code;
6569
6570 #define po_char_or_fail(chr) \
6571 do \
6572 { \
6573 if (skip_past_char (&str, chr) == FAIL) \
6574 goto bad_args; \
6575 } \
6576 while (0)
6577
6578 #define po_reg_or_fail(regtype) \
6579 do \
6580 { \
6581 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6582 & inst.operands[i].vectype); \
6583 if (val == FAIL) \
6584 { \
6585 first_error (_(reg_expected_msgs[regtype])); \
6586 goto failure; \
6587 } \
6588 inst.operands[i].reg = val; \
6589 inst.operands[i].isreg = 1; \
6590 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6591 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6592 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6593 || rtype == REG_TYPE_VFD \
6594 || rtype == REG_TYPE_NQ); \
6595 } \
6596 while (0)
6597
6598 #define po_reg_or_goto(regtype, label) \
6599 do \
6600 { \
6601 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6602 & inst.operands[i].vectype); \
6603 if (val == FAIL) \
6604 goto label; \
6605 \
6606 inst.operands[i].reg = val; \
6607 inst.operands[i].isreg = 1; \
6608 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6609 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6610 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6611 || rtype == REG_TYPE_VFD \
6612 || rtype == REG_TYPE_NQ); \
6613 } \
6614 while (0)
6615
6616 #define po_imm_or_fail(min, max, popt) \
6617 do \
6618 { \
6619 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6620 goto failure; \
6621 inst.operands[i].imm = val; \
6622 } \
6623 while (0)
6624
6625 #define po_scalar_or_goto(elsz, label) \
6626 do \
6627 { \
6628 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6629 if (val == FAIL) \
6630 goto label; \
6631 inst.operands[i].reg = val; \
6632 inst.operands[i].isscalar = 1; \
6633 } \
6634 while (0)
6635
6636 #define po_misc_or_fail(expr) \
6637 do \
6638 { \
6639 if (expr) \
6640 goto failure; \
6641 } \
6642 while (0)
6643
6644 #define po_misc_or_fail_no_backtrack(expr) \
6645 do \
6646 { \
6647 result = expr; \
6648 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6649 backtrack_pos = 0; \
6650 if (result != PARSE_OPERAND_SUCCESS) \
6651 goto failure; \
6652 } \
6653 while (0)
6654
6655 #define po_barrier_or_imm(str) \
6656 do \
6657 { \
6658 val = parse_barrier (&str); \
6659 if (val == FAIL && ! ISALPHA (*str)) \
6660 goto immediate; \
6661 if (val == FAIL \
6662 /* ISB can only take SY as an option. */ \
6663 || ((inst.instruction & 0xf0) == 0x60 \
6664 && val != 0xf)) \
6665 { \
6666 inst.error = _("invalid barrier type"); \
6667 backtrack_pos = 0; \
6668 goto failure; \
6669 } \
6670 } \
6671 while (0)
6672
6673 skip_whitespace (str);
6674
6675 for (i = 0; upat[i] != OP_stop; i++)
6676 {
6677 op_parse_code = upat[i];
6678 if (op_parse_code >= 1<<16)
6679 op_parse_code = thumb ? (op_parse_code >> 16)
6680 : (op_parse_code & ((1<<16)-1));
6681
6682 if (op_parse_code >= OP_FIRST_OPTIONAL)
6683 {
6684 /* Remember where we are in case we need to backtrack. */
6685 gas_assert (!backtrack_pos);
6686 backtrack_pos = str;
6687 backtrack_error = inst.error;
6688 backtrack_index = i;
6689 }
6690
6691 if (i > 0 && (i > 1 || inst.operands[0].present))
6692 po_char_or_fail (',');
6693
6694 switch (op_parse_code)
6695 {
6696 /* Registers */
6697 case OP_oRRnpc:
6698 case OP_oRRnpcsp:
6699 case OP_RRnpc:
6700 case OP_RRnpcsp:
6701 case OP_oRR:
6702 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6703 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6704 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6705 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6706 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6707 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6708 case OP_oRND:
6709 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6710 case OP_RVC:
6711 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6712 break;
6713 /* Also accept generic coprocessor regs for unknown registers. */
6714 coproc_reg:
6715 po_reg_or_fail (REG_TYPE_CN);
6716 break;
6717 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6718 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6719 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6720 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6721 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6722 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6723 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6724 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6725 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6726 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6727 case OP_oRNQ:
6728 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6729 case OP_oRNDQ:
6730 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6731 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6732 case OP_oRNSDQ:
6733 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6734
6735 /* Neon scalar. Using an element size of 8 means that some invalid
6736 scalars are accepted here, so deal with those in later code. */
6737 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6738
6739 case OP_RNDQ_I0:
6740 {
6741 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6742 break;
6743 try_imm0:
6744 po_imm_or_fail (0, 0, TRUE);
6745 }
6746 break;
6747
6748 case OP_RVSD_I0:
6749 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6750 break;
6751
6752 case OP_RSVD_FI0:
6753 {
6754 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6755 break;
6756 try_ifimm0:
6757 if (parse_ifimm_zero (&str))
6758 inst.operands[i].imm = 0;
6759 else
6760 {
6761 inst.error
6762 = _("only floating point zero is allowed as immediate value");
6763 goto failure;
6764 }
6765 }
6766 break;
6767
6768 case OP_RR_RNSC:
6769 {
6770 po_scalar_or_goto (8, try_rr);
6771 break;
6772 try_rr:
6773 po_reg_or_fail (REG_TYPE_RN);
6774 }
6775 break;
6776
6777 case OP_RNSDQ_RNSC:
6778 {
6779 po_scalar_or_goto (8, try_nsdq);
6780 break;
6781 try_nsdq:
6782 po_reg_or_fail (REG_TYPE_NSDQ);
6783 }
6784 break;
6785
6786 case OP_RNDQ_RNSC:
6787 {
6788 po_scalar_or_goto (8, try_ndq);
6789 break;
6790 try_ndq:
6791 po_reg_or_fail (REG_TYPE_NDQ);
6792 }
6793 break;
6794
6795 case OP_RND_RNSC:
6796 {
6797 po_scalar_or_goto (8, try_vfd);
6798 break;
6799 try_vfd:
6800 po_reg_or_fail (REG_TYPE_VFD);
6801 }
6802 break;
6803
6804 case OP_VMOV:
6805 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6806 not careful then bad things might happen. */
6807 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6808 break;
6809
6810 case OP_RNDQ_Ibig:
6811 {
6812 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6813 break;
6814 try_immbig:
6815 /* There's a possibility of getting a 64-bit immediate here, so
6816 we need special handling. */
6817 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6818 == FAIL)
6819 {
6820 inst.error = _("immediate value is out of range");
6821 goto failure;
6822 }
6823 }
6824 break;
6825
6826 case OP_RNDQ_I63b:
6827 {
6828 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6829 break;
6830 try_shimm:
6831 po_imm_or_fail (0, 63, TRUE);
6832 }
6833 break;
6834
6835 case OP_RRnpcb:
6836 po_char_or_fail ('[');
6837 po_reg_or_fail (REG_TYPE_RN);
6838 po_char_or_fail (']');
6839 break;
6840
6841 case OP_RRnpctw:
6842 case OP_RRw:
6843 case OP_oRRw:
6844 po_reg_or_fail (REG_TYPE_RN);
6845 if (skip_past_char (&str, '!') == SUCCESS)
6846 inst.operands[i].writeback = 1;
6847 break;
6848
6849 /* Immediates */
6850 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6851 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6852 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6853 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6854 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6855 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6856 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6857 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6858 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6859 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6860 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6861 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6862
6863 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6864 case OP_oI7b:
6865 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6866 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6867 case OP_oI31b:
6868 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6869 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6870 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6871 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6872
6873 /* Immediate variants */
6874 case OP_oI255c:
6875 po_char_or_fail ('{');
6876 po_imm_or_fail (0, 255, TRUE);
6877 po_char_or_fail ('}');
6878 break;
6879
6880 case OP_I31w:
6881 /* The expression parser chokes on a trailing !, so we have
6882 to find it first and zap it. */
6883 {
6884 char *s = str;
6885 while (*s && *s != ',')
6886 s++;
6887 if (s[-1] == '!')
6888 {
6889 s[-1] = '\0';
6890 inst.operands[i].writeback = 1;
6891 }
6892 po_imm_or_fail (0, 31, TRUE);
6893 if (str == s - 1)
6894 str = s;
6895 }
6896 break;
6897
6898 /* Expressions */
6899 case OP_EXPi: EXPi:
6900 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6901 GE_OPT_PREFIX));
6902 break;
6903
6904 case OP_EXP:
6905 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6906 GE_NO_PREFIX));
6907 break;
6908
6909 case OP_EXPr: EXPr:
6910 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6911 GE_NO_PREFIX));
6912 if (inst.reloc.exp.X_op == O_symbol)
6913 {
6914 val = parse_reloc (&str);
6915 if (val == -1)
6916 {
6917 inst.error = _("unrecognized relocation suffix");
6918 goto failure;
6919 }
6920 else if (val != BFD_RELOC_UNUSED)
6921 {
6922 inst.operands[i].imm = val;
6923 inst.operands[i].hasreloc = 1;
6924 }
6925 }
6926 break;
6927
6928 /* Operand for MOVW or MOVT. */
6929 case OP_HALF:
6930 po_misc_or_fail (parse_half (&str));
6931 break;
6932
6933 /* Register or expression. */
6934 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6935 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6936
6937 /* Register or immediate. */
6938 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6939 I0: po_imm_or_fail (0, 0, FALSE); break;
6940
6941 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6942 IF:
6943 if (!is_immediate_prefix (*str))
6944 goto bad_args;
6945 str++;
6946 val = parse_fpa_immediate (&str);
6947 if (val == FAIL)
6948 goto failure;
6949 /* FPA immediates are encoded as registers 8-15.
6950 parse_fpa_immediate has already applied the offset. */
6951 inst.operands[i].reg = val;
6952 inst.operands[i].isreg = 1;
6953 break;
6954
6955 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6956 I32z: po_imm_or_fail (0, 32, FALSE); break;
6957
6958 /* Two kinds of register. */
6959 case OP_RIWR_RIWC:
6960 {
6961 struct reg_entry *rege = arm_reg_parse_multi (&str);
6962 if (!rege
6963 || (rege->type != REG_TYPE_MMXWR
6964 && rege->type != REG_TYPE_MMXWC
6965 && rege->type != REG_TYPE_MMXWCG))
6966 {
6967 inst.error = _("iWMMXt data or control register expected");
6968 goto failure;
6969 }
6970 inst.operands[i].reg = rege->number;
6971 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6972 }
6973 break;
6974
6975 case OP_RIWC_RIWG:
6976 {
6977 struct reg_entry *rege = arm_reg_parse_multi (&str);
6978 if (!rege
6979 || (rege->type != REG_TYPE_MMXWC
6980 && rege->type != REG_TYPE_MMXWCG))
6981 {
6982 inst.error = _("iWMMXt control register expected");
6983 goto failure;
6984 }
6985 inst.operands[i].reg = rege->number;
6986 inst.operands[i].isreg = 1;
6987 }
6988 break;
6989
6990 /* Misc */
6991 case OP_CPSF: val = parse_cps_flags (&str); break;
6992 case OP_ENDI: val = parse_endian_specifier (&str); break;
6993 case OP_oROR: val = parse_ror (&str); break;
6994 case OP_COND: val = parse_cond (&str); break;
6995 case OP_oBARRIER_I15:
6996 po_barrier_or_imm (str); break;
6997 immediate:
6998 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6999 goto failure;
7000 break;
7001
7002 case OP_wPSR:
7003 case OP_rPSR:
7004 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7005 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7006 {
7007 inst.error = _("Banked registers are not available with this "
7008 "architecture.");
7009 goto failure;
7010 }
7011 break;
7012 try_psr:
7013 val = parse_psr (&str, op_parse_code == OP_wPSR);
7014 break;
7015
7016 case OP_APSR_RR:
7017 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7018 break;
7019 try_apsr:
7020 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7021 instruction). */
7022 if (strncasecmp (str, "APSR_", 5) == 0)
7023 {
7024 unsigned found = 0;
7025 str += 5;
7026 while (found < 15)
7027 switch (*str++)
7028 {
7029 case 'c': found = (found & 1) ? 16 : found | 1; break;
7030 case 'n': found = (found & 2) ? 16 : found | 2; break;
7031 case 'z': found = (found & 4) ? 16 : found | 4; break;
7032 case 'v': found = (found & 8) ? 16 : found | 8; break;
7033 default: found = 16;
7034 }
7035 if (found != 15)
7036 goto failure;
7037 inst.operands[i].isvec = 1;
7038 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7039 inst.operands[i].reg = REG_PC;
7040 }
7041 else
7042 goto failure;
7043 break;
7044
7045 case OP_TB:
7046 po_misc_or_fail (parse_tb (&str));
7047 break;
7048
7049 /* Register lists. */
7050 case OP_REGLST:
7051 val = parse_reg_list (&str);
7052 if (*str == '^')
7053 {
7054 inst.operands[i].writeback = 1;
7055 str++;
7056 }
7057 break;
7058
7059 case OP_VRSLST:
7060 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7061 break;
7062
7063 case OP_VRDLST:
7064 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7065 break;
7066
7067 case OP_VRSDLST:
7068 /* Allow Q registers too. */
7069 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7070 REGLIST_NEON_D);
7071 if (val == FAIL)
7072 {
7073 inst.error = NULL;
7074 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7075 REGLIST_VFP_S);
7076 inst.operands[i].issingle = 1;
7077 }
7078 break;
7079
7080 case OP_NRDLST:
7081 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7082 REGLIST_NEON_D);
7083 break;
7084
7085 case OP_NSTRLST:
7086 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7087 &inst.operands[i].vectype);
7088 break;
7089
7090 /* Addressing modes */
7091 case OP_ADDR:
7092 po_misc_or_fail (parse_address (&str, i));
7093 break;
7094
7095 case OP_ADDRGLDR:
7096 po_misc_or_fail_no_backtrack (
7097 parse_address_group_reloc (&str, i, GROUP_LDR));
7098 break;
7099
7100 case OP_ADDRGLDRS:
7101 po_misc_or_fail_no_backtrack (
7102 parse_address_group_reloc (&str, i, GROUP_LDRS));
7103 break;
7104
7105 case OP_ADDRGLDC:
7106 po_misc_or_fail_no_backtrack (
7107 parse_address_group_reloc (&str, i, GROUP_LDC));
7108 break;
7109
7110 case OP_SH:
7111 po_misc_or_fail (parse_shifter_operand (&str, i));
7112 break;
7113
7114 case OP_SHG:
7115 po_misc_or_fail_no_backtrack (
7116 parse_shifter_operand_group_reloc (&str, i));
7117 break;
7118
7119 case OP_oSHll:
7120 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7121 break;
7122
7123 case OP_oSHar:
7124 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7125 break;
7126
7127 case OP_oSHllar:
7128 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7129 break;
7130
7131 default:
7132 as_fatal (_("unhandled operand code %d"), op_parse_code);
7133 }
7134
7135 /* Various value-based sanity checks and shared operations. We
7136 do not signal immediate failures for the register constraints;
7137 this allows a syntax error to take precedence. */
7138 switch (op_parse_code)
7139 {
7140 case OP_oRRnpc:
7141 case OP_RRnpc:
7142 case OP_RRnpcb:
7143 case OP_RRw:
7144 case OP_oRRw:
7145 case OP_RRnpc_I0:
7146 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7147 inst.error = BAD_PC;
7148 break;
7149
7150 case OP_oRRnpcsp:
7151 case OP_RRnpcsp:
7152 if (inst.operands[i].isreg)
7153 {
7154 if (inst.operands[i].reg == REG_PC)
7155 inst.error = BAD_PC;
7156 else if (inst.operands[i].reg == REG_SP)
7157 inst.error = BAD_SP;
7158 }
7159 break;
7160
7161 case OP_RRnpctw:
7162 if (inst.operands[i].isreg
7163 && inst.operands[i].reg == REG_PC
7164 && (inst.operands[i].writeback || thumb))
7165 inst.error = BAD_PC;
7166 break;
7167
7168 case OP_CPSF:
7169 case OP_ENDI:
7170 case OP_oROR:
7171 case OP_wPSR:
7172 case OP_rPSR:
7173 case OP_COND:
7174 case OP_oBARRIER_I15:
7175 case OP_REGLST:
7176 case OP_VRSLST:
7177 case OP_VRDLST:
7178 case OP_VRSDLST:
7179 case OP_NRDLST:
7180 case OP_NSTRLST:
7181 if (val == FAIL)
7182 goto failure;
7183 inst.operands[i].imm = val;
7184 break;
7185
7186 default:
7187 break;
7188 }
7189
7190 /* If we get here, this operand was successfully parsed. */
7191 inst.operands[i].present = 1;
7192 continue;
7193
7194 bad_args:
7195 inst.error = BAD_ARGS;
7196
7197 failure:
7198 if (!backtrack_pos)
7199 {
7200 /* The parse routine should already have set inst.error, but set a
7201 default here just in case. */
7202 if (!inst.error)
7203 inst.error = _("syntax error");
7204 return FAIL;
7205 }
7206
7207 /* Do not backtrack over a trailing optional argument that
7208 absorbed some text. We will only fail again, with the
7209 'garbage following instruction' error message, which is
7210 probably less helpful than the current one. */
7211 if (backtrack_index == i && backtrack_pos != str
7212 && upat[i+1] == OP_stop)
7213 {
7214 if (!inst.error)
7215 inst.error = _("syntax error");
7216 return FAIL;
7217 }
7218
7219 /* Try again, skipping the optional argument at backtrack_pos. */
7220 str = backtrack_pos;
7221 inst.error = backtrack_error;
7222 inst.operands[backtrack_index].present = 0;
7223 i = backtrack_index;
7224 backtrack_pos = 0;
7225 }
7226
7227 /* Check that we have parsed all the arguments. */
7228 if (*str != '\0' && !inst.error)
7229 inst.error = _("garbage following instruction");
7230
7231 return inst.error ? FAIL : SUCCESS;
7232 }
7233
7234 #undef po_char_or_fail
7235 #undef po_reg_or_fail
7236 #undef po_reg_or_goto
7237 #undef po_imm_or_fail
7238 #undef po_scalar_or_fail
7239 #undef po_barrier_or_imm
7240
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   calling function.  Only usable inside functions returning void,
   since it expands to a bare "return;".  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7252
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   If REG is r13 (SP) or r15 (PC), set inst.error to BAD_SP or BAD_PC
   respectively and return from the calling (void) function.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
7264
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is only issued when deprecation warnings
   are enabled (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
      do					\
	if (warn_on_deprecated && reg == REG_SP)	\
	  as_tsktsk (_("use of r13 is deprecated"));	\
      while (0)
7272
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate the 32-bit value V left by N bits (N taken modulo 32).  The
   macro arguments are fully parenthesized so that compound expressions
   bind correctly, and both shift counts are masked with 31 so that a
   rotation of 0 does not shift by 32 (undefined behaviour in C).  */
#define rotate_left(v, n) (((v) << ((n) & 31)) | ((v) >> ((32 - (n)) & 31)))
7276
7277 /* If VAL can be encoded in the immediate field of an ARM instruction,
7278 return the encoded form. Otherwise, return FAIL. */
7279
7280 static unsigned int
7281 encode_arm_immediate (unsigned int val)
7282 {
7283 unsigned int a, i;
7284
7285 for (i = 0; i < 32; i += 2)
7286 if ((a = rotate_left (val, i)) <= 0xff)
7287 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7288
7289 return FAIL;
7290 }
7291
7292 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7293 return the encoded form. Otherwise, return FAIL. */
7294 static unsigned int
7295 encode_thumb32_immediate (unsigned int val)
7296 {
7297 unsigned int a, i;
7298
7299 if (val <= 0xff)
7300 return val;
7301
7302 for (i = 1; i <= 24; i++)
7303 {
7304 a = val >> i;
7305 if ((val & ~(0xff << i)) == 0)
7306 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7307 }
7308
7309 a = val & 0xff;
7310 if (val == ((a << 16) | a))
7311 return 0x100 | a;
7312 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7313 return 0x300 | a;
7314
7315 a = val & 0xff00;
7316 if (val == ((a << 16) | a))
7317 return 0x200 | (a >> 8);
7318
7319 return FAIL;
7320 }
/* Encode a VFP SP or DP register number into inst.instruction.  POS
   selects which operand field (Sd/Sn/Sm or Dd/Dn/Dm) receives REG.
   D registers above 15 require the D32 extension; if it is missing an
   error is reported via first_error and nothing is encoded.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D16-D31 need fpu_vfp_ext_d32: record its use, or complain.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* S registers split the number into a 4-bit field plus one low bit;
     D registers use a 4-bit field plus one high (bank) bit.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7375
7376 /* Encode a <shift> in an ARM-format instruction. The immediate,
7377 if any, is handled by md_apply_fix. */
7378 static void
7379 encode_arm_shift (int i)
7380 {
7381 if (inst.operands[i].shift_kind == SHIFT_RRX)
7382 inst.instruction |= SHIFT_ROR << 5;
7383 else
7384 {
7385 inst.instruction |= inst.operands[i].shift_kind << 5;
7386 if (inst.operands[i].immisreg)
7387 {
7388 inst.instruction |= SHIFT_BY_REG;
7389 inst.instruction |= inst.operands[i].imm << 8;
7390 }
7391 else
7392 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7393 }
7394 }
7395
7396 static void
7397 encode_arm_shifter_operand (int i)
7398 {
7399 if (inst.operands[i].isreg)
7400 {
7401 inst.instruction |= inst.operands[i].reg;
7402 encode_arm_shift (i);
7403 }
7404 else
7405 {
7406 inst.instruction |= INST_IMMEDIATE;
7407 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7408 inst.instruction |= inst.operands[i].imm;
7409 }
7410 }
7411
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encode the base register and pre/post-index and write-back bits of
   operand I, and warn when the write-back base clashes with the
   transfer register.  IS_T is true for user-mode ("T") loads/stores,
   which only permit post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      /* parse_address only sets postind together with writeback.  */
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* A write-back (or post-indexed) base equal to the transfer register
     (Rn field, bits 16-19, matching the Rd field, bits 12-15) deserves
     a warning mentioning the direction of the transfer.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7454
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, possibly with an immediate shift applied.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX is ROR with an implicit zero shift amount.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7514
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter, so a scaled register index is never
     encodable.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7558
7559 /* Write immediate bits [7:0] to the following locations:
7560
7561 |28/24|23 19|18 16|15 4|3 0|
7562 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7563
7564 This function is used by VMOV/VMVN/VORR/VBIC. */
7565
7566 static void
7567 neon_write_immbits (unsigned immbits)
7568 {
7569 inst.instruction |= immbits & 0xf;
7570 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7571 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7572 }
7573
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* Fall through to invert the low word as well.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7610
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D, i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      /* Each byte must be all-zeros or all-ones.  */
      if (field != 0 && field != mask)
	return 0;
    }

  return 1;
}
7622
/* For immediate of above form, return 0bABCD: bit N of the result is
   the low bit of byte N of IMM.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1u) << byte;

  return result;
}
7631
/* Compress quarter-float representation to 0b...000 abcdefgh: combine
   the sign (bit 31 of IMM) with bits 25-19 of the IEEE single-precision
   pattern.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;	/* Bits 25-19.  */
  unsigned sign = (imm >> 24) & 0x80;	/* Bit 31 moved to bit 7.  */

  return sign | low7;
}
7639
/* Returns CMODE.  IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction.  *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-precision float: cmode 0xf; only valid for 32-bit elements
     and not for the MVN form.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* Every byte 0x00 or 0xff: cmode 0xe with op forced to 1.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit value is only encodable when both 32-bit
	 halves are identical; then fall through to the 32-bit cases.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* One byte in any of the four positions (cmode 0/2/4/6), or a
	 byte followed by trailing ones (cmode 0xc/0xd).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Not encodable as 32-bit; try 16-bit elements if the value is a
	 repeated halfword.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* One byte in either half of the halfword: cmode 0x8/0xa.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Not encodable as 16-bit; try 8-bit elements if the halfword is
	 a repeated byte.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7749
/* The flavour of "ldr rd, =expr" pseudo being processed; selects which
   move encodings move_or_literal_pool may substitute for the load.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb load pseudo.  */
  CONST_ARM,	/* ARM load pseudo.  */
  CONST_VEC	/* VFP/Neon (vldr) load pseudo.  */
};
7756
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p   = (t == CONST_ARM);
  bfd_boolean vec64_p = (t == CONST_VEC) && !inst.operands[i].issingle;

  /* "=expr" only makes sense on a load; pick the load bit matching the
     instruction width so we can check.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }
  /* When the value is a known constant, try to replace the load with a
     cheaper move/mvn/vmov.  */
  if ((inst.reloc.exp.X_op == O_constant
       || inst.reloc.exp.X_op == O_big)
      && !inst.operands[i].issingle)
    {
      if (thumb_p && inst.reloc.exp.X_op == O_constant)
	{
	  /* Pre-UAL syntax only: an 8-bit constant fits mov(1).  */
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else if (arm_p && inst.reloc.exp.X_op == O_constant)
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  /* Try the bitwise complement, which mvn can reproduce.  */
	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
      else if (vec64_p)
	{
	  int op = 0;
	  unsigned immbits = 0;
	  unsigned immlo = inst.operands[1].imm;
	  /* High word: given explicitly, zero for an unsigned
	     expression, otherwise the sign-extension of the low word
	     (the int -> bfd_int64_t cast chain sign-extends immlo).  */
	  unsigned immhi = inst.operands[1].regisimm
			   ? inst.operands[1].reg
			   : inst.reloc.exp.X_unsigned
			     ? 0
			     : ((bfd_int64_t)((int) immlo)) >> 32;
	  int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
					       &op, 64, NT_invtype);

	  /* If VMOV cannot encode it, try the inverted value for the
	     VMVN form.  */
	  if (cmode == FAIL)
	    {
	      neon_invert_size (&immlo, &immhi, 64);
	      op = !op;
	      cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
					       &op, 64, NT_invtype);
	    }
	  if (cmode != FAIL)
	    {
	      inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
				  | (1 << 23)
				  | (cmode << 8)
				  | (op << 5)
				  | (1 << 4);
	      /* Fill other bits in vmov encoding for both thumb and arm.  */
	      if (thumb_mode)
		inst.instruction |= (0x7 << 29) | (0xF << 24);
	      else
		inst.instruction |= (0xF << 28) | (0x1 << 25);
	      neon_write_immbits (immbits);
	      return TRUE;
	    }
	}
    }

  /* No move form matched: fall back to a PC-relative literal-pool
     load (4 bytes per entry, or 8 for a 64-bit vector literal).  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  /* Rewrite the address operand as [pc, #offset]; the offset is filled
     in when the relocation against the pool entry is resolved.  */
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
7880
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* "=const" with a vector destination: synthesize a vmov or a
	 literal-pool load instead of a plain address.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form carries an 8-bit option value, not an offset.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Pick the offset relocation, unless the parser already selected a
     group relocation (or the special LDR_PC_G0), which is preserved.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
7957
7958 /* Functions for instruction encoding, sorted by sub-architecture.
7959 First some generics; their names are taken from the conventional
7960 bit positions for register arguments in ARM format instructions. */
7961
/* Encoder for instructions taking no operands: the base opcode needs
   no further fields filled in.  */
static void
do_noargs (void)
{
}
7966
7967 static void
7968 do_rd (void)
7969 {
7970 inst.instruction |= inst.operands[0].reg << 12;
7971 }
7972
7973 static void
7974 do_rd_rm (void)
7975 {
7976 inst.instruction |= inst.operands[0].reg << 12;
7977 inst.instruction |= inst.operands[1].reg;
7978 }
7979
7980 static void
7981 do_rm_rn (void)
7982 {
7983 inst.instruction |= inst.operands[0].reg;
7984 inst.instruction |= inst.operands[1].reg << 16;
7985 }
7986
7987 static void
7988 do_rd_rn (void)
7989 {
7990 inst.instruction |= inst.operands[0].reg << 12;
7991 inst.instruction |= inst.operands[1].reg << 16;
7992 }
7993
7994 static void
7995 do_rn_rd (void)
7996 {
7997 inst.instruction |= inst.operands[0].reg << 16;
7998 inst.instruction |= inst.operands[1].reg << 12;
7999 }
8000
8001 static bfd_boolean
8002 check_obsolete (const arm_feature_set *feature, const char *msg)
8003 {
8004 if (ARM_CPU_IS_ANY (cpu_variant))
8005 {
8006 as_tsktsk ("%s", msg);
8007 return TRUE;
8008 }
8009 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8010 {
8011 as_bad ("%s", msg);
8012 return TRUE;
8013 }
8014
8015 return FALSE;
8016 }
8017
/* Encode Rd (bits 12-15), Rm (bits 0-3) and Rn (bits 16-19), with
   extra legality checks when the opcode is SWP/SWPB.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8041
8042 static void
8043 do_rd_rn_rm (void)
8044 {
8045 inst.instruction |= inst.operands[0].reg << 12;
8046 inst.instruction |= inst.operands[1].reg << 16;
8047 inst.instruction |= inst.operands[2].reg;
8048 }
8049
/* Encode Rm (bits 0-3), Rd (bits 12-15) and Rn (bits 16-19), where the
   third operand is a bare-register address: PC is rejected as the base
   and any offset expression other than a plain 0 is rejected.
   O_illegal is accepted alongside O_constant — presumably an absent or
   unparseable offset leaves X_op == O_illegal with X_add_number == 0
   (TODO confirm against parse_address); rejecting it here, rather than
   letting a bogus operand reach the encoder, avoids an internal
   error.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8062
8063 static void
8064 do_imm0 (void)
8065 {
8066 inst.instruction |= inst.operands[0].imm;
8067 }
8068
/* Encode Rd (bits 12-15) plus a coprocessor address operand.  Errors
   from encode_arm_cp_address are reported through inst.error, so its
   return value is deliberately ignored.  */
static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8075
8076 /* ARM instructions, in alphabetical order by function name (except
8077 that wrapper functions appear immediately after the function they
8078 wrap). */
8079
/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* The -8 compensates for the PC offset implied by "#label-.-8".  */
  inst.reloc.exp.X_add_number -= 8;
}
8094
/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* The pseudo-op expands into two ARM instructions.  */
  inst.size = INSN_SIZE * 2;
  /* In ARM state the PC reads 8 bytes ahead of the current insn.  */
  inst.reloc.exp.X_add_number -= 8;
}
8112
8113 static void
8114 do_arit (void)
8115 {
8116 if (!inst.operands[1].present)
8117 inst.operands[1].reg = inst.operands[0].reg;
8118 inst.instruction |= inst.operands[0].reg << 12;
8119 inst.instruction |= inst.operands[1].reg << 16;
8120 encode_arm_shifter_operand (2);
8121 }
8122
8123 static void
8124 do_barrier (void)
8125 {
8126 if (inst.operands[0].present)
8127 inst.instruction |= inst.operands[0].imm;
8128 else
8129 inst.instruction |= 0xf;
8130 }
8131
/* BFC (bit-field clear): <Rd> in bits 12-15, LSB in bits 7-11 and
   MSB in bits 16-20.  */
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8143
/* BFI (bit-field insert): like BFC but with a source register in
   bits 0-3; "bfi rd, #0, ..." is the BFC alternative syntax.  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8163
/* SBFX/UBFX (bit-field extract): <Rd> in bits 12-15, <Rn> in bits
   0-3, LSB in bits 7-11 and width-minus-one in bits 16-20.  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
8174
/* ARM V5 breakpoint instruction (argument parse)
     BKPT <16 bit unsigned immediate>
   Instruction is not conditional.
	The bit pattern given in insns[] has the COND_ALWAYS condition,
	and it is an error if the caller tried to override that.  */

static void
do_bkpt (void)
{
  /* Top 12 of 16 bits to bits 19:8.  */
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;

  /* Bottom 4 of 16 bits to bits 3:0.  */
  inst.instruction |= inst.operands[0].imm & 0xf;
}
8190
/* Select the relocation for a branch instruction.  A "(plt)" or
   "(tlscall)" suffix on the target overrides DEFAULT_RELOC; any other
   suffix is rejected.  All branch relocs are PC-relative.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8207
/* Encode B{cond}.  EABI v4+ objects use the JUMP reloc so the linker
   may redirect the branch (e.g. for interworking).  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8218
/* Encode BL{cond}.  For EABI v4+ an unconditional BL uses the CALL
   reloc (the linker may turn it into BLX); a conditional BL cannot
   be converted, so it gets the JUMP reloc instead.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8234
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>	ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* BLX(1) has a fixed, unconditional bit pattern.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8266
/* Encode BX <Rm>.  For EABI v4+ objects targeting ARMv4t or earlier,
   emit an R_ARM_V4BX reloc so the linker can rewrite the BX for CPUs
   that lack it.  */
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
      want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8290
8291
/* ARM v5TEJ.  Jump to Jazelle code.  */

static void
do_bxj (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
8302
8303 /* Co-processor data operation:
8304 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8305 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8306 static void
8307 do_cdp (void)
8308 {
8309 inst.instruction |= inst.operands[0].reg << 8;
8310 inst.instruction |= inst.operands[1].imm << 20;
8311 inst.instruction |= inst.operands[2].reg << 12;
8312 inst.instruction |= inst.operands[3].reg << 16;
8313 inst.instruction |= inst.operands[4].reg;
8314 inst.instruction |= inst.operands[5].imm << 5;
8315 }
8316
8317 static void
8318 do_cmp (void)
8319 {
8320 inst.instruction |= inst.operands[0].reg << 16;
8321 encode_arm_shifter_operand (1);
8322 }
8323
8324 /* Transfer between coprocessor and ARM registers.
8325 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8326 MRC2
8327 MCR{cond}
8328 MCR2
8329
8330 No special properties. */
8331
/* Describes a coprocessor register whose access is deprecated and/or
   obsoleted from some architecture version onwards.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* Opcode 1 field.  */
  unsigned crn;			/* Coprocessor register CRn.  */
  unsigned crm;			/* Coprocessor register CRm.  */
  int opc2;			/* Opcode 2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsoleted.  */
  const char *dep_msg;		/* Warning issued for deprecated access.  */
  const char *obs_msg;		/* Error issued for obsoleted access.  */
};
8344
8345 #define DEPR_ACCESS_V8 \
8346 N_("This coprocessor register access is deprecated in ARMv8")
8347
/* Table of all deprecated coprocessor registers.  All of the current
   entries are deprecated (not obsoleted) from ARMv8 onwards.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};
8367
8368 #undef DEPR_ACCESS_V8
8369
/* Number of entries in the deprecated coprocessor register table.  */
static const size_t deprecated_coproc_reg_count =
    sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8372
/* Encode MRC/MRC2/MCR/MCR2 (moves between an ARM register and a
   coprocessor register), rejecting registers that are unpredictable
   for the chosen direction and warning about accesses to registers
   that are deprecated for the selected architecture.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn when the register being accessed is deprecated on this
     architecture (unless assembling for the "any" CPU).  */
  for (i = 0; i < deprecated_coproc_reg_count; ++i)
    {
      const struct deprecated_coproc_regs_s *r =
	deprecated_coproc_regs + i;

      if (inst.operands[0].reg == r->cp
	  && inst.operands[1].imm == r->opc1
	  && inst.operands[3].reg == r->crn
	  && inst.operands[4].reg == r->crm
	  && inst.operands[5].imm == r->opc2)
	{
	  if (! ARM_CPU_IS_ANY (cpu_variant)
	      && warn_on_deprecated
	      && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	    as_tsktsk ("%s", r->dep_msg);
	}
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8422
8423 /* Transfer between coprocessor register and pair of ARM registers.
8424 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8425 MCRR2
8426 MRRC{cond}
8427 MRRC2
8428
8429 Two XScale instructions are special cases of these:
8430
8431 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8432 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8433
8434 Result unpredictable if Rd or Rn is R15. */
8435
/* Encode MCRR/MCRR2/MRRC/MRRC2: coprocessor number in bits 8-11,
   opcode in bits 4-7, the two ARM registers in bits 12-15 and 16-19,
   and CRm in bits 0-3.  PC (and SP in Thumb) are rejected.  */
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8461
8462 static void
8463 do_cpsi (void)
8464 {
8465 inst.instruction |= inst.operands[0].imm << 6;
8466 if (inst.operands[1].present)
8467 {
8468 inst.instruction |= CPSI_MMOD;
8469 inst.instruction |= inst.operands[1].imm;
8470 }
8471 }
8472
8473 static void
8474 do_dbg (void)
8475 {
8476 inst.instruction |= inst.operands[0].imm;
8477 }
8478
/* SDIV/UDIV: <Rd> in bits 16-19, <Rn> in bits 0-3, <Rm> in bits 8-11.
   With only two operands the destination doubles as the dividend.
   PC is not allowed in any position.  */
static void
do_div (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = (inst.operands[1].present
	? inst.operands[1].reg : Rd);
  Rm = inst.operands[2].reg;

  constraint ((Rd == REG_PC), BAD_PC);
  constraint ((Rn == REG_PC), BAD_PC);
  constraint ((Rm == REG_PC), BAD_PC);

  inst.instruction |= Rd << 16;
  inst.instruction |= Rn << 0;
  inst.instruction |= Rm << 8;
}
8497
/* Handle the IT pseudo-instruction when it appears in ARM code.  */
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit nothing for the IT itself.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the IT mask and condition so that the following
	 instructions can be validated against the IT block.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8514
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i = ffs (range) - 1;

  /* An empty register list in bogus source makes ffs return 0, giving
     I == -1; shifting by a negative count below would be undefined
     behaviour, so treat that case explicitly as "not exactly one
     register".  */
  return (i < 0 || i > 15 || range != (1 << i)) ? -1 : i;
}
8523
/* Encode an LDM/STM-class instruction: base register in bits 16-19,
   register list in bits 0-15.  FROM_PUSH_POP_MNEM is non-zero when
   called for the PUSH/POP mnemonics, in which case a single-register
   list is re-encoded using the A2 (LDR/STR based) form.  Warns about
   writeback combinations the architecture declares UNPREDICTABLE.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8574
/* LDM/STM spelled directly (PUSH/POP go through do_push_pop).  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8580
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

     LDRccD R, mode
     STRccD R, mode.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* Rt == LR would make Rt2 the PC.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second transfer register defaults to Rt + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8622
/* LDREX-class encoding: Rt in bits 12-15, base register in bits
   16-19.  Only a plain [Rn] address with zero offset is valid.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The offset was validated above; no fixup is needed.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8654
/* LDREXD: loads an even/odd register pair; Rt in bits 12-15, the base
   register in bits 16-19.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8670
8671 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8672 which is not a multiple of four is UNPREDICTABLE. */
8673 static void
8674 check_ldr_r15_aligned (void)
8675 {
8676 constraint (!(inst.operands[1].immisreg)
8677 && (inst.operands[0].reg == REG_PC
8678 && inst.operands[1].reg == REG_PC
8679 && (inst.reloc.exp.X_add_number & 0x3)),
8680 _("ldr to register 15 must be 4-byte alligned"));
8681 }
8682
/* LDR/STR (word/byte): Rt in bits 12-15.  A bare expression operand
   (no register) is routed through the literal pool machinery.  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
8693
/* LDRT/STRT (user-mode translation): always post-indexed.  */
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8712
/* Halfword and signed-byte load/store operations.  */

static void
do_ldstv4 (void)
{
  /* Mode 3 has no PC-relative form for a load into PC.  */
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
8725
/* LDRHT/LDRSBT/etc (user-mode translation, mode 3): always
   post-indexed.  */
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
8744
/* Co-processor register load/store.
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>
   Coprocessor number goes in bits 8-11, CRd in bits 12-15.  */
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
8754
/* MLA/MLS: <Rd> in bits 16-19, <Rm> in bits 0-3, <Rs> in bits 8-11
   and the accumulator <Rn> in bits 12-15.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
8769
8770 static void
8771 do_mov (void)
8772 {
8773 inst.instruction |= inst.operands[0].reg << 12;
8774 encode_arm_shifter_operand (1);
8775 }
8776
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* Bit 22 distinguishes MOVT from MOVW.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* A :lower16:/:upper16: operand keeps its reloc; only a plain
     immediate is encoded directly here.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
8798
8799 static void do_vfp_nsyn_opcode (const char *);
8800
/* Handle "mrs" spelled with VFP operands using the pre-unified
   syntax: translate to fmstat or fmrx.  Returns SUCCESS if the
   instruction was handled here, FAIL otherwise.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* fmstat takes no operands; clear ours before re-dispatching.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
8819
8820 static int
8821 do_vfp_nsyn_msr (void)
8822 {
8823 if (inst.operands[0].isvec)
8824 do_vfp_nsyn_opcode ("fmxr");
8825 else
8826 return FAIL;
8827
8828 return SUCCESS;
8829 }
8830
/* VMRS: move from a VFP system register (bits 16-19) to an ARM
   register (bits 12-15).  */
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
8854
/* VMSR: move from an ARM register (bits 12-15) to a VFP system
   register (bits 16-19).  */
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
8873
/* MRS: move a PSR (or banked register) into an ARM register
   (bits 12-15).  */
static void
do_mrs (void)
{
  unsigned br;

  /* Allow the VFP "fmstat"/"fmrx" spellings of mrs.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      br = inst.operands[1].reg;
      /* NOTE(review): (br & 0xf0000) can never equal 0xf000, so the
	 second test is always true and only the 0x200 (banked
	 register marker) check has any effect -- confirm intent.  */
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
8902
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  /* Allow the VFP "fmxr" spelling of msr.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* The parsed PSR field mask already sits at its final bit
     positions.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: leave the value to be fixed up later.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
8923
/* MUL: <Rd> in bits 16-19, <Rm> in bits 0-3, <Rs> in bits 8-11.
   With two operands, Rd doubles as Rs.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is UNPREDICTABLE before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
8939
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.
   RdLo goes in bits 12-15, RdHi in bits 16-19, Rm in bits 0-3 and
   Rs in bits 8-11.  */

static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
8964
/* NOP, optionally with a hint operand.  On v6k and later (or whenever
   an operand is given) encode the architectural hint form; otherwise
   keep the traditional "mov r0, r0" encoding from insns[].  */
static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
8978
8979 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8980 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8981 Condition defaults to COND_ALWAYS.
8982 Error if Rd, Rn or Rm are R15. */
8983
8984 static void
8985 do_pkhbt (void)
8986 {
8987 inst.instruction |= inst.operands[0].reg << 12;
8988 inst.instruction |= inst.operands[1].reg << 16;
8989 inst.instruction |= inst.operands[2].reg;
8990 if (inst.operands[3].present)
8991 encode_arm_shift (3);
8992 }
8993
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  Note that Rn and Rm swap fields.  */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9016
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

     PLD(W) <addr_mode>

   Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9037
/* ARMv7: PLI <addr_mode>  */
static void
do_pli (void)
{
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* PLI is encoded with the pre-index bit clear; undo what
     encode_arm_addr_mode_2 just set.  */
  inst.instruction &= ~PRE_INDEX;
}
9053
/* PUSH/POP: rewrite the single register-list operand into the
   canonical "ldm/stm sp!, {list}" operand pair and encode it.  */
static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list into operand 1 and synthesise a
     writeback SP base as operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9066
9067 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9068 word at the specified address and the following word
9069 respectively.
9070 Unconditionally executed.
9071 Error if Rn is R15. */
9072
9073 static void
9074 do_rfe (void)
9075 {
9076 inst.instruction |= inst.operands[0].reg << 16;
9077 if (inst.operands[0].writeback)
9078 inst.instruction |= WRITE_BACK;
9079 }
9080
9081 /* ARM V6 ssat (argument parse). */
9082
9083 static void
9084 do_ssat (void)
9085 {
9086 inst.instruction |= inst.operands[0].reg << 12;
9087 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9088 inst.instruction |= inst.operands[2].reg;
9089
9090 if (inst.operands[3].present)
9091 encode_arm_shift (3);
9092 }
9093
9094 /* ARM V6 usat (argument parse). */
9095
9096 static void
9097 do_usat (void)
9098 {
9099 inst.instruction |= inst.operands[0].reg << 12;
9100 inst.instruction |= inst.operands[1].imm << 16;
9101 inst.instruction |= inst.operands[2].reg;
9102
9103 if (inst.operands[3].present)
9104 encode_arm_shift (3);
9105 }
9106
9107 /* ARM V6 ssat16 (argument parse). */
9108
9109 static void
9110 do_ssat16 (void)
9111 {
9112 inst.instruction |= inst.operands[0].reg << 12;
9113 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9114 inst.instruction |= inst.operands[2].reg;
9115 }
9116
9117 static void
9118 do_usat16 (void)
9119 {
9120 inst.instruction |= inst.operands[0].reg << 12;
9121 inst.instruction |= inst.operands[1].imm << 16;
9122 inst.instruction |= inst.operands[2].reg;
9123 }
9124
/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
   preserving the other bits.

   setend <endian_specifier>, where <endian_specifier> is either
   BE or LE.  */

static void
do_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* A non-zero operand means BE; set the E bit.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9141
/* Shift mnemonics (LSL, LSR, ASR, ROR): encoded as MOV with a shifter
   operand.  <Rd> goes in bits 12-15, the shifted register <Rm> in
   bits 0-3; the shift amount is either a register (bits 8-11) or an
   immediate resolved through a fixup.  With two operands Rd doubles
   as Rm.  */
static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9162
9163 static void
9164 do_smc (void)
9165 {
9166 inst.reloc.type = BFD_RELOC_ARM_SMC;
9167 inst.reloc.pc_rel = 0;
9168 }
9169
9170 static void
9171 do_hvc (void)
9172 {
9173 inst.reloc.type = BFD_RELOC_ARM_HVC;
9174 inst.reloc.pc_rel = 0;
9175 }
9176
9177 static void
9178 do_swi (void)
9179 {
9180 inst.reloc.type = BFD_RELOC_ARM_SWI;
9181 inst.reloc.pc_rel = 0;
9182 }
9183
9184 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9185 SMLAxy{cond} Rd,Rm,Rs,Rn
9186 SMLAWy{cond} Rd,Rm,Rs,Rn
9187 Error if any register is R15. */
9188
9189 static void
9190 do_smla (void)
9191 {
9192 inst.instruction |= inst.operands[0].reg << 16;
9193 inst.instruction |= inst.operands[1].reg;
9194 inst.instruction |= inst.operands[2].reg << 8;
9195 inst.instruction |= inst.operands[3].reg << 12;
9196 }
9197
9198 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9199 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9200 Error if any register is R15.
9201 Warning if Rdlo == Rdhi. */
9202
9203 static void
9204 do_smlal (void)
9205 {
9206 inst.instruction |= inst.operands[0].reg << 12;
9207 inst.instruction |= inst.operands[1].reg << 16;
9208 inst.instruction |= inst.operands[2].reg;
9209 inst.instruction |= inst.operands[3].reg << 8;
9210
9211 if (inst.operands[0].reg == inst.operands[1].reg)
9212 as_tsktsk (_("rdhi and rdlo must be different"));
9213 }
9214
9215 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9216 SMULxy{cond} Rd,Rm,Rs
9217 Error if any register is R15. */
9218
9219 static void
9220 do_smul (void)
9221 {
9222 inst.instruction |= inst.operands[0].reg << 16;
9223 inst.instruction |= inst.operands[1].reg;
9224 inst.instruction |= inst.operands[2].reg << 8;
9225 }
9226
9227 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9228 the same for both ARM and Thumb-2. */
9229
9230 static void
9231 do_srs (void)
9232 {
9233 int reg;
9234
9235 if (inst.operands[0].present)
9236 {
9237 reg = inst.operands[0].reg;
9238 constraint (reg != REG_SP, _("SRS base register must be r13"));
9239 }
9240 else
9241 reg = REG_SP;
9242
9243 inst.instruction |= reg << 16;
9244 inst.instruction |= inst.operands[1].imm;
9245 if (inst.operands[0].writeback || inst.operands[1].writeback)
9246 inst.instruction |= WRITE_BACK;
9247 }
9248
9249 /* ARM V6 strex (argument parse). */
9250
static void
do_strex (void)
{
  /* STREX Rd, Rm, [Rn]: only a plain, non-writeback base register with
     a zero offset is acceptable, and Rd must not overlap Rm or Rn.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* Rd<12-15>, Rm<0-3>, Rn<16-19>; no relocation is required.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9274
static void
do_t_strexbh (void)
{
  /* Thumb STREXB/STREXH: reject every addressing form except a plain
     base register, and any overlap of Rd with Rm or Rn.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9289
static void
do_strexd (void)
{
  /* STREXD Rd, Rt, Rt2, [Rn]: Rt must be even, Rt2 (if written) must
     be Rt + 1, and Rd must not overlap Rt, Rt2 or Rn.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  /* Rd<12-15>, Rt<0-3>, Rn<16-19>; Rt2 is implied by Rt.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
9311
/* ARM V8 STRL.  */
static void
do_stlex (void)
{
  /* The status register must not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
9321
static void
do_t_stlex (void)
{
  /* Thumb variant: same overlap restriction, but the operand fields
     are placed via do_rm_rd_rn rather than do_rd_rm_rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9330
9331 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9332 extends it to 32-bits, and adds the result to a value in another
9333 register. You can specify a rotation by 0, 8, 16, or 24 bits
9334 before extracting the 16-bit value.
9335 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9336 Condition defaults to COND_ALWAYS.
9337 Error if any register uses R15. */
9338
9339 static void
9340 do_sxtah (void)
9341 {
9342 inst.instruction |= inst.operands[0].reg << 12;
9343 inst.instruction |= inst.operands[1].reg << 16;
9344 inst.instruction |= inst.operands[2].reg;
9345 inst.instruction |= inst.operands[3].imm << 10;
9346 }
9347
9348 /* ARM V6 SXTH.
9349
9350 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9351 Condition defaults to COND_ALWAYS.
9352 Error if any register uses R15. */
9353
9354 static void
9355 do_sxth (void)
9356 {
9357 inst.instruction |= inst.operands[0].reg << 12;
9358 inst.instruction |= inst.operands[1].reg;
9359 inst.instruction |= inst.operands[2].imm << 10;
9360 }
9361 \f
9362 /* VFP instructions. In a logical order: SP variant first, monad
9363 before dyad, arithmetic then move then load/store. */
9364
/* Single-operand SP form: Sd, Sm.  */
static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Three-operand SP form: Sd, Sn, Sm.  */
static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Compare-against-zero form: only Sd is encoded.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

/* Single-to-double conversion: Dd, Sm.  */
static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

/* Double-to-single conversion: Sd, Dm.  */
static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Core register (bits 12-15) from SP register Sn.  */
static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

/* Two core registers from a pair of consecutive SP registers.  */
static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* SP register Sn from core register (bits 12-15).  */
static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Pair of consecutive SP registers from two core registers.  */
static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* SP load/store: Sd plus a coprocessor address operand.  */
static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

/* DP load/store: Dd plus a coprocessor address operand.  */
static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9447
9448
9449 static void
9450 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
9451 {
9452 if (inst.operands[0].writeback)
9453 inst.instruction |= WRITE_BACK;
9454 else
9455 constraint (ldstm_type != VFP_LDSTMIA,
9456 _("this addressing mode requires base-register writeback"));
9457 inst.instruction |= inst.operands[0].reg << 16;
9458 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
9459 inst.instruction |= inst.operands[1].imm;
9460 }
9461
9462 static void
9463 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9464 {
9465 int count;
9466
9467 if (inst.operands[0].writeback)
9468 inst.instruction |= WRITE_BACK;
9469 else
9470 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9471 _("this addressing mode requires base-register writeback"));
9472
9473 inst.instruction |= inst.operands[0].reg << 16;
9474 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9475
9476 count = inst.operands[1].imm << 1;
9477 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9478 count += 1;
9479
9480 inst.instruction |= count;
9481 }
9482
/* Single-precision load/store multiple, increment-after form.  */
static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

/* Single-precision load/store multiple, decrement-before form.  */
static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

/* Double-precision load/store multiple, increment-after form.  */
static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

/* Double-precision load/store multiple, decrement-before form.  */
static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

/* Extended (X) double-precision forms, increment-after.  */
static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

/* Extended (X) double-precision forms, decrement-before.  */
static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9518
/* Dd, Dm.  */
static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

/* Dn, Dd.  */
static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

/* Dd, Dn.  */
static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

/* Dd, Dn, Dm.  */
static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

/* Dd only.  */
static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

/* Dm, Dd, Dn.  */
static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9561
9562 /* VFPv3 instructions. */
9563 static void
9564 do_vfp_sp_const (void)
9565 {
9566 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9567 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9568 inst.instruction |= (inst.operands[1].imm & 0x0f);
9569 }
9570
9571 static void
9572 do_vfp_dp_const (void)
9573 {
9574 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9575 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9576 inst.instruction |= (inst.operands[1].imm & 0x0f);
9577 }
9578
9579 static void
9580 vfp_conv (int srcsize)
9581 {
9582 int immbits = srcsize - inst.operands[1].imm;
9583
9584 if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
9585 {
9586 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9587 i.e. immbits must be in range 0 - 16. */
9588 inst.error = _("immediate value out of range, expected range [0, 16]");
9589 return;
9590 }
9591 else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
9592 {
9593 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9594 i.e. immbits must be in range 0 - 31. */
9595 inst.error = _("immediate value out of range, expected range [1, 32]");
9596 return;
9597 }
9598
9599 inst.instruction |= (immbits & 1) << 5;
9600 inst.instruction |= (immbits >> 1);
9601 }
9602
/* Fixed-point conversion wrappers: encode the destination register,
   then let vfp_conv place the fraction-bits immediate.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9630 \f
9631 /* FPA instructions. Also in a logical order. */
9632
9633 static void
9634 do_fpa_cmp (void)
9635 {
9636 inst.instruction |= inst.operands[0].reg << 16;
9637 inst.instruction |= inst.operands[1].reg;
9638 }
9639
9640 static void
9641 do_fpa_ldmstm (void)
9642 {
9643 inst.instruction |= inst.operands[0].reg << 12;
9644 switch (inst.operands[1].imm)
9645 {
9646 case 1: inst.instruction |= CP_T_X; break;
9647 case 2: inst.instruction |= CP_T_Y; break;
9648 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
9649 case 4: break;
9650 default: abort ();
9651 }
9652
9653 if (inst.instruction & (PRE_INDEX | INDEX_UP))
9654 {
9655 /* The instruction specified "ea" or "fd", so we can only accept
9656 [Rn]{!}. The instruction does not really support stacking or
9657 unstacking, so we have to emulate these by setting appropriate
9658 bits and offsets. */
9659 constraint (inst.reloc.exp.X_op != O_constant
9660 || inst.reloc.exp.X_add_number != 0,
9661 _("this instruction does not support indexing"));
9662
9663 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
9664 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
9665
9666 if (!(inst.instruction & INDEX_UP))
9667 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
9668
9669 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
9670 {
9671 inst.operands[2].preind = 0;
9672 inst.operands[2].postind = 1;
9673 }
9674 }
9675
9676 encode_arm_cp_address (2, TRUE, TRUE, 0);
9677 }
9678 \f
9679 /* iWMMXt instructions: strictly in alphabetical order. */
9680
/* TANDC/TORC family: the destination must be r15 (condition flags).  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

/* Rd in bits 12-15, immediate in the low bits.  */
static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

/* Rd<12-15>, wRn<16-19>, lane immediate in the low bits.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

/* wRd<16-19>, Rn<12-15>, lane immediate in the low bits.  */
static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

/* Accumulator number at bit 5, Rm in bits 0-3, Rs in bits 12-15.  */
static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* wRd<12-15>, wRn<16-19>, wRm<0-3>, immediate placed at bit 20.  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

/* As above, but the immediate is placed at bit 21.  */
static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN: encode the source
     register in both source fields.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9744
9745 static void
9746 do_iwmmxt_wldstbh (void)
9747 {
9748 int reloc;
9749 inst.instruction |= inst.operands[0].reg << 12;
9750 if (thumb_mode)
9751 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9752 else
9753 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9754 encode_arm_cp_address (1, TRUE, FALSE, reloc);
9755 }
9756
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register transfers must be unconditional and carry
	 the 0xF condition prefix.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9770
static void
do_iwmmxt_wldstd (void)
{
  /* Doubleword load/store.  On iWMMXt2, the register-offset addressing
     form is encoded by hand here (unconditional 0xF prefix, base in
     bits 16-19, offset register in the low bits) instead of going
     through encode_arm_cp_address.  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the template fields that the hand-built encoding
	 replaces before ORing in the new values.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
9793
9794 static void
9795 do_iwmmxt_wshufh (void)
9796 {
9797 inst.instruction |= inst.operands[0].reg << 12;
9798 inst.instruction |= inst.operands[1].reg << 16;
9799 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9800 inst.instruction |= (inst.operands[2].imm & 0x0f);
9801 }
9802
9803 static void
9804 do_iwmmxt_wzero (void)
9805 {
9806 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9807 inst.instruction |= inst.operands[0].reg;
9808 inst.instruction |= inst.operands[0].reg << 12;
9809 inst.instruction |= inst.operands[0].reg << 16;
9810 }
9811
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  /* Shift instructions taking either a register or (iWMMXt2 only) a
     5-bit immediate as their third operand.  A #0 immediate is
     rewritten into an equivalent instruction, keyed on the size field
     in bits 20-23.  */
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
9861 \f
9862 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
9863 operations first, then control, shift, and load/store. */
9864
9865 /* Insns like "foo X,Y,Z". */
9866
9867 static void
9868 do_mav_triple (void)
9869 {
9870 inst.instruction |= inst.operands[0].reg << 16;
9871 inst.instruction |= inst.operands[1].reg;
9872 inst.instruction |= inst.operands[2].reg << 12;
9873 }
9874
9875 /* Insns like "foo W,X,Y,Z".
9876 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
9877
9878 static void
9879 do_mav_quad (void)
9880 {
9881 inst.instruction |= inst.operands[0].reg << 5;
9882 inst.instruction |= inst.operands[1].reg << 12;
9883 inst.instruction |= inst.operands[2].reg << 16;
9884 inst.instruction |= inst.operands[3].reg;
9885 }
9886
/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
static void
do_mav_dspsc (void)
{
  /* Only the source MVDX register is encoded; DSPSC is implicit.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
9893
9894 /* Maverick shift immediate instructions.
9895 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9896 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
9897
9898 static void
9899 do_mav_shift (void)
9900 {
9901 int imm = inst.operands[2].imm;
9902
9903 inst.instruction |= inst.operands[0].reg << 12;
9904 inst.instruction |= inst.operands[1].reg << 16;
9905
9906 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9907 Bits 5-7 of the insn should have bits 4-6 of the immediate.
9908 Bit 4 should be 0. */
9909 imm = (imm & 0xf) | ((imm & 0x70) << 1);
9910
9911 inst.instruction |= imm;
9912 }
9913 \f
9914 /* XScale instructions. Also sorted arithmetic before move. */
9915
9916 /* Xscale multiply-accumulate (argument parse)
9917 MIAcc acc0,Rm,Rs
9918 MIAPHcc acc0,Rm,Rs
9919 MIAxycc acc0,Rm,Rs. */
9920
9921 static void
9922 do_xsc_mia (void)
9923 {
9924 inst.instruction |= inst.operands[1].reg;
9925 inst.instruction |= inst.operands[2].reg << 12;
9926 }
9927
9928 /* Xscale move-accumulator-register (argument parse)
9929
9930 MARcc acc0,RdLo,RdHi. */
9931
9932 static void
9933 do_xsc_mar (void)
9934 {
9935 inst.instruction |= inst.operands[1].reg << 12;
9936 inst.instruction |= inst.operands[2].reg << 16;
9937 }
9938
9939 /* Xscale move-register-accumulator (argument parse)
9940
9941 MRAcc RdLo,RdHi,acc0. */
9942
9943 static void
9944 do_xsc_mra (void)
9945 {
9946 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9947 inst.instruction |= inst.operands[0].reg << 12;
9948 inst.instruction |= inst.operands[1].reg << 16;
9949 }
9950 \f
9951 /* Encoding functions relevant only to Thumb. */
9952
9953 /* inst.operands[i] is a shifted-register operand; encode
9954 it into inst.instruction in the format used by Thumb32. */
9955
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* A zero count is canonicalised to LSL #0; a count of 32 (legal
	 only for LSR/ASR, checked above) is encoded as 0.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* The 5-bit amount is split: bits 2-4 to insn bits 12-14,
	 bits 0-1 to insn bits 6-7.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
9987
9988
9989 /* inst.operands[i] was set up by parse_address. Encode it into a
9990 Thumb32 format load or store instruction. Reject forms that cannot
9991 be used with such instructions. If is_t is true, reject forms that
9992 cannot be used with a T instruction; if is_d is true, reject forms
9993 that cannot be used with a D instruction. If it is a store insn,
9994 reject PC in Rn. */
9995
9996 static void
9997 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
9998 {
9999 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
10000
10001 constraint (!inst.operands[i].isreg,
10002 _("Instruction does not support =N addresses"));
10003
10004 inst.instruction |= inst.operands[i].reg << 16;
10005 if (inst.operands[i].immisreg)
10006 {
10007 constraint (is_pc, BAD_PC_ADDRESSING);
10008 constraint (is_t || is_d, _("cannot use register index with this instruction"));
10009 constraint (inst.operands[i].negative,
10010 _("Thumb does not support negative register indexing"));
10011 constraint (inst.operands[i].postind,
10012 _("Thumb does not support register post-indexing"));
10013 constraint (inst.operands[i].writeback,
10014 _("Thumb does not support register indexing with writeback"));
10015 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
10016 _("Thumb supports only LSL in shifted register indexing"));
10017
10018 inst.instruction |= inst.operands[i].imm;
10019 if (inst.operands[i].shifted)
10020 {
10021 constraint (inst.reloc.exp.X_op != O_constant,
10022 _("expression too complex"));
10023 constraint (inst.reloc.exp.X_add_number < 0
10024 || inst.reloc.exp.X_add_number > 3,
10025 _("shift out of range"));
10026 inst.instruction |= inst.reloc.exp.X_add_number << 4;
10027 }
10028 inst.reloc.type = BFD_RELOC_UNUSED;
10029 }
10030 else if (inst.operands[i].preind)
10031 {
10032 constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
10033 constraint (is_t && inst.operands[i].writeback,
10034 _("cannot use writeback with this instruction"));
10035 constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
10036 BAD_PC_ADDRESSING);
10037
10038 if (is_d)
10039 {
10040 inst.instruction |= 0x01000000;
10041 if (inst.operands[i].writeback)
10042 inst.instruction |= 0x00200000;
10043 }
10044 else
10045 {
10046 inst.instruction |= 0x00000c00;
10047 if (inst.operands[i].writeback)
10048 inst.instruction |= 0x00000100;
10049 }
10050 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10051 }
10052 else if (inst.operands[i].postind)
10053 {
10054 gas_assert (inst.operands[i].writeback);
10055 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
10056 constraint (is_t, _("cannot use post-indexing with this instruction"));
10057
10058 if (is_d)
10059 inst.instruction |= 0x00200000;
10060 else
10061 inst.instruction |= 0x00000900;
10062 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10063 }
10064 else /* unindexed - only for coprocessor */
10065 inst.error = _("instruction does not accept unindexed addressing");
10066 }
10067
10068 /* Table of Thumb instructions which exist in both 16- and 32-bit
10069 encodings (the latter only in post-V6T2 cores). The index is the
10070 value used in the insns table below. When there is more than one
10071 possible 16-bit encoding for the instruction, this table always
10072 holds variant (1).
10073 Also contains several pseudo-instructions used during relaxation. */
/* Each X() entry supplies the mnemonic (as a T_MNEM_* suffix), its
   16-bit encoding, and its 32-bit encoding.  The three expansions of
   X() below derive the code enum and both opcode tables from this
   single list.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Narrow (16-bit) opcodes, indexed by T_MNEM_* code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Wide (32-bit) opcodes, indexed likewise; bit 20 of the wide opcode
   is the S (flag-setting) bit, tested by THUMB_SETS_FLAGS.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10173
10174 /* Thumb instruction encoders, in alphabetical order. */
10175
10176 /* ADDW or SUBW. */
10177
10178 static void
10179 do_t_add_sub_w (void)
10180 {
10181 int Rd, Rn;
10182
10183 Rd = inst.operands[0].reg;
10184 Rn = inst.operands[1].reg;
10185
10186 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10187 is the SP-{plus,minus}-immediate form of the instruction. */
10188 if (Rn == REG_SP)
10189 constraint (Rd == REG_PC, BAD_PC);
10190 else
10191 reject_bad_reg (Rd);
10192
10193 inst.instruction |= (Rn << 16) | (Rd << 8);
10194 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10195 }
10196
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  Selects between
   the many 16-bit and 32-bit encodings, deferring to relaxation where the
   size is not forced.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* Flag-setting forms prefer narrow encodings outside an IT block;
	 non-flag-setting forms prefer them inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  /* No explicit size request: let relaxation widen the
		     insn later if the immediate needs it.  */
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      if (Rd == REG_PC)
		{
		  /* The only writes to PC allowed here are the
		     exception-return form SUBS PC, LR, #const.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			      _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalise so that Rn holds the register that is
			 not the destination before encoding ADD (hi regs).  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (pre-UAL) syntax: only the 16-bit encodings exist.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10407
10408 static void
10409 do_t_adr (void)
10410 {
10411 unsigned Rd;
10412
10413 Rd = inst.operands[0].reg;
10414 reject_bad_reg (Rd);
10415
10416 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10417 {
10418 /* Defer to section relaxation. */
10419 inst.relax = inst.instruction;
10420 inst.instruction = THUMB_OP16 (inst.instruction);
10421 inst.instruction |= Rd << 4;
10422 }
10423 else if (unified_syntax && inst.size_req != 2)
10424 {
10425 /* Generate a 32-bit opcode. */
10426 inst.instruction = THUMB_OP32 (inst.instruction);
10427 inst.instruction |= Rd << 8;
10428 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10429 inst.reloc.pc_rel = 1;
10430 }
10431 else
10432 {
10433 /* Generate a 16-bit opcode. */
10434 inst.instruction = THUMB_OP16 (inst.instruction);
10435 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10436 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10437 inst.reloc.pc_rel = 1;
10438
10439 inst.instruction |= Rd << 4;
10440 }
10441 }
10442
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  /* The 16-bit encoding needs all-low registers, no shift and
	     no forced 4-byte size.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10531
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* The operation is commutative, so the destination may
		 coincide with either source operand.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10632
10633 static void
10634 do_t_bfc (void)
10635 {
10636 unsigned Rd;
10637 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10638 constraint (msb > 32, _("bit-field extends past end of register"));
10639 /* The instruction encoding stores the LSB and MSB,
10640 not the LSB and width. */
10641 Rd = inst.operands[0].reg;
10642 reject_bad_reg (Rd);
10643 inst.instruction |= Rd << 8;
10644 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10645 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10646 inst.instruction |= msb - 1;
10647 }
10648
10649 static void
10650 do_t_bfi (void)
10651 {
10652 int Rd, Rn;
10653 unsigned int msb;
10654
10655 Rd = inst.operands[0].reg;
10656 reject_bad_reg (Rd);
10657
10658 /* #0 in second position is alternative syntax for bfc, which is
10659 the same instruction but with REG_PC in the Rm field. */
10660 if (!inst.operands[1].isreg)
10661 Rn = REG_PC;
10662 else
10663 {
10664 Rn = inst.operands[1].reg;
10665 reject_bad_reg (Rn);
10666 }
10667
10668 msb = inst.operands[2].imm + inst.operands[3].imm;
10669 constraint (msb > 32, _("bit-field extends past end of register"));
10670 /* The instruction encoding stores the LSB and MSB,
10671 not the LSB and width. */
10672 inst.instruction |= Rd << 8;
10673 inst.instruction |= Rn << 16;
10674 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10675 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10676 inst.instruction |= msb - 1;
10677 }
10678
10679 static void
10680 do_t_bfx (void)
10681 {
10682 unsigned Rd, Rn;
10683
10684 Rd = inst.operands[0].reg;
10685 Rn = inst.operands[1].reg;
10686
10687 reject_bad_reg (Rd);
10688 reject_bad_reg (Rn);
10689
10690 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10691 _("bit-field extends past end of register"));
10692 inst.instruction |= Rd << 8;
10693 inst.instruction |= Rn << 16;
10694 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10695 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10696 inst.instruction |= inst.operands[3].imm - 1;
10697 }
10698
10699 /* ARM V5 Thumb BLX (argument parse)
10700 BLX <target_addr> which is BLX(1)
10701 BLX <Rm> which is BLX(2)
10702 Unfortunately, there are two different opcodes for this mnemonic.
10703 So, the insns[].value is not used, and the code here zaps values
10704 into inst.instruction.
10705
10706 ??? How to take advantage of the additional two bits of displacement
10707 available in Thumb32 mode? Need new relocation? */
10708
10709 static void
10710 do_t_blx (void)
10711 {
10712 set_it_insn_type_last ();
10713
10714 if (inst.operands[0].isreg)
10715 {
10716 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10717 /* We have a register, so this is BLX(2). */
10718 inst.instruction |= inst.operands[0].reg << 3;
10719 }
10720 else
10721 {
10722 /* No register. This must be BLX(1). */
10723 inst.instruction = 0xf000e800;
10724 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10725 }
10726 }
10727
10728 static void
10729 do_t_branch (void)
10730 {
10731 int opcode;
10732 int cond;
10733 int reloc;
10734
10735 cond = inst.cond;
10736 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10737
10738 if (in_it_block ())
10739 {
10740 /* Conditional branches inside IT blocks are encoded as unconditional
10741 branches. */
10742 cond = COND_ALWAYS;
10743 }
10744 else
10745 cond = inst.cond;
10746
10747 if (cond != COND_ALWAYS)
10748 opcode = T_MNEM_bcond;
10749 else
10750 opcode = inst.instruction;
10751
10752 if (unified_syntax
10753 && (inst.size_req == 4
10754 || (inst.size_req != 2
10755 && (inst.operands[0].hasreloc
10756 || inst.reloc.exp.X_op == O_constant))))
10757 {
10758 inst.instruction = THUMB_OP32(opcode);
10759 if (cond == COND_ALWAYS)
10760 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10761 else
10762 {
10763 gas_assert (cond != 0xF);
10764 inst.instruction |= cond << 22;
10765 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10766 }
10767 }
10768 else
10769 {
10770 inst.instruction = THUMB_OP16(opcode);
10771 if (cond == COND_ALWAYS)
10772 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10773 else
10774 {
10775 inst.instruction |= cond << 8;
10776 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10777 }
10778 /* Allow section relaxation. */
10779 if (unified_syntax && inst.size_req != 2)
10780 inst.relax = opcode;
10781 }
10782 inst.reloc.type = reloc;
10783 inst.reloc.pc_rel = 1;
10784 }
10785
10786 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10787 between the two is the maximum immediate allowed - which is passed in
10788 RANGE. */
10789 static void
10790 do_t_bkpt_hlt1 (int range)
10791 {
10792 constraint (inst.cond != COND_ALWAYS,
10793 _("instruction is always unconditional"));
10794 if (inst.operands[0].present)
10795 {
10796 constraint (inst.operands[0].imm > range,
10797 _("immediate value out of range"));
10798 inst.instruction |= inst.operands[0].imm;
10799 }
10800
10801 set_it_insn_type (NEUTRAL_IT_INSN);
10802 }
10803
/* Thumb HLT: immediate limited to the range 0-63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
10809
/* Thumb BKPT: immediate limited to the range 0-255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
10815
/* Thumb BL: branch with link using the 23-bit (BRANCH23) relocation.  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
10843
/* Thumb BX: branch and exchange instruction set.  */
static void
do_t_bx (void)
{
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
10853
10854 static void
10855 do_t_bxj (void)
10856 {
10857 int Rm;
10858
10859 set_it_insn_type_last ();
10860 Rm = inst.operands[0].reg;
10861 reject_bad_reg (Rm);
10862 inst.instruction |= Rm << 16;
10863 }
10864
10865 static void
10866 do_t_clz (void)
10867 {
10868 unsigned Rd;
10869 unsigned Rm;
10870
10871 Rd = inst.operands[0].reg;
10872 Rm = inst.operands[1].reg;
10873
10874 reject_bad_reg (Rd);
10875 reject_bad_reg (Rm);
10876
10877 inst.instruction |= Rd << 8;
10878 inst.instruction |= Rm << 16;
10879 inst.instruction |= Rm;
10880 }
10881
/* Thumb CPS: change processor state (single-operand form).  Not
   permitted inside an IT block.  */
static void
do_t_cps (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
10888
/* Thumb CPSIE/CPSID: change processor state, interrupt enable/disable
   form.  Selects the 32-bit encoding when a mode operand is present
   or a 4-byte size was requested (and the core supports it).  */
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the imod (enable/disable) bits over from the 16-bit
	 opcode template into the 32-bit encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
10916
10917 /* THUMB CPY instruction (argument parse). */
10918
10919 static void
10920 do_t_cpy (void)
10921 {
10922 if (inst.size_req == 4)
10923 {
10924 inst.instruction = THUMB_OP32 (T_MNEM_mov);
10925 inst.instruction |= inst.operands[0].reg << 8;
10926 inst.instruction |= inst.operands[1].reg;
10927 }
10928 else
10929 {
10930 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10931 inst.instruction |= (inst.operands[0].reg & 0x7);
10932 inst.instruction |= inst.operands[1].reg << 3;
10933 }
10934 }
10935
10936 static void
10937 do_t_cbz (void)
10938 {
10939 set_it_insn_type (OUTSIDE_IT_INSN);
10940 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10941 inst.instruction |= inst.operands[0].reg;
10942 inst.reloc.pc_rel = 1;
10943 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10944 }
10945
/* Thumb-2 DBG hint: the operand is the 4-bit option field.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
10951
10952 static void
10953 do_t_div (void)
10954 {
10955 unsigned Rd, Rn, Rm;
10956
10957 Rd = inst.operands[0].reg;
10958 Rn = (inst.operands[1].present
10959 ? inst.operands[1].reg : Rd);
10960 Rm = inst.operands[2].reg;
10961
10962 reject_bad_reg (Rd);
10963 reject_bad_reg (Rn);
10964 reject_bad_reg (Rm);
10965
10966 inst.instruction |= Rd << 8;
10967 inst.instruction |= Rn << 16;
10968 inst.instruction |= Rm;
10969 }
10970
10971 static void
10972 do_t_hint (void)
10973 {
10974 if (unified_syntax && inst.size_req == 4)
10975 inst.instruction = THUMB_OP32 (inst.instruction);
10976 else
10977 inst.instruction = THUMB_OP16 (inst.instruction);
10978 }
10979
/* Thumb-2 IT (If-Then): encode the firstcond and then/else mask, and
   prime the IT-block tracking state (now_it) used to validate the
   following instructions.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit marks the end of the
	 block; the XOR flips the then/else selector bits above it.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11022
/* Helper function used for both push/pop and ldm/stm.  Encodes a
   Thumb-2 multiple load/store with base register BASE, register-list
   MASK and write-back flag WRITEBACK, diagnosing the UNPREDICTABLE
   combinations.  A single-register list is converted to LDR/STR.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading PC is a branch; it must end any IT block.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* The sole register's number goes in the Rt field (bits 15-12).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11086
/* Thumb LDM/STM (and, via narrowing, PUSH/POP and single-register
   LDR/STR): try the 16-bit encodings first in unified syntax, falling
   back to the 32-bit encoder.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* 16-bit STMIA always writes back; 16-bit LDMIA writes
		 back exactly when the base is not in the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
							     : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      if (inst.operands[0].writeback)
		{
		  /* SP with writeback is PUSH/POP.  */
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* Single register, no writeback: SP-relative LDR/STR.  */
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11214
/* Thumb-2 LDREX: the address must be a plain register with an
   optional immediate offset; no writeback, shift or post-indexing.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
11230
11231 static void
11232 do_t_ldrexd (void)
11233 {
11234 if (!inst.operands[1].present)
11235 {
11236 constraint (inst.operands[0].reg == REG_LR,
11237 _("r14 not allowed as first register "
11238 "when second register is omitted"));
11239 inst.operands[1].reg = inst.operands[0].reg + 1;
11240 }
11241 constraint (inst.operands[0].reg == inst.operands[1].reg,
11242 BAD_OVERLAP);
11243
11244 inst.instruction |= inst.operands[0].reg << 12;
11245 inst.instruction |= inst.operands[1].reg << 8;
11246 inst.instruction |= inst.operands[2].reg << 16;
11247 }
11248
/* Encode a Thumb single-register load/store (LDR/STR and the byte,
   halfword and signed variants).  Selects between the 16-bit and
   32-bit encodings, possibly deferring the choice to relaxation
   (inst.relax), and hands literal-pool immediates to
   move_or_literal_pool.  */

static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes the PC must be the last instruction in an
     IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand (e.g. "ldr r0, =value"): pre-select the
	     32-bit form, then let the literal-pool machinery take over
	     if it can handle the expression.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative accesses use dedicated 16-bit
		     encodings.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* No explicit size requested: record the 16-bit opcode
		   and let relaxation widen it if the offset demands.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Non-unified (pre-UAL) syntax: only 16-bit encodings exist.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* Word-only PC/SP-relative forms.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

  /* Convert the immediate-offset opcode into its register-offset
     counterpart; only the opcodes listed below can reach here.  */
 op16:
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11435
11436 static void
11437 do_t_ldstd (void)
11438 {
11439 if (!inst.operands[1].present)
11440 {
11441 inst.operands[1].reg = inst.operands[0].reg + 1;
11442 constraint (inst.operands[0].reg == REG_LR,
11443 _("r14 not allowed here"));
11444 constraint (inst.operands[0].reg == REG_R12,
11445 _("r12 not allowed here"));
11446 }
11447
11448 if (inst.operands[2].writeback
11449 && (inst.operands[0].reg == inst.operands[2].reg
11450 || inst.operands[1].reg == inst.operands[2].reg))
11451 as_warn (_("base register written back, and overlaps "
11452 "one of transfer registers"));
11453
11454 inst.instruction |= inst.operands[0].reg << 12;
11455 inst.instruction |= inst.operands[1].reg << 8;
11456 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11457 }
11458
11459 static void
11460 do_t_ldstt (void)
11461 {
11462 inst.instruction |= inst.operands[0].reg << 12;
11463 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11464 }
11465
11466 static void
11467 do_t_mla (void)
11468 {
11469 unsigned Rd, Rn, Rm, Ra;
11470
11471 Rd = inst.operands[0].reg;
11472 Rn = inst.operands[1].reg;
11473 Rm = inst.operands[2].reg;
11474 Ra = inst.operands[3].reg;
11475
11476 reject_bad_reg (Rd);
11477 reject_bad_reg (Rn);
11478 reject_bad_reg (Rm);
11479 reject_bad_reg (Ra);
11480
11481 inst.instruction |= Rd << 8;
11482 inst.instruction |= Rn << 16;
11483 inst.instruction |= Rm;
11484 inst.instruction |= Ra << 12;
11485 }
11486
11487 static void
11488 do_t_mlal (void)
11489 {
11490 unsigned RdLo, RdHi, Rn, Rm;
11491
11492 RdLo = inst.operands[0].reg;
11493 RdHi = inst.operands[1].reg;
11494 Rn = inst.operands[2].reg;
11495 Rm = inst.operands[3].reg;
11496
11497 reject_bad_reg (RdLo);
11498 reject_bad_reg (RdHi);
11499 reject_bad_reg (Rn);
11500 reject_bad_reg (Rm);
11501
11502 inst.instruction |= RdLo << 12;
11503 inst.instruction |= RdHi << 8;
11504 inst.instruction |= Rn << 16;
11505 inst.instruction |= Rm;
11506 }
11507
/* Encode Thumb MOV, MOVS and CMP with a register or immediate second
   operand.  Chooses between the 16-bit and 32-bit encodings (or defers
   to relaxation via inst.relax), rewrites register-shifted MOVs into
   the corresponding shift instructions, and enforces the various
   SP/PC restrictions.  */

static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* Writing the PC ends an IT block.  */
  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Inside an IT block only non-flag-setting MOV narrows; outside,
	 MOVS also narrows when both registers are low.  */
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
	      else
		/* Let relaxation widen the insn if the immediate does
		   not fit the 8-bit field.  */
		inst.relax = opcode;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The 16-bit shift-by-register forms read and write the same
	     register, so narrowing requires Rd == Rm.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow, unshifted register forms.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Non-unified (pre-UAL) syntax: 16-bit encodings only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
11796
11797 static void
11798 do_t_mov16 (void)
11799 {
11800 unsigned Rd;
11801 bfd_vma imm;
11802 bfd_boolean top;
11803
11804 top = (inst.instruction & 0x00800000) != 0;
11805 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11806 {
11807 constraint (top, _(":lower16: not allowed this instruction"));
11808 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
11809 }
11810 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11811 {
11812 constraint (!top, _(":upper16: not allowed this instruction"));
11813 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11814 }
11815
11816 Rd = inst.operands[0].reg;
11817 reject_bad_reg (Rd);
11818
11819 inst.instruction |= Rd << 8;
11820 if (inst.reloc.type == BFD_RELOC_UNUSED)
11821 {
11822 imm = inst.reloc.exp.X_add_number;
11823 inst.instruction |= (imm & 0xf000) << 4;
11824 inst.instruction |= (imm & 0x0800) << 15;
11825 inst.instruction |= (imm & 0x0700) << 4;
11826 inst.instruction |= (imm & 0x00ff);
11827 }
11828 }
11829
/* Encode Thumb MVN/MVNS/CMN/TST with a register or immediate second
   operand, choosing between the 16-bit and 32-bit encodings.  */

static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN merely forbid the PC; the other mnemonics also reject SP.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* MVN/MVNS place the operand register in bits 8-11; the
	 comparison instructions use bits 16-19.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Non-unified syntax: only the 16-bit two-register form exists.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
11909
/* Encode a Thumb-2 MRS instruction (read a special/banked register
   into Rd), after first giving the VFP vmrs alias a chance.  */

static void
do_t_mrs (void)
{
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register operand: the parser packed the SYSm value and
	 flag bits into operands[1].reg.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
11957
/* Encode a Thumb-2 MSR instruction (write Rn to a special/banked
   register), after first giving the VFP vmsr alias a chance.  */

static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* The destination is either a banked register (packed into .reg by
     the parser) or a PSR-field mask in .imm.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12004
/* Encode Thumb MUL/MULS.  The 16-bit form requires the destination to
   overlap one of the source registers; the 32-bit form (MUL only) has
   no such restriction.  */

static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* "mul Rd, Rn" is shorthand for "mul Rd, Rn, Rd".  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12067
12068 static void
12069 do_t_mull (void)
12070 {
12071 unsigned RdLo, RdHi, Rn, Rm;
12072
12073 RdLo = inst.operands[0].reg;
12074 RdHi = inst.operands[1].reg;
12075 Rn = inst.operands[2].reg;
12076 Rm = inst.operands[3].reg;
12077
12078 reject_bad_reg (RdLo);
12079 reject_bad_reg (RdHi);
12080 reject_bad_reg (Rn);
12081 reject_bad_reg (Rm);
12082
12083 inst.instruction |= RdLo << 12;
12084 inst.instruction |= RdHi << 8;
12085 inst.instruction |= Rn << 16;
12086 inst.instruction |= Rm;
12087
12088 if (RdLo == RdHi)
12089 as_tsktsk (_("rdhi and rdlo must be different"));
12090 }
12091
12092 static void
12093 do_t_nop (void)
12094 {
12095 set_it_insn_type (NEUTRAL_IT_INSN);
12096
12097 if (unified_syntax)
12098 {
12099 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12100 {
12101 inst.instruction = THUMB_OP32 (inst.instruction);
12102 inst.instruction |= inst.operands[0].imm;
12103 }
12104 else
12105 {
12106 /* PR9722: Check for Thumb2 availability before
12107 generating a thumb2 nop instruction. */
12108 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12109 {
12110 inst.instruction = THUMB_OP16 (inst.instruction);
12111 inst.instruction |= inst.operands[0].imm << 4;
12112 }
12113 else
12114 inst.instruction = 0x46c0;
12115 }
12116 }
12117 else
12118 {
12119 constraint (inst.operands[0].present,
12120 _("Thumb does not support NOP with hints"));
12121 inst.instruction = 0x46c0;
12122 }
12123 }
12124
12125 static void
12126 do_t_neg (void)
12127 {
12128 if (unified_syntax)
12129 {
12130 bfd_boolean narrow;
12131
12132 if (THUMB_SETS_FLAGS (inst.instruction))
12133 narrow = !in_it_block ();
12134 else
12135 narrow = in_it_block ();
12136 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12137 narrow = FALSE;
12138 if (inst.size_req == 4)
12139 narrow = FALSE;
12140
12141 if (!narrow)
12142 {
12143 inst.instruction = THUMB_OP32 (inst.instruction);
12144 inst.instruction |= inst.operands[0].reg << 8;
12145 inst.instruction |= inst.operands[1].reg << 16;
12146 }
12147 else
12148 {
12149 inst.instruction = THUMB_OP16 (inst.instruction);
12150 inst.instruction |= inst.operands[0].reg;
12151 inst.instruction |= inst.operands[1].reg << 3;
12152 }
12153 }
12154 else
12155 {
12156 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12157 BAD_HIREG);
12158 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12159
12160 inst.instruction = THUMB_OP16 (inst.instruction);
12161 inst.instruction |= inst.operands[0].reg;
12162 inst.instruction |= inst.operands[1].reg << 3;
12163 }
12164 }
12165
12166 static void
12167 do_t_orn (void)
12168 {
12169 unsigned Rd, Rn;
12170
12171 Rd = inst.operands[0].reg;
12172 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12173
12174 reject_bad_reg (Rd);
12175 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12176 reject_bad_reg (Rn);
12177
12178 inst.instruction |= Rd << 8;
12179 inst.instruction |= Rn << 16;
12180
12181 if (!inst.operands[2].isreg)
12182 {
12183 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12184 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12185 }
12186 else
12187 {
12188 unsigned Rm;
12189
12190 Rm = inst.operands[2].reg;
12191 reject_bad_reg (Rm);
12192
12193 constraint (inst.operands[2].shifted
12194 && inst.operands[2].immisreg,
12195 _("shift must be constant"));
12196 encode_thumb32_shifted_operand (2);
12197 }
12198 }
12199
12200 static void
12201 do_t_pkhbt (void)
12202 {
12203 unsigned Rd, Rn, Rm;
12204
12205 Rd = inst.operands[0].reg;
12206 Rn = inst.operands[1].reg;
12207 Rm = inst.operands[2].reg;
12208
12209 reject_bad_reg (Rd);
12210 reject_bad_reg (Rn);
12211 reject_bad_reg (Rm);
12212
12213 inst.instruction |= Rd << 8;
12214 inst.instruction |= Rn << 16;
12215 inst.instruction |= Rm;
12216 if (inst.operands[3].present)
12217 {
12218 unsigned int val = inst.reloc.exp.X_add_number;
12219 constraint (inst.reloc.exp.X_op != O_constant,
12220 _("expression too complex"));
12221 inst.instruction |= (val & 0x1c) << 10;
12222 inst.instruction |= (val & 0x03) << 6;
12223 }
12224 }
12225
12226 static void
12227 do_t_pkhtb (void)
12228 {
12229 if (!inst.operands[3].present)
12230 {
12231 unsigned Rtmp;
12232
12233 inst.instruction &= ~0x00000020;
12234
12235 /* PR 10168. Swap the Rm and Rn registers. */
12236 Rtmp = inst.operands[1].reg;
12237 inst.operands[1].reg = inst.operands[2].reg;
12238 inst.operands[2].reg = Rtmp;
12239 }
12240 do_t_pkhbt ();
12241 }
12242
/* Encode the address operand of a Thumb-2 preload hint (PLD/PLDW/PLI).  */

static void
do_t_pld (void)
{
  /* A register offset must not be SP or PC.  */
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12251
/* Encode Thumb PUSH/POP.  Uses the 16-bit encoding when the register
   list fits (low registers plus LR for push / PC for pop); otherwise
   falls back to the 32-bit LDM/STM form in unified syntax.  */

static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* The register-list parse must not have left a pending relocation.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    /* Low registers only: plain 16-bit form.  */
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): 16-bit form
	 with the extra-register bit set.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else becomes a 32-bit LDMIA/STMDB on SP.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12284
12285 static void
12286 do_t_rbit (void)
12287 {
12288 unsigned Rd, Rm;
12289
12290 Rd = inst.operands[0].reg;
12291 Rm = inst.operands[1].reg;
12292
12293 reject_bad_reg (Rd);
12294 reject_bad_reg (Rm);
12295
12296 inst.instruction |= Rd << 8;
12297 inst.instruction |= Rm << 16;
12298 inst.instruction |= Rm;
12299 }
12300
12301 static void
12302 do_t_rev (void)
12303 {
12304 unsigned Rd, Rm;
12305
12306 Rd = inst.operands[0].reg;
12307 Rm = inst.operands[1].reg;
12308
12309 reject_bad_reg (Rd);
12310 reject_bad_reg (Rm);
12311
12312 if (Rd <= 7 && Rm <= 7
12313 && inst.size_req != 4)
12314 {
12315 inst.instruction = THUMB_OP16 (inst.instruction);
12316 inst.instruction |= Rd;
12317 inst.instruction |= Rm << 3;
12318 }
12319 else if (unified_syntax)
12320 {
12321 inst.instruction = THUMB_OP32 (inst.instruction);
12322 inst.instruction |= Rd << 8;
12323 inst.instruction |= Rm << 16;
12324 inst.instruction |= Rm;
12325 }
12326 else
12327 inst.error = BAD_HIREG;
12328 }
12329
12330 static void
12331 do_t_rrx (void)
12332 {
12333 unsigned Rd, Rm;
12334
12335 Rd = inst.operands[0].reg;
12336 Rm = inst.operands[1].reg;
12337
12338 reject_bad_reg (Rd);
12339 reject_bad_reg (Rm);
12340
12341 inst.instruction |= Rd << 8;
12342 inst.instruction |= Rm;
12343 }
12344
/* Encode Thumb-2 RSB/RSBS.  An "rsb Rd, Rs, #0" that meets the
   narrowing conditions is turned into the 16-bit NEGS encoding.  */

static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S flag: RSBS narrows outside an IT block,
	 plain RSB inside one.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12399
12400 static void
12401 do_t_setend (void)
12402 {
12403 if (warn_on_deprecated
12404 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12405 as_tsktsk (_("setend use is deprecated for ARMv8"));
12406
12407 set_it_insn_type (OUTSIDE_IT_INSN);
12408 if (inst.operands[0].imm)
12409 inst.instruction |= 0x8;
12410 }
12411
/* Encode a Thumb shift instruction (ASR/LSL/LSR/ROR, with or without
   flag setting).  Handles both unified and divided syntax; in unified
   syntax a 16-bit encoding is chosen when the operands, size request
   and IT context allow it, otherwise the 32-bit Thumb-2 encoding (or,
   for immediate shifts, an equivalent MOV-with-shifted-operand) is
   produced.  */
static void
do_t_shift (void)
{
  /* The first source register is optional and defaults to Rd.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* Decide whether the 16-bit encoding is usable: flag-setting must
	 agree with the IT context, both registers must be low, ROR has
	 no immediate form, and a register shift amount must target the
	 same low register as the source.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 32-bit register-shift form.  */
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate shift: emit as MOV/MOVS with a shifted
		 operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 16-bit register-shift form.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 16-bit immediate-shift form (no ROR; excluded above).  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Divided (pre-UAL) syntax: only the 16-bit encodings exist.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12559
12560 static void
12561 do_t_simd (void)
12562 {
12563 unsigned Rd, Rn, Rm;
12564
12565 Rd = inst.operands[0].reg;
12566 Rn = inst.operands[1].reg;
12567 Rm = inst.operands[2].reg;
12568
12569 reject_bad_reg (Rd);
12570 reject_bad_reg (Rn);
12571 reject_bad_reg (Rm);
12572
12573 inst.instruction |= Rd << 8;
12574 inst.instruction |= Rn << 16;
12575 inst.instruction |= Rm;
12576 }
12577
12578 static void
12579 do_t_simd2 (void)
12580 {
12581 unsigned Rd, Rn, Rm;
12582
12583 Rd = inst.operands[0].reg;
12584 Rm = inst.operands[1].reg;
12585 Rn = inst.operands[2].reg;
12586
12587 reject_bad_reg (Rd);
12588 reject_bad_reg (Rn);
12589 reject_bad_reg (Rm);
12590
12591 inst.instruction |= Rd << 8;
12592 inst.instruction |= Rn << 16;
12593 inst.instruction |= Rm;
12594 }
12595
12596 static void
12597 do_t_smc (void)
12598 {
12599 unsigned int value = inst.reloc.exp.X_add_number;
12600 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
12601 _("SMC is not permitted on this architecture"));
12602 constraint (inst.reloc.exp.X_op != O_constant,
12603 _("expression too complex"));
12604 inst.reloc.type = BFD_RELOC_UNUSED;
12605 inst.instruction |= (value & 0xf000) >> 12;
12606 inst.instruction |= (value & 0x0ff0);
12607 inst.instruction |= (value & 0x000f) << 16;
12608 /* PR gas/15623: SMC instructions must be last in an IT block. */
12609 set_it_insn_type_last ();
12610 }
12611
12612 static void
12613 do_t_hvc (void)
12614 {
12615 unsigned int value = inst.reloc.exp.X_add_number;
12616
12617 inst.reloc.type = BFD_RELOC_UNUSED;
12618 inst.instruction |= (value & 0x0fff);
12619 inst.instruction |= (value & 0xf000) << 4;
12620 }
12621
12622 static void
12623 do_t_ssat_usat (int bias)
12624 {
12625 unsigned Rd, Rn;
12626
12627 Rd = inst.operands[0].reg;
12628 Rn = inst.operands[2].reg;
12629
12630 reject_bad_reg (Rd);
12631 reject_bad_reg (Rn);
12632
12633 inst.instruction |= Rd << 8;
12634 inst.instruction |= inst.operands[1].imm - bias;
12635 inst.instruction |= Rn << 16;
12636
12637 if (inst.operands[3].present)
12638 {
12639 offsetT shift_amount = inst.reloc.exp.X_add_number;
12640
12641 inst.reloc.type = BFD_RELOC_UNUSED;
12642
12643 constraint (inst.reloc.exp.X_op != O_constant,
12644 _("expression too complex"));
12645
12646 if (shift_amount != 0)
12647 {
12648 constraint (shift_amount > 31,
12649 _("shift expression is too large"));
12650
12651 if (inst.operands[3].shift_kind == SHIFT_ASR)
12652 inst.instruction |= 0x00200000; /* sh bit. */
12653
12654 inst.instruction |= (shift_amount & 0x1c) << 10;
12655 inst.instruction |= (shift_amount & 0x03) << 6;
12656 }
12657 }
12658 }
12659
/* Encode a Thumb-2 SSAT instruction.  The saturate position is encoded
   biased by one, hence the bias argument of 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12665
12666 static void
12667 do_t_ssat16 (void)
12668 {
12669 unsigned Rd, Rn;
12670
12671 Rd = inst.operands[0].reg;
12672 Rn = inst.operands[2].reg;
12673
12674 reject_bad_reg (Rd);
12675 reject_bad_reg (Rn);
12676
12677 inst.instruction |= Rd << 8;
12678 inst.instruction |= inst.operands[1].imm - 1;
12679 inst.instruction |= Rn << 16;
12680 }
12681
/* Encode a Thumb-2 STREX instruction.  The address operand (operand 2)
   must be a plain base register with at most an immediate offset:
   post-indexing, writeback, register offsets, shifts and negative
   offsets are all rejected, as is PC as the base.  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The immediate offset is applied later via this fixup.  */
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12698
12699 static void
12700 do_t_strexd (void)
12701 {
12702 if (!inst.operands[2].present)
12703 inst.operands[2].reg = inst.operands[1].reg + 1;
12704
12705 constraint (inst.operands[0].reg == inst.operands[1].reg
12706 || inst.operands[0].reg == inst.operands[2].reg
12707 || inst.operands[0].reg == inst.operands[3].reg,
12708 BAD_OVERLAP);
12709
12710 inst.instruction |= inst.operands[0].reg;
12711 inst.instruction |= inst.operands[1].reg << 12;
12712 inst.instruction |= inst.operands[2].reg << 8;
12713 inst.instruction |= inst.operands[3].reg << 16;
12714 }
12715
12716 static void
12717 do_t_sxtah (void)
12718 {
12719 unsigned Rd, Rn, Rm;
12720
12721 Rd = inst.operands[0].reg;
12722 Rn = inst.operands[1].reg;
12723 Rm = inst.operands[2].reg;
12724
12725 reject_bad_reg (Rd);
12726 reject_bad_reg (Rn);
12727 reject_bad_reg (Rm);
12728
12729 inst.instruction |= Rd << 8;
12730 inst.instruction |= Rn << 16;
12731 inst.instruction |= Rm;
12732 inst.instruction |= inst.operands[3].imm << 4;
12733 }
12734
/* Encode a Thumb extend instruction (SXTH and friends).  A 16-bit
   encoding is used when the opcode has one, no specific size was
   requested, both registers are low, and no rotation is given;
   otherwise (unified syntax only) the 32-bit encoding with an optional
   rotation field is produced.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      /* 16-bit encoding.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      /* 32-bit encoding, with the rotation in bits [5:4].  */
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      /* Divided syntax only has the 16-bit form; report why it could
	 not be used.  */
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
12770
/* Encode a Thumb SVC/SWI instruction.  The immediate is applied via
   the BFD_RELOC_ARM_SWI fixup.  */
static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to the v6m however, not later architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
12787
/* Encode a Thumb-2 table-branch instruction (TBB/TBH).  Bit 4 of the
   opcode distinguishes the halfword variant from the byte variant.
   Must be the last instruction in an IT block.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Non-zero for the halfword (TBH) form.  */
  half = (inst.instruction & 0x10) != 0;
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only the halfword variant accepts a shifted index.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
12809
12810 static void
12811 do_t_udf (void)
12812 {
12813 if (!inst.operands[0].present)
12814 inst.operands[0].imm = 0;
12815
12816 if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
12817 {
12818 constraint (inst.size_req == 2,
12819 _("immediate value out of range"));
12820 inst.instruction = THUMB_OP32 (inst.instruction);
12821 inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
12822 inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
12823 }
12824 else
12825 {
12826 inst.instruction = THUMB_OP16 (inst.instruction);
12827 inst.instruction |= inst.operands[0].imm;
12828 }
12829
12830 set_it_insn_type (NEUTRAL_IT_INSN);
12831 }
12832
12833
/* Encode a Thumb-2 USAT instruction.  Unlike SSAT the saturate
   position is encoded directly, hence the bias argument of 0.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
12839
12840 static void
12841 do_t_usat16 (void)
12842 {
12843 unsigned Rd, Rn;
12844
12845 Rd = inst.operands[0].reg;
12846 Rn = inst.operands[2].reg;
12847
12848 reject_bad_reg (Rd);
12849 reject_bad_reg (Rn);
12850
12851 inst.instruction |= Rd << 8;
12852 inst.instruction |= inst.operands[1].imm;
12853 inst.instruction |= Rn << 16;
12854 }
12855
12856 /* Neon instruction encoder helpers. */
12857
12858 /* Encodings for the different types for various Neon opcodes. */
12859
/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of NEON_ENC_TAB: the alternative base encodings an
   overloaded Neon mnemonic can map to, selected according to the
   operand/type flavour in use.  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
12869
/* Map overloaded Neon opcodes to their respective encodings.  Each row
   is expanded twice via the X macro: once to build the N_MNEM_ enum and
   once to build the neon_enc_tab encoding table below.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,	0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,	0x1000b00, N_INV,     0x0800d40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,	0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,	0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,	0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,	0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,	0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,	0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,	0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,	0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,	0x3ba0380, N_INV,     N_INV)

/* Mnemonic identifiers, one per NEON_ENC_TAB row.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the encoding of the given flavour and
   mark the instruction as Neon.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Diagnose a Neon type suffix attached to a non-Neon instruction.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
12995
12996 /* Define shapes for instruction operands. The following mnemonic characters
12997 are used in this table:
12998
12999 F - VFP S<n> register
13000 D - Neon D<n> register
13001 Q - Neon Q<n> register
13002 I - Immediate
13003 S - Scalar
13004 R - ARM register
13005 L - D<n> register list
13006
13007 This table is used to generate various data:
13008 - enumerations of the form NS_DDR to be used as arguments to
13009 neon_select_shape.
13010 - a table classifying shapes into single, double, quad, mixed.
13011 - a table used to drive neon_select_shape. */
13012
/* Master list of operand shapes.  Expanded several times below via the
   X/S2/S3/S4 macros to build the neon_shape enum, the shape-class table
   and the shape-info table.  */
#define NEON_SHAPE_DEF			\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE)

#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* Enumeration of shapes, e.g. NS_DDD, plus the NS_NULL sentinel.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4

enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Classification of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  32,
  64,
  128,
  0,
  32,
  32,
  0
};

/* Element count and element kinds for one shape.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

/* Per-shape operand descriptions, indexed by enum neon_shape; drives
   neon_select_shape below.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13133
13134 /* Bit masks used in type checking given instructions.
13135 'N_EQK' means the type must be the same as (or based on in some way) the key
13136 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13137 set, various other bits can be set as well in order to modify the meaning of
13138 the type constraint. */
13139
enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64  = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  /* The modifier values below deliberately reuse the low type-bit
     values; they are only interpreted when N_EQK is also set.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Common groupings of the type bits above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13191
13192 /* Select a "shape" for the current instruction (describing register types or
13193 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13194 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13195 function of operand parsing, so this function doesn't need to be called.
13196 Shapes should be listed in order of decreasing length. */
13197
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in turn until one matches all the parsed
     operands; the NS_NULL sentinel terminates the argument list.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  /* Check operand J against the element kind this shape
	     requires.  */
	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13289
13290 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13291 means the Q bit should be set). */
13292
13293 static int
13294 neon_quad (enum neon_shape shape)
13295 {
13296 return neon_shape_class[shape] == SC_QUAD;
13297 }
13298
13299 static void
13300 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13301 unsigned *g_size)
13302 {
13303 /* Allow modification to be made to types which are constrained to be
13304 based on the key element, based on bits set alongside N_EQK. */
13305 if ((typebits & N_EQK) != 0)
13306 {
13307 if ((typebits & N_HLF) != 0)
13308 *g_size /= 2;
13309 else if ((typebits & N_DBL) != 0)
13310 *g_size *= 2;
13311 if ((typebits & N_SGN) != 0)
13312 *g_type = NT_signed;
13313 else if ((typebits & N_UNS) != 0)
13314 *g_type = NT_unsigned;
13315 else if ((typebits & N_INT) != 0)
13316 *g_type = NT_integer;
13317 else if ((typebits & N_FLT) != 0)
13318 *g_type = NT_float;
13319 else if ((typebits & N_SIZ) != 0)
13320 *g_type = NT_untyped;
13321 }
13322 }
13323
13324 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13325 operand type, i.e. the single type specified in a Neon instruction when it
13326 is the only one given. */
13327
13328 static struct neon_type_el
13329 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13330 {
13331 struct neon_type_el dest = *key;
13332
13333 gas_assert ((thisarg & N_EQK) != 0);
13334
13335 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13336
13337 return dest;
13338 }
13339
13340 /* Convert Neon type and size into compact bitmask representation. */
13341
static enum neon_type_mask
type_chk_of_el_type (enum neon_el_type type, unsigned size)
{
  switch (type)
    {
    case NT_untyped:
      switch (size)
	{
	case 8:  return N_8;
	case 16: return N_16;
	case 32: return N_32;
	case 64: return N_64;
	default: ;
	}
      break;

    case NT_integer:
      switch (size)
	{
	case 8:  return N_I8;
	case 16: return N_I16;
	case 32: return N_I32;
	case 64: return N_I64;
	default: ;
	}
      break;

    case NT_float:
      switch (size)
	{
	case 16: return N_F16;
	case 32: return N_F32;
	case 64: return N_F64;
	default: ;
	}
      break;

    case NT_poly:
      switch (size)
	{
	case 8:  return N_P8;
	case 16: return N_P16;
	case 64: return N_P64;
	default: ;
	}
      break;

    case NT_signed:
      switch (size)
	{
	case 8:  return N_S8;
	case 16: return N_S16;
	case 32: return N_S32;
	case 64: return N_S64;
	default: ;
	}
      break;

    case NT_unsigned:
      switch (size)
	{
	case 8:  return N_U8;
	case 16: return N_U16;
	case 32: return N_U32;
	case 64: return N_U64;
	default: ;
	}
      break;

    default: ;
    }

  /* Unrecognised type/size combinations fall through to "untyped".  */
  return N_UTYP;
}
13416
13417 /* Convert compact Neon bitmask type representation to a type and size. Only
13418 handles the case where a single bit is set in the mask. */
13419
static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  /* N_EQK masks encode modifiers, not concrete types, and cannot be
     decoded here.  */
  if ((mask & N_EQK) != 0)
    return FAIL;

  /* Recover the element size from whichever size-group bit is set.  */
  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
    *size = 64;
  else
    return FAIL;

  /* Recover the element kind from the type-group the bit belongs to.  */
  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
13455
13456 /* Modify a bitmask of allowed types. This is only needed for type
13457 relaxation. */
13458
13459 static unsigned
13460 modify_types_allowed (unsigned allowed, unsigned mods)
13461 {
13462 unsigned size;
13463 enum neon_el_type type;
13464 unsigned destmask;
13465 int i;
13466
13467 destmask = 0;
13468
13469 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13470 {
13471 if (el_type_of_type_chk (&type, &size,
13472 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13473 {
13474 neon_modify_type_size (mods, &type, &size);
13475 destmask |= type_chk_of_el_type (type, size);
13476 }
13477 }
13478
13479 return destmask;
13480 }
13481
13482 /* Check type and return type classification.
13483 The manual states (paraphrase): If one datatype is given, it indicates the
13484 type given in:
13485 - the second operand, if there is one
13486 - the operand, if there is no second operand
13487 - the result, if there are no operands.
13488 This isn't quite good enough though, so we use a concept of a "key" datatype
13489 which is set on a per-instruction basis, which is the one which matters when
13490 only one data type is written.
13491 Note: this function has side-effects (e.g. filling in missing operands). All
13492 Neon instructions should call it before performing bit encoding. */
13493
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any failure; callers detect this by testing for
     NT_invtype.  first_error will already have been called.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      /* Remember which operand carries the key (N_KEY) datatype.  */
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A datatype after the mnemonic and per-operand datatypes are mutually
     exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 establishes the key type/size; pass 1 checks each operand
     against the set of types it is allowed to have.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* N_EQK operands derive their allowed types from the key
	     operand, relaxed by modifier flags such as N_SGN/N_SIZ.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* An N_EQK operand must agree exactly with the (possibly
		     modified) key type.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
13670
13671 /* Neon-style VFP instruction forwarding. */
13672
13673 /* Thumb VFP instructions have 0xE in the condition field. */
13674
13675 static void
13676 do_vfp_cond_or_thumb (void)
13677 {
13678 inst.is_neon = 1;
13679
13680 if (thumb_mode)
13681 inst.instruction |= 0xe0000000;
13682 else
13683 inst.instruction |= inst.cond << 28;
13684 }
13685
13686 /* Look up and encode a simple mnemonic, for use as a helper function for the
13687 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13688 etc. It is assumed that operand parsing has already been done, and that the
13689 operands are in the form expected by the given opcode (this isn't necessarily
13690 the same as the form in which they were parsed, hence some massaging must
13691 take place before this function is called).
13692 Checks current arch version against that in the looked-up opcode. */
13693
13694 static void
13695 do_vfp_nsyn_opcode (const char *opname)
13696 {
13697 const struct asm_opcode *opcode;
13698
13699 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13700
13701 if (!opcode)
13702 abort ();
13703
13704 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13705 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13706 _(BAD_FPU));
13707
13708 inst.is_neon = 1;
13709
13710 if (thumb_mode)
13711 {
13712 inst.instruction = opcode->tvalue;
13713 opcode->tencode ();
13714 }
13715 else
13716 {
13717 inst.instruction = (inst.cond << 28) | opcode->avalue;
13718 opcode->aencode ();
13719 }
13720 }
13721
13722 static void
13723 do_vfp_nsyn_add_sub (enum neon_shape rs)
13724 {
13725 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13726
13727 if (rs == NS_FFF)
13728 {
13729 if (is_add)
13730 do_vfp_nsyn_opcode ("fadds");
13731 else
13732 do_vfp_nsyn_opcode ("fsubs");
13733 }
13734 else
13735 {
13736 if (is_add)
13737 do_vfp_nsyn_opcode ("faddd");
13738 else
13739 do_vfp_nsyn_opcode ("fsubd");
13740 }
13741 }
13742
13743 /* Check operand types to see if this is a VFP instruction, and if so call
13744 PFN (). */
13745
13746 static int
13747 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13748 {
13749 enum neon_shape rs;
13750 struct neon_type_el et;
13751
13752 switch (args)
13753 {
13754 case 2:
13755 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13756 et = neon_check_type (2, rs,
13757 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13758 break;
13759
13760 case 3:
13761 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13762 et = neon_check_type (3, rs,
13763 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13764 break;
13765
13766 default:
13767 abort ();
13768 }
13769
13770 if (et.type != NT_invtype)
13771 {
13772 pfn (rs);
13773 return SUCCESS;
13774 }
13775
13776 inst.error = NULL;
13777 return FAIL;
13778 }
13779
13780 static void
13781 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13782 {
13783 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13784
13785 if (rs == NS_FFF)
13786 {
13787 if (is_mla)
13788 do_vfp_nsyn_opcode ("fmacs");
13789 else
13790 do_vfp_nsyn_opcode ("fnmacs");
13791 }
13792 else
13793 {
13794 if (is_mla)
13795 do_vfp_nsyn_opcode ("fmacd");
13796 else
13797 do_vfp_nsyn_opcode ("fnmacd");
13798 }
13799 }
13800
13801 static void
13802 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13803 {
13804 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13805
13806 if (rs == NS_FFF)
13807 {
13808 if (is_fma)
13809 do_vfp_nsyn_opcode ("ffmas");
13810 else
13811 do_vfp_nsyn_opcode ("ffnmas");
13812 }
13813 else
13814 {
13815 if (is_fma)
13816 do_vfp_nsyn_opcode ("ffmad");
13817 else
13818 do_vfp_nsyn_opcode ("ffnmad");
13819 }
13820 }
13821
13822 static void
13823 do_vfp_nsyn_mul (enum neon_shape rs)
13824 {
13825 if (rs == NS_FFF)
13826 do_vfp_nsyn_opcode ("fmuls");
13827 else
13828 do_vfp_nsyn_opcode ("fmuld");
13829 }
13830
13831 static void
13832 do_vfp_nsyn_abs_neg (enum neon_shape rs)
13833 {
13834 int is_neg = (inst.instruction & 0x80) != 0;
13835 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13836
13837 if (rs == NS_FF)
13838 {
13839 if (is_neg)
13840 do_vfp_nsyn_opcode ("fnegs");
13841 else
13842 do_vfp_nsyn_opcode ("fabss");
13843 }
13844 else
13845 {
13846 if (is_neg)
13847 do_vfp_nsyn_opcode ("fnegd");
13848 else
13849 do_vfp_nsyn_opcode ("fabsd");
13850 }
13851 }
13852
13853 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13854 insns belong to Neon, and are handled elsewhere. */
13855
13856 static void
13857 do_vfp_nsyn_ldm_stm (int is_dbmode)
13858 {
13859 int is_ldm = (inst.instruction & (1 << 20)) != 0;
13860 if (is_ldm)
13861 {
13862 if (is_dbmode)
13863 do_vfp_nsyn_opcode ("fldmdbs");
13864 else
13865 do_vfp_nsyn_opcode ("fldmias");
13866 }
13867 else
13868 {
13869 if (is_dbmode)
13870 do_vfp_nsyn_opcode ("fstmdbs");
13871 else
13872 do_vfp_nsyn_opcode ("fstmias");
13873 }
13874 }
13875
13876 static void
13877 do_vfp_nsyn_sqrt (void)
13878 {
13879 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13880 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13881
13882 if (rs == NS_FF)
13883 do_vfp_nsyn_opcode ("fsqrts");
13884 else
13885 do_vfp_nsyn_opcode ("fsqrtd");
13886 }
13887
13888 static void
13889 do_vfp_nsyn_div (void)
13890 {
13891 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13892 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13893 N_F32 | N_F64 | N_KEY | N_VFP);
13894
13895 if (rs == NS_FFF)
13896 do_vfp_nsyn_opcode ("fdivs");
13897 else
13898 do_vfp_nsyn_opcode ("fdivd");
13899 }
13900
13901 static void
13902 do_vfp_nsyn_nmul (void)
13903 {
13904 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13905 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13906 N_F32 | N_F64 | N_KEY | N_VFP);
13907
13908 if (rs == NS_FFF)
13909 {
13910 NEON_ENCODE (SINGLE, inst);
13911 do_vfp_sp_dyadic ();
13912 }
13913 else
13914 {
13915 NEON_ENCODE (DOUBLE, inst);
13916 do_vfp_dp_rd_rn_rm ();
13917 }
13918 do_vfp_cond_or_thumb ();
13919 }
13920
13921 static void
13922 do_vfp_nsyn_cmp (void)
13923 {
13924 if (inst.operands[1].isreg)
13925 {
13926 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13927 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13928
13929 if (rs == NS_FF)
13930 {
13931 NEON_ENCODE (SINGLE, inst);
13932 do_vfp_sp_monadic ();
13933 }
13934 else
13935 {
13936 NEON_ENCODE (DOUBLE, inst);
13937 do_vfp_dp_rd_rm ();
13938 }
13939 }
13940 else
13941 {
13942 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
13943 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
13944
13945 switch (inst.instruction & 0x0fffffff)
13946 {
13947 case N_MNEM_vcmp:
13948 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
13949 break;
13950 case N_MNEM_vcmpe:
13951 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
13952 break;
13953 default:
13954 abort ();
13955 }
13956
13957 if (rs == NS_FI)
13958 {
13959 NEON_ENCODE (SINGLE, inst);
13960 do_vfp_sp_compare_z ();
13961 }
13962 else
13963 {
13964 NEON_ENCODE (DOUBLE, inst);
13965 do_vfp_dp_rd ();
13966 }
13967 }
13968 do_vfp_cond_or_thumb ();
13969 }
13970
/* Shift the parsed operand into slot 1 and synthesize a writeback SP
   operand ("sp!") in slot 0, ready for encoding as an fldm/fstm-style
   instruction (used by the vpush/vpop handlers below).  */

static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = REG_SP;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}
13981
13982 static void
13983 do_vfp_nsyn_push (void)
13984 {
13985 nsyn_insert_sp ();
13986 if (inst.operands[1].issingle)
13987 do_vfp_nsyn_opcode ("fstmdbs");
13988 else
13989 do_vfp_nsyn_opcode ("fstmdbd");
13990 }
13991
13992 static void
13993 do_vfp_nsyn_pop (void)
13994 {
13995 nsyn_insert_sp ();
13996 if (inst.operands[1].issingle)
13997 do_vfp_nsyn_opcode ("fldmias");
13998 else
13999 do_vfp_nsyn_opcode ("fldmiad");
14000 }
14001
14002 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14003 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14004
14005 static void
14006 neon_dp_fixup (struct arm_it* insn)
14007 {
14008 unsigned int i = insn->instruction;
14009 insn->is_neon = 1;
14010
14011 if (thumb_mode)
14012 {
14013 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14014 if (i & (1 << 24))
14015 i |= 1 << 28;
14016
14017 i &= ~(1 << 24);
14018
14019 i |= 0xef000000;
14020 }
14021 else
14022 i |= 0xf2000000;
14023
14024 insn->instruction = i;
14025 }
14026
14027 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
14028 (0, 1, 2, 3). */
14029
/* Turn an element size (8, 16, 32, 64) into the corresponding bit
   number minus 3 (0, 1, 2, 3); only meaningful for those inputs.  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the lowest set bit.  */
  unsigned lowbit = (unsigned) ffs ((int) x);

  return lowbit - 4;
}
14035
14036 #define LOW4(R) ((R) & 0xf)
14037 #define HI1(R) (((R) >> 4) & 1)
14038
14039 /* Encode insns with bit pattern:
14040
14041 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14042 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14043
14044 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14045 different meaning for some instruction. */
14046
14047 static void
14048 neon_three_same (int isquad, int ubit, int size)
14049 {
14050 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14051 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14052 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14053 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14054 inst.instruction |= LOW4 (inst.operands[2].reg);
14055 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14056 inst.instruction |= (isquad != 0) << 6;
14057 inst.instruction |= (ubit != 0) << 24;
14058 if (size != -1)
14059 inst.instruction |= neon_logbits (size) << 20;
14060
14061 neon_dp_fixup (&inst);
14062 }
14063
14064 /* Encode instructions of the form:
14065
14066 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14067 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14068
14069 Don't write size if SIZE == -1. */
14070
14071 static void
14072 neon_two_same (int qbit, int ubit, int size)
14073 {
14074 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14075 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14076 inst.instruction |= LOW4 (inst.operands[1].reg);
14077 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14078 inst.instruction |= (qbit != 0) << 6;
14079 inst.instruction |= (ubit != 0) << 24;
14080
14081 if (size != -1)
14082 inst.instruction |= neon_logbits (size) << 18;
14083
14084 neon_dp_fixup (&inst);
14085 }
14086
14087 /* Neon instruction encoders, in approximate order of appearance. */
14088
14089 static void
14090 do_neon_dyadic_i_su (void)
14091 {
14092 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14093 struct neon_type_el et = neon_check_type (3, rs,
14094 N_EQK, N_EQK, N_SU_32 | N_KEY);
14095 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14096 }
14097
14098 static void
14099 do_neon_dyadic_i64_su (void)
14100 {
14101 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14102 struct neon_type_el et = neon_check_type (3, rs,
14103 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14104 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14105 }
14106
/* Encode an immediate-shift instruction: Rd in operand 0, Rm in operand
   1, shift amount IMMBITS, with element type ET already checked.  The
   element size (in bytes) is folded into bits 19+ and bit 7; the U bit
   is only written when WRITE_UBIT is set.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;	/* Element size in bytes: 1, 2, 4 or 8.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  /* Bit 7 is set only for 8-byte (64-bit) elements.  */
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14125
/* Encode VSHL.  The immediate form shifts by a literal; the register
   form uses the three-same encoding with the source registers swapped
   to match the "vshl Dd, Dm, Dn" operand order.  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      /* A shift count of et.size or more is not representable.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14161
/* Encode VQSHL.  Same structure as do_neon_shl_imm, but the immediate
   form accepts all signed/unsigned types and always writes the U bit.  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      /* A shift count of et.size or more is not representable.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
14191
14192 static void
14193 do_neon_rshl (void)
14194 {
14195 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14196 struct neon_type_el et = neon_check_type (3, rs,
14197 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14198 unsigned int tmp;
14199
14200 tmp = inst.operands[2].reg;
14201 inst.operands[2].reg = inst.operands[1].reg;
14202 inst.operands[1].reg = tmp;
14203 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14204 }
14205
/* Find the "cmode" encoding (and the 8-bit value left in *IMMBITS) that
   reproduces IMMEDIATE for a logic instruction with SIZE-bit elements.
   Returns the cmode value, or FAIL (with an error reported) when the
   immediate is not encodable.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      /* 32-bit elements: the byte may occupy any of the four byte lanes.  */
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      /* Not a shifted byte; it may still be encodable as a 16-bit
	 pattern if the two halves are identical.  */
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  /* 16-bit elements: the byte may occupy either half.  */
  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
14261
/* Encode the Neon bitwise-logic instructions.  The register-register
   form uses the three-same encoding; VBIC and VORR (and the VAND/VORN
   pseudo-instructions, which invert the immediate) also accept an
   immediate operand, in either two- or three-operand syntax.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      /* The element type is irrelevant to a bitwise operation.  */
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form: "Vop Dd, #imm" or "Vop Dd, Dd, #imm".  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      /* Type check failed: first_error has already been called.  */
      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      /* Unencodable immediate: error already reported.  */
      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14347
14348 static void
14349 do_neon_bitfield (void)
14350 {
14351 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14352 neon_check_type (3, rs, N_IGNORE_TYPE);
14353 neon_three_same (neon_quad (rs), 0, -1);
14354 }
14355
14356 static void
14357 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14358 unsigned destbits)
14359 {
14360 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14361 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14362 types | N_KEY);
14363 if (et.type == NT_float)
14364 {
14365 NEON_ENCODE (FLOAT, inst);
14366 neon_three_same (neon_quad (rs), 0, -1);
14367 }
14368 else
14369 {
14370 NEON_ENCODE (INTEGER, inst);
14371 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14372 }
14373 }
14374
/* Dyadic op accepting signed/unsigned/float 32-bit-and-narrower types;
   unsigned types set the U bit.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14380
/* As do_neon_dyadic_if_su, for D-register-only instructions.  */

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14388
/* Dyadic op accepting integer/float 32-bit-and-narrower types, with the
   U bit never set (NT_untyped can never match a real element type).  */

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14396
/* Bitmask flags selecting which checks/fixups vfp_or_neon_is_neon
   performs.  */

enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,		/* Check/fix up the condition field.  */
  NEON_CHECK_ARCH = 2,		/* Require the Neon v1 extension.  */
  NEON_CHECK_ARCH8 = 4		/* Require the ARMv8 Neon extension.  */
};
14403
14404 /* Call this function if an instruction which may have belonged to the VFP or
14405 Neon instruction sets, but turned out to be a Neon instruction (due to the
14406 operand types involved, etc.). We have to check and/or fix-up a couple of
14407 things:
14408
14409 - Make sure the user hasn't attempted to make a Neon instruction
14410 conditional.
14411 - Alter the value in the condition code field if necessary.
14412 - Make sure that the arch supports Neon instructions.
14413
14414 Which of these operations take place depends on bits from enum
14415 vfp_or_neon_is_neon_bits.
14416
14417 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14418 current instruction's condition is COND_ALWAYS, the condition field is
14419 changed to inst.uncond_value. This is necessary because instructions shared
14420 between VFP and Neon may be conditional for the VFP variants only, and the
14421 unconditional Neon version must have, e.g., 0xF in the condition field. */
14422
static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      /* See the warning in the comment above: shared VFP/Neon opcodes
	 need their Neon-specific "condition" value inserted.  */
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  /* Neon v1 support is required for NEON_CHECK_ARCH...  */
  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  /* ...and the ARMv8 Neon extension for NEON_CHECK_ARCH8.  */
  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
14454
14455 static void
14456 do_neon_addsub_if_i (void)
14457 {
14458 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14459 return;
14460
14461 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14462 return;
14463
14464 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14465 affected if we specify unsigned args. */
14466 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14467 }
14468
14469 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14470 result to be:
14471 V<op> A,B (A is operand 0, B is operand 2)
14472 to mean:
14473 V<op> A,B,A
14474 not:
14475 V<op> A,B,B
14476 so handle that case specially. */
14477
14478 static void
14479 neon_exchange_operands (void)
14480 {
14481 void *scratch = alloca (sizeof (inst.operands[0]));
14482 if (inst.operands[1].present)
14483 {
14484 /* Swap operands[1] and operands[2]. */
14485 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14486 inst.operands[1] = inst.operands[2];
14487 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14488 }
14489 else
14490 {
14491 inst.operands[1] = inst.operands[2];
14492 inst.operands[2] = inst.operands[0];
14493 }
14494 }
14495
/* Encode Neon compare instructions.  With a register third operand the
   generic dyadic encoding is used, exchanging operands first when INVERT
   is set (so an inverted comparison is realized with the operands
   swapped).  Otherwise the compare-against-immediate encoding is used;
   IMMTYPES lists the element types it permits.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Bit 10 selects the float variant of the comparison.  */
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
14523
/* Non-inverted compare: operands are used in the order written.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
14529
/* Inverted compare: implemented by exchanging the register operands.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
14535
/* Equality compare: integer or float types, both register and
   immediate forms.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
14541
14542 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14543 scalars, which are encoded in 5 bits, M : Rm.
14544 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14545 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14546 index in M. */
14547
/* Re-encode SCALAR (a register+index pair) into the 5-bit M:Rm field
   used by multiply instructions, for ELSIZE-bit elements.  16-bit
   scalars put the register in Rm[2:0] and the index in M:Rm[3]; 32-bit
   scalars put the register in Rm[3:0] and the index in M.  On any
   out-of-range combination an error is reported and 0 returned.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  if (elsize == 16 && reg <= 7 && idx <= 3)
    return reg | (idx << 3);

  if (elsize == 32 && reg <= 15 && idx <= 1)
    return reg | (idx << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
14573
14574 /* Encode multiply / multiply-accumulate scalar instructions. */
14575
/* Encode multiply / multiply-accumulate scalar instructions.  ET is the
   already-checked element type (UBIT goes to bit 24); the scalar in
   operand 2 is re-encoded into the M:Rm field by neon_scalar_for_mul.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  /* Bit 8 selects the float variant.  */
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
14598
/* Handle VMLA/VMLS-style multiply-accumulate, either by-scalar or
   three-register, preferring the VFP synonym when it applies.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      /* By-scalar form: third operand is Dm[x].  */
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
14623
/* Handle VFMA/VFMS, first trying the VFP synonym encoding.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* NT_untyped keeps the signedness of the arguments from affecting the
     U bit (same trick as do_neon_mac_maybe_scalar).  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14635
/* Encode VTST: a three-same operation accepting untyped 8/16/32-bit
   elements; the keyed (third) operand fixes the size encoding.  */

static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
14644
14645 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14646 same types as the MAC equivalents. The polynomial type for this instruction
14647 is encoded the same as the integer type. */
14648
14649 static void
14650 do_neon_mul (void)
14651 {
14652 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14653 return;
14654
14655 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14656 return;
14657
14658 if (inst.operands[2].isscalar)
14659 do_neon_mac_maybe_scalar ();
14660 else
14661 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
14662 }
14663
/* Encode VQDMULH/VQRDMULH, either by-scalar or three-register.  Only
   signed 16/32-bit element types are accepted.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
14685
/* Absolute floating-point compare (F32 only); the size field is already
   part of the opcode bit mask, hence -1.  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}

/* The inverted comparison is encoded as the plain one with the two
   source operands exchanged.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
14701
/* F32-only three-same "step" operation; the size field is part of the
   opcode bit mask, hence -1 here.  */

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
14709
/* Encode VABS/VNEG, preferring the VFP synonym when it applies.
   Accepts signed integer and F32 element types.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  /* Vd in 15:12/D in 22, Vm in 3:0/M in 5; Q in bit 6, F in bit 10,
     element size in 19:18.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
14735
/* Encode VSLI (shift left and insert).  The shift amount must be in
   [0, element size - 1] and is encoded directly.  */

static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
14747
/* Encode VSRI (shift right and insert).  The shift amount must be in
   [1, element size] and is encoded as (size - imm).  */

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
14759
/* Encode VQSHLU (saturating shift left, unsigned result).  Only signed
   source types are accepted; N_EQK | N_UNS makes the destination the
   unsigned counterpart.  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
14776
/* Encode VQMOVN: narrowing move with saturation.  The destination is
   half the source width (N_HLF).  */

static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  /* Opcode bits 7:6 select the signedness variant.  */
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}

/* Encode VQMOVUN: narrowing move with saturation to an unsigned result.
   Source operands must be signed.  */

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
14801
/* Encode VQSHRN/VQRSHRN: saturating shift right and narrow.  A zero
   shift is turned into the equivalent VQMOVN.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  /* Shift must be in [1, narrowed element size].  */
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
14828
/* Encode VQSHRUN/VQRSHRUN: saturating shift right and narrow to an
   unsigned result.  A zero shift is turned into VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
14858
/* Encode VMOVN: narrowing move (no saturation); destination is half the
   source width.  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
14867
/* Encode VSHRN/VRSHRN: shift right and narrow.  A zero shift is turned
   into the equivalent VMOVN.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
14892
/* Encode VSHLL: shift left long.  The maximum shift (imm == element
   size) has a dedicated encoding; smaller shifts use the general
   immediate-shift encoding and a narrower set of types.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
14922
14923 /* Check the various types for the VCVT instruction, and return which version
14924 the current instruction is. */
14925
/* CVT_FLAVOUR_VAR expands CVT_VAR once per supported conversion.  The
   fields are: flavour name, destination type, source type, extra
   register-class bits, then the VFP mnemonics for the bitshift form,
   the plain form and the round-to-zero form (NULL where no such VFP
   form exists).  "whole_reg" and "key" are locals of the expansion
   context in get_neon_cvt_flavour below.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* Generate one enumerator per flavour.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR

/* Identify which conversion flavour matches the current instruction's
   types for shape RS.  Each CVT_VAR expansion probes one flavour with
   neon_check_type; a failed probe sets inst.error, which is cleared
   again when a later probe succeeds.  Returns
   neon_cvt_flavour_invalid if nothing matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}

/* Rounding modes for the VCVT family: a/n/p/m correspond to the
   VCVT{A,N,P,M} forms, z is used by plain VCVT (round towards zero) and
   x by VCVTR — see the do_neon_cvt* wrappers below.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,
  neon_cvt_mode_n,
  neon_cvt_mode_p,
  neon_cvt_mode_m,
  neon_cvt_mode_z,
  neon_cvt_mode_x,
  neon_cvt_mode_r
};
15000
15001 /* Neon-syntax VFP conversions. */
15002
/* Encode a Neon-syntax conversion using the equivalent VFP mnemonic,
   chosen from the CVT_FLAVOUR_VAR tables by FLAVOUR.  Shapes with an
   immediate use the bitshift mnemonic column, others the plain one;
   if the table entry is NULL nothing is emitted here.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The VFP bitshift form takes the destination register twice,
	     so operand 2 (the immediate) is shifted down into slot 1.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  /* enc[] entries may be NULL for flavours with no VFP equivalent.  */
  if (opname)
    do_vfp_nsyn_opcode (opname);
}
15046
/* Encode the round-towards-zero VFP form of a Neon-syntax conversion
   (third mnemonic column of CVT_FLAVOUR_VAR); silently does nothing if
   the flavour has no such form.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15063
/* Encode an FP v8 VCVT{A,N,P,M} conversion.  sz (bit 8) selects the
   double-precision source, op (bit 7) the signed result, and rm
   (bits 17:16) the rounding mode.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* FP v8 conversions are not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* FP v8 instructions are always unconditional (0xf condition field).  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15121
/* Common worker for all the VCVT* mnemonics.  Dispatches between the
   VFP synonym encodings, the Neon fixed-point/integer conversions, and
   the half-precision Neon forms, based on the operand shape, the
   conversion flavour and the requested rounding MODE.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	/* Neon fixed-point conversion (immediate fraction bits).  */
	unsigned immbits;
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	/* The encoded field holds 64 - fbits, i.e. 32 + (32 - imm).  */
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 Neon VCVT{A,N,P,M} with explicit rounding mode.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
	  /* Plain Neon integer <-> float conversion.  Also the target of
	     the goto above for fixed-point conversion with #0.  */
    int_encode:
	  {
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      /* Fixed opcodes: narrowing (f16 <- f32) or widening (f32 <- f16).  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15270
/* VCVTR: convert using the rounding mode in the FPSCR.  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}

/* VCVT: plain conversion, rounding towards zero.  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}

/* VCVTA: convert with "a" rounding mode.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}

/* VCVTN: convert with "n" rounding mode.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}

/* VCVTP: convert with "p" rounding mode.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}

/* VCVTM: convert with "m" rounding mode.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15306
/* Encode VCVTB/VCVTT.  T selects the top (1) or bottom (0) half of the
   single register holding the f16 value; TO is true when converting to
   f16; IS_DOUBLE is true for the f64 <-> f16 variants, which require
   the ARMv8 VFP extension.  */

static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* The D register appears on the side holding the full-width value.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
15322
/* Determine which VCVTB/VCVTT variant the operands describe and encode
   it via do_neon_cvttb_2.  Each neon_check_type call below is a probe:
   a failed probe sets inst.error, which is cleared before moving on to
   the successful encoding.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);

  /* Bail out early on a bogus operand combination; probing types on an
     invalid shape would otherwise trigger an internal error.  */
  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    /* No variant matched; inst.error holds the last probe's message.  */
    return;
}
15363
/* VCVTB: operate on the bottom half of the single register.  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}


/* VCVTT: operate on the top half of the single register.  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15376
/* Encode the immediate form of VMOV/VMVN.  Finds a cmode/op encoding
   for the immediate, trying the inverted instruction (VMOV <-> VMVN)
   with the bit-inverted immediate if the direct form is unencodable.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* 64-bit immediates arrive split across imm (low) and reg (high).  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Rewrite bit 5 with the possibly-flipped MOV/MVN selector.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15428
/* Encode VMVN: register form directly, immediate form via
   neon_move_immediate.  */

static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);
}
15451
15452 /* Encode instructions of form:
15453
15454 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15455 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15456
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  /* Rd in 15:12/D in 22, Rn in 19:16/N in 7, Rm in 3:0/M in 5 (see the
     bit layout diagram above).  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  /* U bit in 24, element size in 21:20.  */
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
15471
/* Lengthening dyadic operation: Qd = Dn op Dm, destination double
   width (N_DBL).  */

static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

/* Encode VABAL: like the lengthening case above but the destination is
   restricted to integer types (N_INT).  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
15488
/* Common worker for long multiply/MAC instructions with both a
   by-scalar and a three-register form.  REGTYPES are the types allowed
   for the scalar form, SCALARTYPES those for the register form.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      /* For the scalar form the U bit reflects the signedness.  */
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
15507
/* Long multiply-accumulate, by-scalar or register form.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}

/* Widening dyadic operation: both destination and first source are
   double width (Qd = Qn op Dm).  */

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

/* Narrowing dyadic operation: Dd = Qn op Qm, result half width.  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}

/* Long saturating doubling multiply, by-scalar or register form;
   signed 16/32-bit types only.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
15538
/* Encode VMULL, including the polynomial (P8/P64) variants.  The
   by-scalar form is shared with the long MAC instructions.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Force the 0b10 size encoding for the P64 form.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
15570
/* Encode VEXT.  The element index (operand 3) is converted to a byte
   offset, which must fit in the destination register (8 bytes for D,
   16 for Q) and is placed in bits 11:8.  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
15592
/* Encode VREV16/VREV32/VREV64; which one is determined by the op field
   (bits 8:7) already present in the opcode.  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask.  We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
15609
/* Encode VDUP: either duplicate one lane of a vector (scalar form) or
   duplicate an ARM core register across all lanes of a vector.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* VDUP.<size> <Dd|Qd>, <Dm[x]>.  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Size (a one-hot bit, et.size/8) and lane index share one field:
	 the index is shifted above the size bit (see x above).  */
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
15660
15661 /* VMOV has particularly many variations. It can be one of:
15662 0. VMOV<c><q> <Qd>, <Qm>
15663 1. VMOV<c><q> <Dd>, <Dm>
15664 (Register operations, which are VORR with Rm = Rn.)
15665 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15666 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15667 (Immediate loads.)
15668 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15669 (ARM register to scalar.)
15670 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15671 (Two ARM registers to vector.)
15672 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15673 (Scalar to ARM register.)
15674 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15675 (Vector to two ARM registers.)
15676 8. VMOV.F32 <Sd>, <Sm>
15677 9. VMOV.F64 <Dd>, <Dm>
15678 (VFP register moves.)
15679 10. VMOV.F32 <Sd>, #imm
15680 11. VMOV.F64 <Dd>, #imm
15681 (VFP float immediate load.)
15682 12. VMOV <Rd>, <Sm>
15683 (VFP single to ARM reg.)
15684 13. VMOV <Sd>, <Rm>
15685 (ARM reg to VFP single.)
15686 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15687 (Two ARM regs to two VFP singles.)
15688 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15689 (Two VFP singles to two ARM regs.)
15690
15691 These cases can be disambiguated using neon_select_shape, except cases 1/9
15692 and 3/11 which depend on the operand type too.
15693
15694 All the encoded bits are hardcoded by this function.
15695
15696 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15697 Cases 5, 7 may be used with VFPv2 and above.
15698
15699 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15700 can specify a type where it doesn't make sense to, and is ignored). */
15701
static void
do_neon_mov (void)
{
  /* Disambiguate the 16 VMOV variants (see the comment above) by operand
     shape; cases 1/9 and 3/11 additionally need the element type.  */
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* Rn is set equal to Rm, making this VORR a plain register move.  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	/* The lane index sits above the size bits within the same field.  */
	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	/* Split the size/index bits across bits [6:5] and [22:21].  */
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* For sub-word elements the signedness is part of the encoding.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      /* Only values representable in the 8-bit VFP immediate format
	 are accepted.  */
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
15936
15937 static void
15938 do_neon_rshift_round_imm (void)
15939 {
15940 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15941 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15942 int imm = inst.operands[2].imm;
15943
15944 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
15945 if (imm == 0)
15946 {
15947 inst.operands[2].present = 0;
15948 do_neon_mov ();
15949 return;
15950 }
15951
15952 constraint (imm < 1 || (unsigned)imm > et.size,
15953 _("immediate out of range for shift"));
15954 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
15955 et.size - imm);
15956 }
15957
15958 static void
15959 do_neon_movl (void)
15960 {
15961 struct neon_type_el et = neon_check_type (2, NS_QD,
15962 N_EQK | N_DBL, N_SU_32 | N_KEY);
15963 unsigned sizebits = et.size >> 3;
15964 inst.instruction |= sizebits << 19;
15965 neon_two_same (0, et.type == NT_unsigned, -1);
15966 }
15967
15968 static void
15969 do_neon_trn (void)
15970 {
15971 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15972 struct neon_type_el et = neon_check_type (2, rs,
15973 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15974 NEON_ENCODE (INTEGER, inst);
15975 neon_two_same (neon_quad (rs), 1, et.size);
15976 }
15977
15978 static void
15979 do_neon_zip_uzp (void)
15980 {
15981 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15982 struct neon_type_el et = neon_check_type (2, rs,
15983 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15984 if (rs == NS_DD && et.size == 32)
15985 {
15986 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
15987 inst.instruction = N_MNEM_vtrn;
15988 do_neon_trn ();
15989 return;
15990 }
15991 neon_two_same (neon_quad (rs), 1, et.size);
15992 }
15993
15994 static void
15995 do_neon_sat_abs_neg (void)
15996 {
15997 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15998 struct neon_type_el et = neon_check_type (2, rs,
15999 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16000 neon_two_same (neon_quad (rs), 1, et.size);
16001 }
16002
16003 static void
16004 do_neon_pair_long (void)
16005 {
16006 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16007 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16008 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16009 inst.instruction |= (et.type == NT_unsigned) << 7;
16010 neon_two_same (neon_quad (rs), 1, et.size);
16011 }
16012
16013 static void
16014 do_neon_recip_est (void)
16015 {
16016 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16017 struct neon_type_el et = neon_check_type (2, rs,
16018 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
16019 inst.instruction |= (et.type == NT_float) << 8;
16020 neon_two_same (neon_quad (rs), 1, et.size);
16021 }
16022
16023 static void
16024 do_neon_cls (void)
16025 {
16026 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16027 struct neon_type_el et = neon_check_type (2, rs,
16028 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16029 neon_two_same (neon_quad (rs), 1, et.size);
16030 }
16031
16032 static void
16033 do_neon_clz (void)
16034 {
16035 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16036 struct neon_type_el et = neon_check_type (2, rs,
16037 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16038 neon_two_same (neon_quad (rs), 1, et.size);
16039 }
16040
16041 static void
16042 do_neon_cnt (void)
16043 {
16044 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16045 struct neon_type_el et = neon_check_type (2, rs,
16046 N_EQK | N_INT, N_8 | N_KEY);
16047 neon_two_same (neon_quad (rs), 1, et.size);
16048 }
16049
16050 static void
16051 do_neon_swp (void)
16052 {
16053 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16054 neon_two_same (neon_quad (rs), 1, -1);
16055 }
16056
16057 static void
16058 do_neon_tbl_tbx (void)
16059 {
16060 unsigned listlenbits;
16061 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
16062
16063 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
16064 {
16065 first_error (_("bad list length for table lookup"));
16066 return;
16067 }
16068
16069 listlenbits = inst.operands[1].imm - 1;
16070 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16071 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16072 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16073 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16074 inst.instruction |= LOW4 (inst.operands[2].reg);
16075 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16076 inst.instruction |= listlenbits << 8;
16077
16078 neon_dp_fixup (&inst);
16079 }
16080
/* Encode VLDM / VSTM (and the DB variants) for D-register lists; defers
   single-precision lists to the VFP encoder.  */
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register transfers as two 32-bit words.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16110
16111 static void
16112 do_neon_ldr_str (void)
16113 {
16114 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16115
16116 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16117 And is UNPREDICTABLE in thumb mode. */
16118 if (!is_ldr
16119 && inst.operands[1].reg == REG_PC
16120 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16121 {
16122 if (thumb_mode)
16123 inst.error = _("Use of PC here is UNPREDICTABLE");
16124 else if (warn_on_deprecated)
16125 as_tsktsk (_("Use of PC here is deprecated"));
16126 }
16127
16128 if (inst.operands[0].issingle)
16129 {
16130 if (is_ldr)
16131 do_vfp_nsyn_opcode ("flds");
16132 else
16133 do_vfp_nsyn_opcode ("fsts");
16134 }
16135 else
16136 {
16137 if (is_ldr)
16138 do_vfp_nsyn_opcode ("fldd");
16139 else
16140 do_vfp_nsyn_opcode ("fstd");
16141 }
16142 }
16143
16144 /* "interleave" version also handles non-interleaving register VLD1/VST1
16145 instructions. */
16146
16147 static void
16148 do_neon_ld_st_interleave (void)
16149 {
16150 struct neon_type_el et = neon_check_type (1, NS_NULL,
16151 N_8 | N_16 | N_32 | N_64);
16152 unsigned alignbits = 0;
16153 unsigned idx;
16154 /* The bits in this table go:
16155 0: register stride of one (0) or two (1)
16156 1,2: register list length, minus one (1, 2, 3, 4).
16157 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16158 We use -1 for invalid entries. */
16159 const int typetable[] =
16160 {
16161 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16162 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16163 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16164 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16165 };
16166 int typebits;
16167
16168 if (et.type == NT_invtype)
16169 return;
16170
16171 if (inst.operands[1].immisalign)
16172 switch (inst.operands[1].imm >> 8)
16173 {
16174 case 64: alignbits = 1; break;
16175 case 128:
16176 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
16177 && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16178 goto bad_alignment;
16179 alignbits = 2;
16180 break;
16181 case 256:
16182 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16183 goto bad_alignment;
16184 alignbits = 3;
16185 break;
16186 default:
16187 bad_alignment:
16188 first_error (_("bad alignment"));
16189 return;
16190 }
16191
16192 inst.instruction |= alignbits << 4;
16193 inst.instruction |= neon_logbits (et.size) << 6;
16194
16195 /* Bits [4:6] of the immediate in a list specifier encode register stride
16196 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16197 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16198 up the right value for "type" in a table based on this value and the given
16199 list style, then stick it back. */
16200 idx = ((inst.operands[0].imm >> 4) & 7)
16201 | (((inst.instruction >> 8) & 3) << 3);
16202
16203 typebits = typetable[idx];
16204
16205 constraint (typebits == -1, _("bad list type for instruction"));
16206 constraint (((inst.instruction >> 8) & 3) && et.size == 64,
16207 _("bad element type for instruction"));
16208
16209 inst.instruction &= ~0xf00;
16210 inst.instruction |= typebits << 8;
16211 }
16212
16213 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16214 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16215 otherwise. The variable arguments are a list of pairs of legal (size, align)
16216 values, terminated with -1. */
16217
16218 static int
16219 neon_alignment_bit (int size, int align, int *do_align, ...)
16220 {
16221 va_list ap;
16222 int result = FAIL, thissize, thisalign;
16223
16224 if (!inst.operands[1].immisalign)
16225 {
16226 *do_align = 0;
16227 return SUCCESS;
16228 }
16229
16230 va_start (ap, do_align);
16231
16232 do
16233 {
16234 thissize = va_arg (ap, int);
16235 if (thissize == -1)
16236 break;
16237 thisalign = va_arg (ap, int);
16238
16239 if (size == thissize && align == thisalign)
16240 result = SUCCESS;
16241 }
16242 while (result != SUCCESS);
16243
16244 va_end (ap);
16245
16246 if (result == SUCCESS)
16247 *do_align = 1;
16248 else
16249 first_error (_("unsupported alignment for instruction"));
16250
16251 return result;
16252 }
16253
/* Encode single-lane VLD<n> / VST<n>; <n> minus one is taken from bits
   [9:8] of the opcode template.  */
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Each <n> has its own set of legal (size, alignment) pairs and its own
     way of packing the alignment into the index field.  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16338
16339 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16340
16341 static void
16342 do_neon_ld_dup (void)
16343 {
16344 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
16345 int align_good, do_align = 0;
16346
16347 if (et.type == NT_invtype)
16348 return;
16349
16350 switch ((inst.instruction >> 8) & 3)
16351 {
16352 case 0: /* VLD1. */
16353 gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
16354 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16355 &do_align, 16, 16, 32, 32, -1);
16356 if (align_good == FAIL)
16357 return;
16358 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
16359 {
16360 case 1: break;
16361 case 2: inst.instruction |= 1 << 5; break;
16362 default: first_error (_("bad list length")); return;
16363 }
16364 inst.instruction |= neon_logbits (et.size) << 6;
16365 break;
16366
16367 case 1: /* VLD2. */
16368 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16369 &do_align, 8, 16, 16, 32, 32, 64, -1);
16370 if (align_good == FAIL)
16371 return;
16372 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
16373 _("bad list length"));
16374 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16375 inst.instruction |= 1 << 5;
16376 inst.instruction |= neon_logbits (et.size) << 6;
16377 break;
16378
16379 case 2: /* VLD3. */
16380 constraint (inst.operands[1].immisalign,
16381 _("can't use alignment with this instruction"));
16382 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
16383 _("bad list length"));
16384 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16385 inst.instruction |= 1 << 5;
16386 inst.instruction |= neon_logbits (et.size) << 6;
16387 break;
16388
16389 case 3: /* VLD4. */
16390 {
16391 int align = inst.operands[1].imm >> 8;
16392 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
16393 16, 64, 32, 64, 32, 128, -1);
16394 if (align_good == FAIL)
16395 return;
16396 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
16397 _("bad list length"));
16398 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16399 inst.instruction |= 1 << 5;
16400 if (et.size == 32 && align == 128)
16401 inst.instruction |= 0x3 << 6;
16402 else
16403 inst.instruction |= neon_logbits (et.size) << 6;
16404 }
16405 break;
16406
16407 default: ;
16408 }
16409
16410 inst.instruction |= do_align << 4;
16411 }
16412
16413 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16414 apart from bits [11:4]. */
16415
16416 static void
16417 do_neon_ldx_stx (void)
16418 {
16419 if (inst.operands[1].isreg)
16420 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16421
16422 switch (NEON_LANE (inst.operands[0].imm))
16423 {
16424 case NEON_INTERLEAVE_LANES:
16425 NEON_ENCODE (INTERLV, inst);
16426 do_neon_ld_st_interleave ();
16427 break;
16428
16429 case NEON_ALL_LANES:
16430 NEON_ENCODE (DUP, inst);
16431 if (inst.instruction == N_INV)
16432 {
16433 first_error ("only loads support such operands");
16434 break;
16435 }
16436 do_neon_ld_dup ();
16437 break;
16438
16439 default:
16440 NEON_ENCODE (LANE, inst);
16441 do_neon_ld_st_lane ();
16442 }
16443
16444 /* L bit comes from bit mask. */
16445 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16446 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16447 inst.instruction |= inst.operands[1].reg << 16;
16448
16449 if (inst.operands[1].postind)
16450 {
16451 int postreg = inst.operands[1].imm & 0xf;
16452 constraint (!inst.operands[1].immisreg,
16453 _("post-index must be a register"));
16454 constraint (postreg == 0xd || postreg == 0xf,
16455 _("bad register for post-index"));
16456 inst.instruction |= postreg;
16457 }
16458 else
16459 {
16460 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16461 constraint (inst.reloc.exp.X_op != O_constant
16462 || inst.reloc.exp.X_add_number != 0,
16463 BAD_ADDR_MODE);
16464
16465 if (inst.operands[1].writeback)
16466 {
16467 inst.instruction |= 0xd;
16468 }
16469 else
16470 inst.instruction |= 0xf;
16471 }
16472
16473 if (thumb_mode)
16474 inst.instruction |= 0xf9000000;
16475 else
16476 inst.instruction |= 0xf4000000;
16477 }
16478
/* FP v8.  */

/* Shared encoder for the three-operand FP v8 VFP instructions: picks the
   single- or double-precision dyadic encoder based on shape RS and forces
   the unconditional (0xF) top nibble.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF)
    do_vfp_sp_dyadic ();
  else
    do_vfp_dp_rd_rn_rm ();

  /* Set bit 8 for the double-precision form.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  inst.instruction |= 0xf0000000;
}
16501
/* Encode VSEL; must not appear inside an IT block.  */
static void
do_vsel (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  /* Only the VFP single/double shapes are valid for VSEL.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
    first_error (_("invalid instruction shape"));
}
16510
/* Encode VMAXNM / VMINNM; must not appear inside an IT block.  */
static void
do_vmaxnm (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);

  /* Try the scalar VFP form first...  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  /* ...otherwise fall back to the Advanced SIMD form.  */
  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_F32, 0);
}
16524
/* Common encoder for the VRINT family; MODE selects the rounding-mode
   encoding.  Handles both the VFP (scalar F32/F64) form and the Neon
   (vector F32) form.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      /* Modes a/n/p/m force the unconditional (0xF) top nibble.  */
      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* The vector form has no FPSCR-directed (mode r) variant.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
16607
16608 static void
16609 do_vrintx (void)
16610 {
16611 do_vrint_1 (neon_cvt_mode_x);
16612 }
16613
16614 static void
16615 do_vrintz (void)
16616 {
16617 do_vrint_1 (neon_cvt_mode_z);
16618 }
16619
16620 static void
16621 do_vrintr (void)
16622 {
16623 do_vrint_1 (neon_cvt_mode_r);
16624 }
16625
/* VRINTA: encode via the shared vrint helper with rounding mode 'a'.  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}
16631
/* VRINTN: encode via the shared vrint helper with rounding mode 'n'.  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}
16637
/* VRINTP: encode via the shared vrint helper with rounding mode 'p'.  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}
16643
/* VRINTM: encode via the shared vrint helper with rounding mode 'm'.  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
16649
16650 /* Crypto v1 instructions. */
16651 static void
16652 do_crypto_2op_1 (unsigned elttype, int op)
16653 {
16654 set_it_insn_type (OUTSIDE_IT_INSN);
16655
16656 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
16657 == NT_invtype)
16658 return;
16659
16660 inst.error = NULL;
16661
16662 NEON_ENCODE (INTEGER, inst);
16663 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16664 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16665 inst.instruction |= LOW4 (inst.operands[1].reg);
16666 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16667 if (op != -1)
16668 inst.instruction |= op << 6;
16669
16670 if (thumb_mode)
16671 inst.instruction |= 0xfc000000;
16672 else
16673 inst.instruction |= 0xf0000000;
16674 }
16675
16676 static void
16677 do_crypto_3op_1 (int u, int op)
16678 {
16679 set_it_insn_type (OUTSIDE_IT_INSN);
16680
16681 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
16682 N_32 | N_UNT | N_KEY).type == NT_invtype)
16683 return;
16684
16685 inst.error = NULL;
16686
16687 NEON_ENCODE (INTEGER, inst);
16688 neon_three_same (1, u, 8 << op);
16689 }
16690
/* AESE: two-operand crypto encode, 8-bit elements, op 0.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}
16696
/* AESD: two-operand crypto encode, 8-bit elements, op 1.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}
16702
/* AESMC: two-operand crypto encode, 8-bit elements, op 2.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}
16708
/* AESIMC: two-operand crypto encode, 8-bit elements, op 3.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}
16714
/* SHA1C: three-operand crypto encode, u=0, op=0.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}
16720
/* SHA1P: three-operand crypto encode, u=0, op=1.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}
16726
/* SHA1M: three-operand crypto encode, u=0, op=2.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}
16732
/* SHA1SU0: three-operand crypto encode, u=0, op=3.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}
16738
/* SHA256H: three-operand crypto encode, u=1, op=0.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}
16744
/* SHA256H2: three-operand crypto encode, u=1, op=1.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}
16750
/* SHA256SU1: three-operand crypto encode, u=1, op=2.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
16756
/* SHA1H: two-operand crypto encode, 32-bit elements, no op field.  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}
16762
/* SHA1SU1: two-operand crypto encode, 32-bit elements, op 0.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}
16768
/* SHA256SU0: two-operand crypto encode, 32-bit elements, op 1.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
16774
16775 static void
16776 do_crc32_1 (unsigned int poly, unsigned int sz)
16777 {
16778 unsigned int Rd = inst.operands[0].reg;
16779 unsigned int Rn = inst.operands[1].reg;
16780 unsigned int Rm = inst.operands[2].reg;
16781
16782 set_it_insn_type (OUTSIDE_IT_INSN);
16783 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
16784 inst.instruction |= LOW4 (Rn) << 16;
16785 inst.instruction |= LOW4 (Rm);
16786 inst.instruction |= sz << (thumb_mode ? 4 : 21);
16787 inst.instruction |= poly << (thumb_mode ? 20 : 9);
16788
16789 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
16790 as_warn (UNPRED_REG ("r15"));
16791 if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
16792 as_warn (UNPRED_REG ("r13"));
16793 }
16794
/* CRC32B: polynomial selector 0, size 0 (byte).  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}
16800
/* CRC32H: polynomial selector 0, size 1 (halfword).  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}
16806
/* CRC32W: polynomial selector 0, size 2 (word).  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}
16812
/* CRC32CB: polynomial selector 1 (the 'C' variant), size 0 (byte).  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}
16818
/* CRC32CH: polynomial selector 1 (the 'C' variant), size 1 (halfword).  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}
16824
/* CRC32CW: polynomial selector 1 (the 'C' variant), size 2 (word).  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
16830
16831 \f
16832 /* Overall per-instruction processing. */
16833
16834 /* We need to be able to fix up arbitrary expressions in some statements.
16835 This is so that we can handle symbols that are an arbitrary distance from
16836 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
16837 which returns part of an address in a form which will be valid for
16838 a data instruction. We do this by pushing the expression into a symbol
16839 in the expr_section, and creating a fix for that. */
16840
16841 static void
16842 fix_new_arm (fragS * frag,
16843 int where,
16844 short int size,
16845 expressionS * exp,
16846 int pc_rel,
16847 int reloc)
16848 {
16849 fixS * new_fix;
16850
16851 switch (exp->X_op)
16852 {
16853 case O_constant:
16854 if (pc_rel)
16855 {
16856 /* Create an absolute valued symbol, so we have something to
16857 refer to in the object file. Unfortunately for us, gas's
16858 generic expression parsing will already have folded out
16859 any use of .set foo/.type foo %function that may have
16860 been used to set type information of the target location,
16861 that's being specified symbolically. We have to presume
16862 the user knows what they are doing. */
16863 char name[16 + 8];
16864 symbolS *symbol;
16865
16866 sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
16867
16868 symbol = symbol_find_or_make (name);
16869 S_SET_SEGMENT (symbol, absolute_section);
16870 symbol_set_frag (symbol, &zero_address_frag);
16871 S_SET_VALUE (symbol, exp->X_add_number);
16872 exp->X_op = O_symbol;
16873 exp->X_add_symbol = symbol;
16874 exp->X_add_number = 0;
16875 }
16876 /* FALLTHROUGH */
16877 case O_symbol:
16878 case O_add:
16879 case O_subtract:
16880 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
16881 (enum bfd_reloc_code_real) reloc);
16882 break;
16883
16884 default:
16885 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
16886 pc_rel, (enum bfd_reloc_code_real) reloc);
16887 break;
16888 }
16889
16890 /* Mark whether the fix is to a THUMB instruction, or an ARM
16891 instruction. */
16892 new_fix->tc_fix_data = thumb_mode;
16893 }
16894
/* Create a frag for an instruction requiring relaxation.  */
/* Emit the current (Thumb, relaxable) instruction as a machine-dependent
   variant frag: reserve up to INSN_SIZE bytes, write the THUMB_SIZE
   encoding now, and let relaxation decide the final size later.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Reduce the relaxation target to the symbol + offset pair that
     frag_var carries.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      /* Arbitrary expression: wrap it in an expression symbol.  */
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
16926
16927 /* Write a 32-bit thumb instruction to buf. */
16928 static void
16929 put_thumb32_insn (char * buf, unsigned long insn)
16930 {
16931 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
16932 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
16933 }
16934
/* Commit the assembled instruction in `inst' to the output: report any
   accumulated error, or emit the bytes, the fix-up for its relocation
   (if any), and the DWARF line info.  STR is the source line, used only
   in diagnostics.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      /* Size not known yet -- emit a relaxable frag instead.  */
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-size ARM encoding: the same word is emitted twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
16981
16982 static char *
16983 output_it_inst (int cond, int mask, char * to)
16984 {
16985 unsigned long instruction = 0xbf00;
16986
16987 mask &= 0xf;
16988 instruction |= mask;
16989 instruction |= cond << 4;
16990
16991 if (to == NULL)
16992 {
16993 to = frag_more (2);
16994 #ifdef OBJ_ELF
16995 dwarf2_emit_insn (2);
16996 #endif
16997 }
16998
16999 md_number_to_chars (to, instruction, 2);
17000
17001 return to;
17002 }
17003
/* Tag values used in struct asm_opcode's tag field.  They describe how
   a mnemonic combines with a conditional affix; opcode_lookup uses them
   to decide where (if anywhere) a condition may appear.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17037
17038 /* Subroutine of md_assemble, responsible for looking up the primary
17039 opcode from the mnemonic the user wrote. STR points to the
17040 beginning of the mnemonic.
17041
17042 This is not simply a hash table lookup, because of conditional
17043 variants. Most instructions have conditional variants, which are
17044 expressed with a _conditional affix_ to the mnemonic. If we were
17045 to encode each conditional variant as a literal string in the opcode
17046 table, it would have approximately 20,000 entries.
17047
17048 Most mnemonics take this affix as a suffix, and in unified syntax,
17049 'most' is upgraded to 'all'. However, in the divided syntax, some
17050 instructions take the affix as an infix, notably the s-variants of
17051 the arithmetic instructions. Of those instructions, all but six
17052 have the infix appear after the third character of the mnemonic.
17053
17054 Accordingly, the algorithm for looking up primary opcodes given
17055 an identifier is:
17056
17057 1. Look up the identifier in the opcode table.
17058 If we find a match, go to step U.
17059
17060 2. Look up the last two characters of the identifier in the
17061 conditions table. If we find a match, look up the first N-2
17062 characters of the identifier in the opcode table. If we
17063 find a match, go to step CE.
17064
17065 3. Look up the fourth and fifth characters of the identifier in
17066 the conditions table. If we find a match, extract those
17067 characters from the identifier, and look up the remaining
17068 characters in the opcode table. If we find a match, go
17069 to step CM.
17070
17071 4. Fail.
17072
17073 U. Examine the tag field of the opcode structure, in case this is
17074 one of the six instructions with its conditional infix in an
17075 unusual place. If it is, the tag tells us where to find the
17076 infix; look it up in the conditions table and set inst.cond
17077 accordingly. Otherwise, this is an unconditional instruction.
17078 Again set inst.cond accordingly. Return the opcode structure.
17079
17080 CE. Examine the tag field to make sure this is an instruction that
17081 should receive a conditional suffix. If it is not, fail.
17082 Otherwise, set inst.cond from the suffix we already looked up,
17083 and return the opcode structure.
17084
17085 CM. Examine the tag field to make sure this is an instruction that
17086 should receive a conditional infix after the third character.
17087 If it is not, fail. Otherwise, undo the edits to the current
17088 line of input and proceed as for case CE. */
17089
17090 static const struct asm_opcode *
17091 opcode_lookup (char **str)
17092 {
17093 char *end, *base;
17094 char *affix;
17095 const struct asm_opcode *opcode;
17096 const struct asm_cond *cond;
17097 char save[2];
17098
17099 /* Scan up to the end of the mnemonic, which must end in white space,
17100 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17101 for (base = end = *str; *end != '\0'; end++)
17102 if (*end == ' ' || *end == '.')
17103 break;
17104
17105 if (end == base)
17106 return NULL;
17107
17108 /* Handle a possible width suffix and/or Neon type suffix. */
17109 if (end[0] == '.')
17110 {
17111 int offset = 2;
17112
17113 /* The .w and .n suffixes are only valid if the unified syntax is in
17114 use. */
17115 if (unified_syntax && end[1] == 'w')
17116 inst.size_req = 4;
17117 else if (unified_syntax && end[1] == 'n')
17118 inst.size_req = 2;
17119 else
17120 offset = 0;
17121
17122 inst.vectype.elems = 0;
17123
17124 *str = end + offset;
17125
17126 if (end[offset] == '.')
17127 {
17128 /* See if we have a Neon type suffix (possible in either unified or
17129 non-unified ARM syntax mode). */
17130 if (parse_neon_type (&inst.vectype, str) == FAIL)
17131 return NULL;
17132 }
17133 else if (end[offset] != '\0' && end[offset] != ' ')
17134 return NULL;
17135 }
17136 else
17137 *str = end;
17138
17139 /* Look for unaffixed or special-case affixed mnemonic. */
17140 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17141 end - base);
17142 if (opcode)
17143 {
17144 /* step U */
17145 if (opcode->tag < OT_odd_infix_0)
17146 {
17147 inst.cond = COND_ALWAYS;
17148 return opcode;
17149 }
17150
17151 if (warn_on_deprecated && unified_syntax)
17152 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17153 affix = base + (opcode->tag - OT_odd_infix_0);
17154 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17155 gas_assert (cond);
17156
17157 inst.cond = cond->value;
17158 return opcode;
17159 }
17160
17161 /* Cannot have a conditional suffix on a mnemonic of less than two
17162 characters. */
17163 if (end - base < 3)
17164 return NULL;
17165
17166 /* Look for suffixed mnemonic. */
17167 affix = end - 2;
17168 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17169 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17170 affix - base);
17171 if (opcode && cond)
17172 {
17173 /* step CE */
17174 switch (opcode->tag)
17175 {
17176 case OT_cinfix3_legacy:
17177 /* Ignore conditional suffixes matched on infix only mnemonics. */
17178 break;
17179
17180 case OT_cinfix3:
17181 case OT_cinfix3_deprecated:
17182 case OT_odd_infix_unc:
17183 if (!unified_syntax)
17184 return 0;
17185 /* else fall through */
17186
17187 case OT_csuffix:
17188 case OT_csuffixF:
17189 case OT_csuf_or_in3:
17190 inst.cond = cond->value;
17191 return opcode;
17192
17193 case OT_unconditional:
17194 case OT_unconditionalF:
17195 if (thumb_mode)
17196 inst.cond = cond->value;
17197 else
17198 {
17199 /* Delayed diagnostic. */
17200 inst.error = BAD_COND;
17201 inst.cond = COND_ALWAYS;
17202 }
17203 return opcode;
17204
17205 default:
17206 return NULL;
17207 }
17208 }
17209
17210 /* Cannot have a usual-position infix on a mnemonic of less than
17211 six characters (five would be a suffix). */
17212 if (end - base < 6)
17213 return NULL;
17214
17215 /* Look for infixed mnemonic in the usual position. */
17216 affix = base + 3;
17217 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17218 if (!cond)
17219 return NULL;
17220
17221 memcpy (save, affix, 2);
17222 memmove (affix, affix + 2, (end - affix) - 2);
17223 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17224 (end - base) - 2);
17225 memmove (affix + 2, affix, (end - affix) - 2);
17226 memcpy (affix, save, 2);
17227
17228 if (opcode
17229 && (opcode->tag == OT_cinfix3
17230 || opcode->tag == OT_cinfix3_deprecated
17231 || opcode->tag == OT_csuf_or_in3
17232 || opcode->tag == OT_cinfix3_legacy))
17233 {
17234 /* Step CM. */
17235 if (warn_on_deprecated && unified_syntax
17236 && (opcode->tag == OT_cinfix3
17237 || opcode->tag == OT_cinfix3_deprecated))
17238 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
17239
17240 inst.cond = cond->value;
17241 return opcode;
17242 }
17243
17244 return NULL;
17245 }
17246
17247 /* This function generates an initial IT instruction, leaving its block
17248 virtually open for the new instructions. Eventually,
17249 the mask will be updated by now_it_add_mask () each time
17250 a new instruction needs to be included in the IT block.
17251 Finally, the block is closed with close_automatic_it_block ().
17252 The block closure can be requested either from md_assemble (),
17253 a tencode (), or due to a label hook. */
17254
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Initial mask for a one-instruction block; extended later by
     now_it_add_mask as instructions join the block.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Emit the IT instruction now and remember its location so the mask
     can be rewritten in place (see now_it_add_mask).  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
17267
17268 /* Close an automatic IT block.
17269 See comments in new_automatic_it_block (). */
17270
static void
close_automatic_it_block (void)
{
  /* 0x10 is the 'last instruction' sentinel tested by
     it_fsm_post_encode.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
17277
/* Update the mask of the current automatically-generated IT
   instruction.  See comments in new_automatic_it_block ().  */

static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
					      | ((bitvalue) << (nbit)))
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Record the new instruction's then/else polarity in the mask slot
     for this block position...  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			      (5 - now_it.block_length));
  /* ...and move the trailing 1 bit down one position to terminate the
     mask after it.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Rewrite the already-emitted IT instruction with the new mask.  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
17301
17302 /* The IT blocks handling machinery is accessed through the these functions:
17303 it_fsm_pre_encode () from md_assemble ()
17304 set_it_insn_type () optional, from the tencode functions
17305 set_it_insn_type_last () ditto
17306 in_it_block () ditto
17307 it_fsm_post_encode () from md_assemble ()
     force_automatic_it_block_close ()	from label handling functions

17309
17310 Rationale:
17311 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17312 initializing the IT insn type with a generic initial value depending
17313 on the inst.condition.
17314 2) During the tencode function, two things may happen:
17315 a) The tencode function overrides the IT insn type by
17316 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17317 b) The tencode function queries the IT block state by
17318 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17319
17320 Both set_it_insn_type and in_it_block run the internal FSM state
17321 handling function (handle_it_state), because: a) setting the IT insn
17322 type may incur in an invalid state (exiting the function),
17323 and b) querying the state requires the FSM to be updated.
17324 Specifically we want to avoid creating an IT block for conditional
17325 branches, so it_fsm_pre_encode is actually a guess and we can't
17326 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
17328 Because of this, if set_it_insn_type and in_it_block have to be used,
17329 set_it_insn_type has to be called first.
17330
17331 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17332 determines the insn IT type depending on the inst.cond code.
17333 When a tencode () routine encodes an instruction that can be
17334 either outside an IT block, or, in the case of being inside, has to be
17335 the last one, set_it_insn_type_last () will determine the proper
17336 IT instruction type based on the inst.cond code. Otherwise,
17337 set_it_insn_type can be called for overriding that logic or
17338 for covering other cases.
17339
17340 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17342 still queried. Instead, if the FSM determines that the state should
17343 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17344 after the tencode () function: that's what it_fsm_post_encode () does.
17345
17346 Since in_it_block () calls the state handling function to get an
17347 updated state, an error may occur (due to invalid insns combination).
17348 In that case, inst.error is set.
17349 Therefore, inst.error has to be checked after the execution of
17350 the tencode () routine.
17351
17352 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17353 any pending state change (if any) that didn't take place in
17354 handle_it_state () as explained above. */
17355
17356 static void
17357 it_fsm_pre_encode (void)
17358 {
17359 if (inst.cond != COND_ALWAYS)
17360 inst.it_insn_type = INSIDE_IT_INSN;
17361 else
17362 inst.it_insn_type = OUTSIDE_IT_INSN;
17363
17364 now_it.state_handled = 0;
17365 }
17366
/* IT state FSM handling function.  Runs at most once per instruction
   (guarded by now_it.state_handled); combines the current IT-block
   state with inst.it_insn_type, possibly opening/extending/closing an
   automatic IT block.  Returns SUCCESS, or FAIL with inst.error set
   when the instruction is illegal in the current IT context.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT instruction opens a manual block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  /* Case c): block full or condition mismatch.  */
	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      /* Case a): extend the block with this instruction.  */
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  /* Case b): this instruction must be the block's last.  */
	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	/* Consume one slot of the explicit IT mask.  */
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
17530
/* A masked match against an encoded 16-bit Thumb instruction, with a
   human-readable class description for the deprecation diagnostic.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Value to match after masking.  */
  unsigned long mask;		/* Which encoding bits to compare.  */
  const char* description;	/* Instruction class, for the warning.  */
};
17537
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  An entry matches when (insn & mask) == pattern; see
   it_fsm_post_encode.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode.  'tvalue' is used at the stage this check happens.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }	/* Terminator.  */
};
17552
/* Called from md_assemble after an instruction has been encoded:
   issues the ARMv8 deprecation warnings for conditional instructions
   inside IT blocks, then commits any pending transition out of the
   block (when the mask says this was the last instruction).  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    {
      /* Any 32-bit Thumb encoding in an IT block is deprecated.  */
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		     "deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  /* Otherwise scan the table of deprecated 16-bit classes.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
			     "of the following class are deprecated in ARMv8: "
			     "%s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		     "instruction are deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* Mask 0x10 marks the block's last instruction; leave the block.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
17606
17607 static void
17608 force_automatic_it_block_close (void)
17609 {
17610 if (now_it.state == AUTOMATIC_IT_BLOCK)
17611 {
17612 close_automatic_it_block ();
17613 now_it.state = OUTSIDE_IT_BLOCK;
17614 now_it.mask = 0;
17615 }
17616 }
17617
/* Return non-zero if we are currently inside an IT block, running the
   state machine first if this instruction has not yet been handled.  */
static int
in_it_block (void)
{
  if (!now_it.state_handled)
    handle_it_state ();

  return now_it.state != OUTSIDE_IT_BLOCK;
}
17626
17627 void
17628 md_assemble (char *str)
17629 {
17630 char *p = str;
17631 const struct asm_opcode * opcode;
17632
17633 /* Align the previous label if needed. */
17634 if (last_label_seen != NULL)
17635 {
17636 symbol_set_frag (last_label_seen, frag_now);
17637 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
17638 S_SET_SEGMENT (last_label_seen, now_seg);
17639 }
17640
17641 memset (&inst, '\0', sizeof (inst));
17642 inst.reloc.type = BFD_RELOC_UNUSED;
17643
17644 opcode = opcode_lookup (&p);
17645 if (!opcode)
17646 {
17647 /* It wasn't an instruction, but it might be a register alias of
17648 the form alias .req reg, or a Neon .dn/.qn directive. */
17649 if (! create_register_alias (str, p)
17650 && ! create_neon_reg_alias (str, p))
17651 as_bad (_("bad instruction `%s'"), str);
17652
17653 return;
17654 }
17655
17656 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
17657 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
17658
17659 /* The value which unconditional instructions should have in place of the
17660 condition field. */
17661 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
17662
17663 if (thumb_mode)
17664 {
17665 arm_feature_set variant;
17666
17667 variant = cpu_variant;
17668 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
17669 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
17670 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
17671 /* Check that this instruction is supported for this CPU. */
17672 if (!opcode->tvariant
17673 || (thumb_mode == 1
17674 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
17675 {
17676 as_bad (_("selected processor does not support Thumb mode `%s'"), str);
17677 return;
17678 }
17679 if (inst.cond != COND_ALWAYS && !unified_syntax
17680 && opcode->tencode != do_t_branch)
17681 {
17682 as_bad (_("Thumb does not support conditional execution"));
17683 return;
17684 }
17685
17686 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
17687 {
17688 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
17689 && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
17690 || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
17691 {
17692 /* Two things are addressed here.
17693 1) Implicit require narrow instructions on Thumb-1.
17694 This avoids relaxation accidentally introducing Thumb-2
17695 instructions.
17696 2) Reject wide instructions in non Thumb-2 cores. */
17697 if (inst.size_req == 0)
17698 inst.size_req = 2;
17699 else if (inst.size_req == 4)
17700 {
17701 as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
17702 return;
17703 }
17704 }
17705 }
17706
17707 inst.instruction = opcode->tvalue;
17708
17709 if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
17710 {
17711 /* Prepare the it_insn_type for those encodings that don't set
17712 it. */
17713 it_fsm_pre_encode ();
17714
17715 opcode->tencode ();
17716
17717 it_fsm_post_encode ();
17718 }
17719
17720 if (!(inst.error || inst.relax))
17721 {
17722 gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
17723 inst.size = (inst.instruction > 0xffff ? 4 : 2);
17724 if (inst.size_req && inst.size_req != inst.size)
17725 {
17726 as_bad (_("cannot honor width suffix -- `%s'"), str);
17727 return;
17728 }
17729 }
17730
17731 /* Something has gone badly wrong if we try to relax a fixed size
17732 instruction. */
17733 gas_assert (inst.size_req == 0 || !inst.relax);
17734
17735 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17736 *opcode->tvariant);
17737 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
17738 set those bits when Thumb-2 32-bit instructions are seen. ie.
17739 anything other than bl/blx and v6-M instructions.
17740 The impact of relaxable instructions will be considered later after we
17741 finish all relaxation. */
17742 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
17743 && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
17744 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
17745 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17746 arm_ext_v6t2);
17747
17748 check_neon_suffixes;
17749
17750 if (!inst.error)
17751 {
17752 mapping_state (MAP_THUMB);
17753 }
17754 }
17755 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
17756 {
17757 bfd_boolean is_bx;
17758
17759 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
17760 is_bx = (opcode->aencode == do_bx);
17761
17762 /* Check that this instruction is supported for this CPU. */
17763 if (!(is_bx && fix_v4bx)
17764 && !(opcode->avariant &&
17765 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
17766 {
17767 as_bad (_("selected processor does not support ARM mode `%s'"), str);
17768 return;
17769 }
17770 if (inst.size_req)
17771 {
17772 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
17773 return;
17774 }
17775
17776 inst.instruction = opcode->avalue;
17777 if (opcode->tag == OT_unconditionalF)
17778 inst.instruction |= 0xF << 28;
17779 else
17780 inst.instruction |= inst.cond << 28;
17781 inst.size = INSN_SIZE;
17782 if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
17783 {
17784 it_fsm_pre_encode ();
17785 opcode->aencode ();
17786 it_fsm_post_encode ();
17787 }
17788 /* Arm mode bx is marked as both v4T and v5 because it's still required
17789 on a hypothetical non-thumb v5 core. */
17790 if (is_bx)
17791 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
17792 else
17793 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
17794 *opcode->avariant);
17795
17796 check_neon_suffixes;
17797
17798 if (!inst.error)
17799 {
17800 mapping_state (MAP_ARM);
17801 }
17802 }
17803 else
17804 {
17805 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
17806 "-- `%s'"), str);
17807 return;
17808 }
17809 output_inst (str);
17810 }
17811
17812 static void
17813 check_it_blocks_finished (void)
17814 {
17815 #ifdef OBJ_ELF
17816 asection *sect;
17817
17818 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
17819 if (seg_info (sect)->tc_segment_info_data.current_it.state
17820 == MANUAL_IT_BLOCK)
17821 {
17822 as_warn (_("section '%s' finished with an open IT block."),
17823 sect->name);
17824 }
17825 #else
17826 if (now_it.state == MANUAL_IT_BLOCK)
17827 as_warn (_("file finished with an open IT block."));
17828 #endif
17829 }
17830
17831 /* Various frobbings of labels and their addresses. */
17832
/* Hook run at the start of each new input line.  Forget any label
   seen on a previous line, so that code reading last_label_seen only
   ever observes a label from the current line.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
17838
/* Called whenever a label is defined.  Records the label, stamps it
   with the current ARM/Thumb and interworking state, closes any
   automatic IT block, optionally marks it as a Thumb function entry
   point, and emits it to the dwarf2 line-number machinery.  */
void
arm_frob_label (symbolS * sym)
{
  /* Record the most recently seen label (reset per-line by
     arm_start_line_hook).  */
  last_label_seen = sym;

  /* Labels take on the ARM/Thumb state in force where they are
     defined.  */
  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any IT block that was opened implicitly.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  /* The SEC_CODE test below additionally insists that the label lives
     in a code section: marking a symbol in a non-code section as a
     Thumb function makes no sense, and doing so on bogus input has
     previously provoked an internal error in GAS.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* The flag applies only to the first label after .thumb_func;
	 clear it so following labels are not also marked.  */
      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
17897
17898 bfd_boolean
17899 arm_data_in_code (void)
17900 {
17901 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
17902 {
17903 *input_line_pointer = '/';
17904 input_line_pointer += 5;
17905 *input_line_pointer = 0;
17906 return TRUE;
17907 }
17908
17909 return FALSE;
17910 }
17911
17912 char *
17913 arm_canonicalize_symbol_name (char * name)
17914 {
17915 int len;
17916
17917 if (thumb_mode && (len = strlen (name)) > 5
17918 && streq (name + len - 5, "/data"))
17919 *(name + len - 5) = 0;
17920
17921 return name;
17922 }
17923 \f
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

/* REGDEF(name, number, type) builds one reg_entry initializer; the
   trailing TRUE, 0 fill the remaining reg_entry fields (the TRUE
   presumably marking the name as built-in rather than user-defined
   via .req -- confirm against struct reg_entry).  REGSET/REGSETH
   expand to sixteen consecutive entries (0-15 / 16-31); REGSET2 maps
   name pN to number 2*N.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* SPLRBANK emits the banked LR/SP/SPSR names for one processor mode.
   NOTE(review): the 512|..., 768|... and SPSR_BIT number encodings
   must agree with the banked-register operand handling elsewhere in
   this file -- confirm before changing.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS), REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD), REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ), REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
  REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.  */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
  REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
  REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* NOTE(review): only REGDEF, REGNUM and REGSET are undefined here;
   REGNUM2, REGSETH, REGSET2 and SPLRBANK remain visible to the rest
   of the file.  Confirm nothing below relies on them before adding
   further #undefs.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
18072
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all", PSR_c | PSR_f},
  {"flg", PSR_f},
  {"ctl", PSR_c},

  /* Individual flags.  */
  {"f", PSR_f},
  {"c", PSR_c},
  {"x", PSR_x},
  {"s", PSR_s},

  /* Combinations of flags.  */
  /* Every permutation of two, three and four of the f/s/x/c field
     letters is listed, so users may write the fields in any order.  */
  {"fs", PSR_f | PSR_s},
  {"fx", PSR_f | PSR_x},
  {"fc", PSR_f | PSR_c},
  {"sf", PSR_s | PSR_f},
  {"sx", PSR_s | PSR_x},
  {"sc", PSR_s | PSR_c},
  {"xf", PSR_x | PSR_f},
  {"xs", PSR_x | PSR_s},
  {"xc", PSR_x | PSR_c},
  {"cf", PSR_c | PSR_f},
  {"cs", PSR_c | PSR_s},
  {"cx", PSR_c | PSR_x},
  {"fsx", PSR_f | PSR_s | PSR_x},
  {"fsc", PSR_f | PSR_s | PSR_c},
  {"fxs", PSR_f | PSR_x | PSR_s},
  {"fxc", PSR_f | PSR_x | PSR_c},
  {"fcs", PSR_f | PSR_c | PSR_s},
  {"fcx", PSR_f | PSR_c | PSR_x},
  {"sfx", PSR_s | PSR_f | PSR_x},
  {"sfc", PSR_s | PSR_f | PSR_c},
  {"sxf", PSR_s | PSR_x | PSR_f},
  {"sxc", PSR_s | PSR_x | PSR_c},
  {"scf", PSR_s | PSR_c | PSR_f},
  {"scx", PSR_s | PSR_c | PSR_x},
  {"xfs", PSR_x | PSR_f | PSR_s},
  {"xfc", PSR_x | PSR_f | PSR_c},
  {"xsf", PSR_x | PSR_s | PSR_f},
  {"xsc", PSR_x | PSR_s | PSR_c},
  {"xcf", PSR_x | PSR_c | PSR_f},
  {"xcs", PSR_x | PSR_c | PSR_s},
  {"cfs", PSR_c | PSR_f | PSR_s},
  {"cfx", PSR_c | PSR_f | PSR_x},
  {"csf", PSR_c | PSR_s | PSR_f},
  {"csx", PSR_c | PSR_s | PSR_x},
  {"cxf", PSR_c | PSR_x | PSR_f},
  {"cxs", PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18151
/* Table of V7M psr names.  Each name is listed in lower- and
   upper-case spellings.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr", 0 }, {"APSR", 0 },
  {"iapsr", 1 }, {"IAPSR", 1 },
  {"eapsr", 2 }, {"EAPSR", 2 },
  {"psr", 3 }, {"PSR", 3 },
  {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
  {"ipsr", 5 }, {"IPSR", 5 },
  {"epsr", 6 }, {"EPSR", 6 },
  {"iepsr", 7 }, {"IEPSR", 7 },
  {"msp", 8 }, {"MSP", 8 },
  {"psp", 9 }, {"PSP", 9 },
  {"primask", 16}, {"PRIMASK", 16},
  {"basepri", 17}, {"BASEPRI", 17},
  {"basepri_max", 18}, {"BASEPRI_MAX", 18},
  /* NOTE(review): the lowercase "basepri_max" entry below duplicates
     the one above; only the uppercase BASEPRI_MASK spelling is the
     preserved typo.  The duplicate looks redundant but harmless.  */
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility.  */
  {"faultmask", 19}, {"FAULTMASK", 19},
  {"control", 20}, {"CONTROL", 20}
};
18172
/* Table of all shift-in-operand names.  Note that "asl" is accepted
   as a synonym for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
18183
/* Table of all explicit relocation names.  Each name maps to a BFD
   relocation code and appears in lower- and upper-case spellings.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
  { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
  { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
  { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
  { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
  { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
  { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
18208
/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
/* The value is the 4-bit ARM condition-code field encoding.  Note the
   aliases: "hs" == "cs" (0x2) and "ul"/"lo" == "cc" (0x3).  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18228
/* UL_BARRIER expands to two table entries -- the lower-case and
   upper-case spellings of a barrier option -- both carrying the same
   option code and gated on the same architecture feature bits.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
18254
18255 /* Table of ARM-format instructions. */
18256
/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0() { OP_stop, }
#define OPS1(a) { OP_##a, }
#define OPS2(a,b) { OP_##a,OP_##b, }
#define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a) { a, }
#define OPS_2(a,b) { a,b, }
#define OPS_3(a,b,c) { a,b,c, }
#define OPS_4(a,b,c,d) { a,b,c,d, }
#define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* NOTE(review): the macros below are inconsistent about mnemonic
   stringification.  TxCE/TCE/tCE/TxC3/TC3/TUE/TUEc/TUF/CE/CL/cCE/
   cCL/C3E take the mnemonic as a ready-made string literal, whereas
   C3, xCM_/CM, UE, UF, NUF, nUF, NCE_tag and nCE_tag stringify a bare
   token with #mnem.  Keep that in mind when adding table entries.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* ARM-only variants of all the above.  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Builds one entry whose mnemonic is the concatenation m1+m2+m3, with
   m2 (the infix, possibly empty) stringified to measure its length.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expands to one entry per condition code (plus the bare form).  */
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_ (m1, , m2, op, nops, ops, ae), \
  xCM_ (m1, eq, m2, op, nops, ops, ae), \
  xCM_ (m1, ne, m2, op, nops, ops, ae), \
  xCM_ (m1, cs, m2, op, nops, ops, ae), \
  xCM_ (m1, hs, m2, op, nops, ops, ae), \
  xCM_ (m1, cc, m2, op, nops, ops, ae), \
  xCM_ (m1, ul, m2, op, nops, ops, ae), \
  xCM_ (m1, lo, m2, op, nops, ops, ae), \
  xCM_ (m1, mi, m2, op, nops, ops, ae), \
  xCM_ (m1, pl, m2, op, nops, ops, ae), \
  xCM_ (m1, vs, m2, op, nops, ops, ae), \
  xCM_ (m1, vc, m2, op, nops, ops, ae), \
  xCM_ (m1, hi, m2, op, nops, ops, ae), \
  xCM_ (m1, ls, m2, op, nops, ops, ae), \
  xCM_ (m1, ge, m2, op, nops, ops, ae), \
  xCM_ (m1, lt, m2, op, nops, ops, ae), \
  xCM_ (m1, gt, m2, op, nops, ops, ae), \
  xCM_ (m1, le, m2, op, nops, ops, ae), \
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* do_0 == 0: allows a table entry to name its encoder as "0", which
   the do_## paste turns into a null encoding function pointer.  */
#define do_0 0
18431
18432 static const struct asm_opcode insns[] =
18433 {
18434 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18435 #define THUMB_VARIANT & arm_ext_v4t
18436 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
18437 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
18438 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
18439 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
18440 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
18441 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
18442 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
18443 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
18444 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
18445 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
18446 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
18447 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
18448 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
18449 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
18450 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
18451 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
18452
18453 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18454 for setting PSR flag bits. They are obsolete in V6 and do not
18455 have Thumb equivalents. */
18456 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18457 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18458 CL("tstp", 110f000, 2, (RR, SH), cmp),
18459 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18460 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18461 CL("cmpp", 150f000, 2, (RR, SH), cmp),
18462 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18463 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18464 CL("cmnp", 170f000, 2, (RR, SH), cmp),
18465
18466 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
18467 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
18468 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18469 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18470
18471 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18472 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18473 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18474 OP_RRnpc),
18475 OP_ADDRGLDR),ldst, t_ldst),
18476 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18477
18478 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18479 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18480 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18481 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18482 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18483 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18484
18485 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18486 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18487 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18488 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18489
18490 /* Pseudo ops. */
18491 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18492 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18493 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18494 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
18495
18496 /* Thumb-compatibility pseudo ops. */
18497 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18498 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18499 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18500 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18501 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18502 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18503 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18504 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18505 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18506 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18507 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18508 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18509
18510 /* These may simplify to neg. */
18511 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18512 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18513
18514 #undef THUMB_VARIANT
18515 #define THUMB_VARIANT & arm_ext_v6
18516
18517 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
18518
18519 /* V1 instructions with no Thumb analogue prior to V6T2. */
18520 #undef THUMB_VARIANT
18521 #define THUMB_VARIANT & arm_ext_v6t2
18522
18523 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18524 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18525 CL("teqp", 130f000, 2, (RR, SH), cmp),
18526
18527 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18528 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18529 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
18530 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18531
18532 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18533 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18534
18535 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18536 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18537
18538 /* V1 instructions with no Thumb analogue at all. */
18539 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18540 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18541
18542 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18543 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18544 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18545 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18546 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18547 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18548 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18549 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18550
18551 #undef ARM_VARIANT
18552 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18553 #undef THUMB_VARIANT
18554 #define THUMB_VARIANT & arm_ext_v4t
18555
18556 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18557 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18558
18559 #undef THUMB_VARIANT
18560 #define THUMB_VARIANT & arm_ext_v6t2
18561
18562 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18563 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18564
 /* Generic coprocessor instructions.  */
 /* Table columns here: mnemonic, ARM opcode bits, Thumb opcode bits,
    operand count, operand-parser list, ARM encoder, Thumb encoder --
    NOTE(review): per the TCE/TC3 macro definitions earlier in the file;
    confirm there, the macros are not visible in this region.  */
 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 /* mrc's first register operand accepts APSR_nzcv in addition to a core
    register (APSR_RR), unlike mcr which takes a plain RR.  */
 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18573
18574 #undef ARM_VARIANT
18575 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18576
18577 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18578 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18579
18580 #undef ARM_VARIANT
18581 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18582 #undef THUMB_VARIANT
18583 #define THUMB_VARIANT & arm_ext_msr
18584
18585 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18586 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18587
18588 #undef ARM_VARIANT
18589 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18590 #undef THUMB_VARIANT
18591 #define THUMB_VARIANT & arm_ext_v6t2
18592
18593 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18594 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18595 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18596 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18597 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18598 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18599 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18600 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18601
18602 #undef ARM_VARIANT
18603 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18604 #undef THUMB_VARIANT
18605 #define THUMB_VARIANT & arm_ext_v4t
18606
18607 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18608 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18609 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18610 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18611 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18612 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18613
18614 #undef ARM_VARIANT
18615 #define ARM_VARIANT & arm_ext_v4t_5
18616
18617 /* ARM Architecture 4T. */
18618 /* Note: bx (and blx) are required on V5, even if the processor does
18619 not support Thumb. */
18620 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18621
18622 #undef ARM_VARIANT
18623 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18624 #undef THUMB_VARIANT
18625 #define THUMB_VARIANT & arm_ext_v5t
18626
18627 /* Note: blx has 2 variants; the .value coded here is for
18628 BLX(2). Only this variant has conditional execution. */
18629 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18630 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18631
18632 #undef THUMB_VARIANT
18633 #define THUMB_VARIANT & arm_ext_v6t2
18634
18635 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18636 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18637 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18638 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18639 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18640 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18641 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18642 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18643
18644 #undef ARM_VARIANT
18645 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18646 #undef THUMB_VARIANT
18647 #define THUMB_VARIANT & arm_ext_v5exp
18648
18649 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18650 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18651 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18652 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18653
18654 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18655 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18656
18657 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18658 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18659 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18660 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18661
18662 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18663 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18664 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18665 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18666
18667 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18668 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18669
18670 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18671 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18672 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18673 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18674
18675 #undef ARM_VARIANT
18676 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18677 #undef THUMB_VARIANT
18678 #define THUMB_VARIANT & arm_ext_v6t2
18679
18680 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
18681 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18682 ldrd, t_ldstd),
18683 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18684 ADDRGLDRS), ldrd, t_ldstd),
18685
18686 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18687 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18688
18689 #undef ARM_VARIANT
18690 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18691
18692 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
18693
18694 #undef ARM_VARIANT
18695 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18696 #undef THUMB_VARIANT
18697 #define THUMB_VARIANT & arm_ext_v6
18698
18699 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
18700 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
18701 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18702 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18703 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18704 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18705 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18706 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18707 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18708 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
18709
18710 #undef THUMB_VARIANT
18711 #define THUMB_VARIANT & arm_ext_v6t2
18712
18713 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
18714 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18715 strex, t_strex),
18716 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18717 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18718
18719 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
18720 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
18721
18722 /* ARM V6 not included in V7M. */
18723 #undef THUMB_VARIANT
18724 #define THUMB_VARIANT & arm_ext_v6_notm
18725 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18726 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18727 UF(rfeib, 9900a00, 1, (RRw), rfe),
18728 UF(rfeda, 8100a00, 1, (RRw), rfe),
18729 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18730 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18731 UF(rfefa, 8100a00, 1, (RRw), rfe),
18732 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18733 UF(rfeed, 9900a00, 1, (RRw), rfe),
18734 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18735 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18736 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18737 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
18738 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
18739 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
18740 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
18741 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18742 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18743
18744 /* ARM V6 not included in V7M (eg. integer SIMD). */
18745 #undef THUMB_VARIANT
18746 #define THUMB_VARIANT & arm_ext_v6_dsp
18747 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
18748 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
18749 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
18750 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18751 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18752 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18753 /* Old name for QASX. */
18754 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18755 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18756 /* Old name for QSAX. */
18757 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18758 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18759 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18760 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18761 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18762 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18763 /* Old name for SASX. */
18764 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18765 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18766 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18767 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18768 /* Old name for SHASX. */
18769 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18770 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18771 /* Old name for SHSAX. */
18772 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18773 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18774 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18775 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18776 /* Old name for SSAX. */
18777 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18778 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18779 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18780 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18781 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18782 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18783 /* Old name for UASX. */
18784 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18785 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18786 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18787 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18788 /* Old name for UHASX. */
18789 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18790 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18791 /* Old name for UHSAX. */
18792 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18793 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18794 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18795 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18796 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18797 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18798 /* Old name for UQASX. */
18799 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18800 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18801 /* Old name for UQSAX. */
18802 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18803 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18804 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18805 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18806 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18807 /* Old name for USAX. */
18808 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18809 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18810 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18811 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18812 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18813 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18814 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18815 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18816 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18817 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18818 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18819 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18820 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18821 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18822 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18823 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18824 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18825 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18826 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18827 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18828 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18829 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18830 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18831 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18832 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18833 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18834 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18835 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18836 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18837 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
18838 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
18839 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18840 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18841 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
18842
18843 #undef ARM_VARIANT
18844 #define ARM_VARIANT & arm_ext_v6k
18845 #undef THUMB_VARIANT
18846 #define THUMB_VARIANT & arm_ext_v6k
18847
18848 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
18849 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
18850 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
18851 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
18852
18853 #undef THUMB_VARIANT
18854 #define THUMB_VARIANT & arm_ext_v6_notm
18855 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
18856 ldrexd, t_ldrexd),
18857 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
18858 RRnpcb), strexd, t_strexd),
18859
18860 #undef THUMB_VARIANT
18861 #define THUMB_VARIANT & arm_ext_v6t2
18862 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
18863 rd_rn, rd_rn),
18864 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
18865 rd_rn, rd_rn),
18866 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18867 strex, t_strexbh),
18868 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18869 strex, t_strexbh),
18870 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
18871
18872 #undef ARM_VARIANT
18873 #define ARM_VARIANT & arm_ext_sec
18874 #undef THUMB_VARIANT
18875 #define THUMB_VARIANT & arm_ext_sec
18876
18877 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
18878
18879 #undef ARM_VARIANT
18880 #define ARM_VARIANT & arm_ext_virt
18881 #undef THUMB_VARIANT
18882 #define THUMB_VARIANT & arm_ext_virt
18883
18884 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
18885 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
18886
18887 #undef ARM_VARIANT
18888 #define ARM_VARIANT & arm_ext_v6t2
18889 #undef THUMB_VARIANT
18890 #define THUMB_VARIANT & arm_ext_v6t2
18891
18892 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
18893 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
18894 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18895 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18896
18897 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18898 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
18899 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
18900 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
18901
18902 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18903 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18904 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18905 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18906
 /* Thumb-only instructions.  */
 /* ARM_VARIANT is NULL and the ARM opcode field is 0: these entries have
    no ARM-state encoding at all.  */
#undef ARM_VARIANT
#define ARM_VARIANT NULL
 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),

 /* ARM does not really have an IT instruction, so always allow it.
    The opcode is copied from Thumb in order to allow warnings in
    -mimplicit-it=[never | arm] modes.  */
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v1

 /* One entry per then/else pattern.  NOTE(review): the low nibble of the
    0xbf0x opcode appears to be the IT condition mask (bf08 = plain "it",
    down to bf01 = "iteee") -- presumably matching the Thumb IT mask
    encoding; confirm against the ARM ARM IT description.  */
 TUE("it", bf08, bf08, 1, (COND), it, t_it),
 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
18934 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
18935 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
18936 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
18937
18938 /* Thumb2 only instructions. */
18939 #undef ARM_VARIANT
18940 #define ARM_VARIANT NULL
18941
18942 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18943 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18944 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
18945 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
18946 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
18947 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
18948
18949 /* Hardware division instructions. */
18950 #undef ARM_VARIANT
18951 #define ARM_VARIANT & arm_ext_adiv
18952 #undef THUMB_VARIANT
18953 #define THUMB_VARIANT & arm_ext_div
18954
18955 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
18956 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
18957
18958 /* ARM V6M/V7 instructions. */
18959 #undef ARM_VARIANT
18960 #define ARM_VARIANT & arm_ext_barrier
18961 #undef THUMB_VARIANT
18962 #define THUMB_VARIANT & arm_ext_barrier
18963
18964 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
18965 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
18966 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
18967
18968 /* ARM V7 instructions. */
18969 #undef ARM_VARIANT
18970 #define ARM_VARIANT & arm_ext_v7
18971 #undef THUMB_VARIANT
18972 #define THUMB_VARIANT & arm_ext_v7
18973
18974 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
18975 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
18976
18977 #undef ARM_VARIANT
18978 #define ARM_VARIANT & arm_ext_mp
18979 #undef THUMB_VARIANT
18980 #define THUMB_VARIANT & arm_ext_mp
18981
18982 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
18983
 /* ARMv8 (AArch32) instructions.  */
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v8
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v8

 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
 /* Load-acquire / store-release exclusives.  The exclusive store forms
    take an extra status-result register, so they reuse the strexd/stlex
    encoders rather than the plain rd_rn helpers used by the loads.  */
 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
        ldrexd, t_ldrexd),
 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
        stlex, t_stlex),
 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
        strexd, t_strexd),
 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
        stlex, t_stlex),
 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
        stlex, t_stlex),
 /* Non-exclusive load-acquire / store-release.  Note the stores use the
    rm_rn ARM encoder (Rt goes in the Rm field of the ARM encoding) while
    the loads use rd_rn -- the two columns deliberately differ.  */
 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19011
19012 /* ARMv8 T32 only. */
19013 #undef ARM_VARIANT
19014 #define ARM_VARIANT NULL
19015 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19016 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19017 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19018
19019 /* FP for ARMv8. */
19020 #undef ARM_VARIANT
19021 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19022 #undef THUMB_VARIANT
19023 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19024
19025 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19026 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19027 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19028 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19029 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19030 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19031 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19032 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19033 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19034 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19035 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19036 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19037 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19038 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19039 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19040 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19041 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19042
 /* Crypto v1 extensions.  */
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_crypto_ext_armv8
#undef THUMB_VARIANT
#define THUMB_VARIANT & fpu_crypto_ext_armv8

 /* All crypto instructions operate on quadword NEON registers (RNQ).
    Two-operand forms share the _aes/_sha1h/_sha2op encoders; the
    three-operand SHA forms share _sha3op, with the final column
    selecting the specific opcode.  */
 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19063
19064 #undef ARM_VARIANT
19065 #define ARM_VARIANT & crc_ext_armv8
19066 #undef THUMB_VARIANT
19067 #define THUMB_VARIANT & crc_ext_armv8
19068 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19069 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19070 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19071 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19072 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19073 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19074
19075 #undef ARM_VARIANT
19076 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19077 #undef THUMB_VARIANT
19078 #define THUMB_VARIANT NULL
19079
19080 cCE("wfs", e200110, 1, (RR), rd),
19081 cCE("rfs", e300110, 1, (RR), rd),
19082 cCE("wfc", e400110, 1, (RR), rd),
19083 cCE("rfc", e500110, 1, (RR), rd),
19084
19085 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19086 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19087 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19088 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19089
19090 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19091 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19092 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19093 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19094
19095 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19096 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19097 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19098 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19099 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19100 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19101 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19102 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19103 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19104 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19105 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19106 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19107
19108 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19109 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19110 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19111 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19112 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19113 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19114 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19115 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19116 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19117 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19118 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19119 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19120
19121 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19122 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19123 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19124 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19125 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19126 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19127 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19128 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19129 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19130 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19131 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19132 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19133
19134 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19135 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19136 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19137 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19138 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19139 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19140 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19141 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19142 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19143 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19144 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19145 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19146
19147 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19148 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19149 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19150 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19151 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19152 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19153 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19154 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19155 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19156 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19157 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19158 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19159
19160 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19161 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19162 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19163 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19164 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19165 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19166 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19167 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19168 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19169 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19170 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19171 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19172
19173 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19174 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19175 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19176 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19177 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19178 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19179 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19180 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19181 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19182 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19183 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19184 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19185
19186 	 cCL("exps",	e708100, 2, (RF, RF_IF),     rd_rm),
19187 	 cCL("expsp",	e708120, 2, (RF, RF_IF),     rd_rm),
19188 	 cCL("expsm",	e708140, 2, (RF, RF_IF),     rd_rm),
19189 	 cCL("expsz",	e708160, 2, (RF, RF_IF),     rd_rm),
19190 	 cCL("expd",	e708180, 2, (RF, RF_IF),     rd_rm),
19191 	 cCL("expdp",	e7081a0, 2, (RF, RF_IF),     rd_rm),
19192 	 cCL("expdm",	e7081c0, 2, (RF, RF_IF),     rd_rm),
19193 	 cCL("expdz",	e7081e0, 2, (RF, RF_IF),     rd_rm),
19194 	 cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
19195 	 cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
19196 	 cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
	/* Was a duplicate "expdz" entry (already defined above with opcode
	   e7081e0); a second table entry for the same mnemonic caused an
	   internal error in GAS when that bogus mnemonic was assembled.
	   Per the group's pattern (expe/expep/expem/...z at +0x60) this is
	   the extended-precision zero-rounding variant, "expez".  */
19197 	 cCL("expez",	e788160, 2, (RF, RF_IF),     rd_rm),
19198
19199 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19200 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19201 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19202 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19203 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19204 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19205 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19206 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19207 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19208 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19209 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19210 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19211
19212 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19213 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19214 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19215 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19216 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19217 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19218 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19219 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19220 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19221 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19222 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19223 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19224
19225 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19226 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19227 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19228 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19229 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19230 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19231 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19232 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19233 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19234 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19235 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19236 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19237
19238 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19239 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19240 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19241 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19242 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19243 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19244 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19245 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19246 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19247 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19248 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19249 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19250
19251 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19252 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19253 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19254 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19255 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19256 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19257 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19258 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19259 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19260 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19261 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19262 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19263
19264 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19265 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19266 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19267 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19268 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19269 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19270 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19271 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19272 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19273 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19274 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19275 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19276
19277 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19278 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19279 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19280 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19281 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19282 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19283 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19284 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19285 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19286 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19287 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19288 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19289
19290 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19291 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19292 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19293 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19294 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19295 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19296 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19297 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19298 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19299 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19300 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19301 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19302
19303 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19304 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19305 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19306 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19307 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19308 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19309 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19310 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19311 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19312 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19313 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19314 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19315
19316 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19317 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19318 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19319 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19320 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19321 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19322 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19323 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19324 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19325 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19326 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19327 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19328
19329 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19330 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19331 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19332 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19333 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19334 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19335 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19336 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19337 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19338 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19339 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19340 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19341
19342 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19343 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19344 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19345 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19346 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19347 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19348 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19349 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19350 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19351 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19352 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19353 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19354
19355 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19356 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19357 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19358 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19359 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19360 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19361 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19362 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19363 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19364 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19365 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19366 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19367
19368 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19369 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19370 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19371 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19372 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19373 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19374 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19375 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19376 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19377 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19378 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19379 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19380
19381 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19382 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19383 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19384 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19385 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19386 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19387 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19388 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19389 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19390 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19391 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19392 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19393
19394 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19395 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19396 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19397 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19398 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19399 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19400 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19401 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19402 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19403 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19404 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19405 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19406
19407 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19408 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19409 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19410 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19411 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19412 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19413 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19414 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19415 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19416 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19417 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19418 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19419
19420 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19421 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19422 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19423 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19424 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19425 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19426 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19427 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19428 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19429 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19430 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19431 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19432
19433 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19434 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19435 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19436 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19437 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19438 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19439 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19440 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19441 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19442 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19443 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19444 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19445
19446 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19447 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19448 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19449 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19450 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19451 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19452 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19453 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19454 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19455 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19456 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19457 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19458
19459 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19460 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19461 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19462 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19463 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19464 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19465 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19466 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19467 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19468 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19469 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19470 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19471
19472 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19473 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19474 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19475 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19476
19477 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19478 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19479 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19480 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19481 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19482 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19483 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19484 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19485 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19486 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19487 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
19488 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
19489
19490 /* The implementation of the FIX instruction is broken on some
19491 assemblers, in that it accepts a precision specifier as well as a
19492 rounding specifier, despite the fact that this is meaningless.
19493 To be more compatible, we accept it as well, though of course it
19494 does not set any bits. */
19495 cCE("fix", e100110, 2, (RR, RF), rd_rm),
19496 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
19497 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
19498 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
19499 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
19500 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
19501 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
19502 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
19503 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
19504 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
19505 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
19506 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
19507 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
19508
19509 /* Instructions that were new with the real FPA, call them V2. */
19510 #undef ARM_VARIANT
19511 #define ARM_VARIANT & fpu_fpa_ext_v2
19512
19513 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19514 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19515 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19516 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19517 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19518 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19519
19520 #undef ARM_VARIANT
19521 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19522
19523 /* Moves and type conversions. */
19524 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
19525 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
19526 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
19527 cCE("fmstat", ef1fa10, 0, (), noargs),
19528 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
19529 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
19530 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
19531 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
19532 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
19533 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19534 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
19535 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19536 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
19537 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
19538
19539 /* Memory operations. */
19540 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19541 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19542 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19543 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19544 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19545 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19546 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19547 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19548 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19549 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19550 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19551 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19552 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19553 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19554 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19555 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19556 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19557 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19558
19559 /* Monadic operations. */
19560 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19561 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19562 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19563
19564 /* Dyadic operations. */
19565 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19566 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19567 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19568 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19569 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19570 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19571 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19572 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19573 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19574
19575 /* Comparisons. */
19576 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19577 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19578 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19579 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19580
19581 /* Double precision load/store are still present on single precision
19582 implementations. */
19583 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19584 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19585 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19586 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19587 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19588 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19589 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19590 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19591 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19592 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19593
19594 #undef ARM_VARIANT
19595 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19596
19597 /* Moves and type conversions. */
19598 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19599 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19600 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19601 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19602 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19603 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19604 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19605 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19606 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19607 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19608 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19609 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19610 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19611
19612 /* Monadic operations. */
19613 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19614 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19615 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19616
19617 /* Dyadic operations. */
19618 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19619 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19620 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19621 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19622 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19623 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19624 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19625 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19626 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19627
19628 /* Comparisons. */
19629 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19630 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19631 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19632 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19633
19634 #undef ARM_VARIANT
19635 #define ARM_VARIANT & fpu_vfp_ext_v2
19636
19637 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19638 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19639 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19640 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19641
19642 /* Instructions which may belong to either the Neon or VFP instruction sets.
19643 Individual encoder functions perform additional architecture checks. */
19644 #undef ARM_VARIANT
19645 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19646 #undef THUMB_VARIANT
19647 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19648
19649 /* These mnemonics are unique to VFP. */
19650 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19651 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19652 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19653 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19654 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19655 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19656 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19657 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
19658 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
19659 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
19660
19661 /* Mnemonics shared by Neon and VFP. */
19662 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19663 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19664 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19665
19666 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19667 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19668
19669 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19670 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19671
19672 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19673 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19674 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19675 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19676 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19677 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19678 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19679 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19680
19681 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19682 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
19683 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19684 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19685
19686
19687 /* NOTE: All VMOV encoding is special-cased! */
19688 NCE(vmov, 0, 1, (VMOV), neon_mov),
19689 NCE(vmovq, 0, 1, (VMOV), neon_mov),
19690
19691 #undef THUMB_VARIANT
19692 #define THUMB_VARIANT & fpu_neon_ext_v1
19693 #undef ARM_VARIANT
19694 #define ARM_VARIANT & fpu_neon_ext_v1
19695
19696 /* Data processing with three registers of the same length. */
19697 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19698 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
19699 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
19700 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19701 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19702 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19703 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19704 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19705 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19706 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19707 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19708 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19709 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19710 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19711 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19712 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19713 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19714 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19715 /* If not immediate, fall back to neon_dyadic_i64_su.
19716 shl_imm should accept I8 I16 I32 I64,
19717 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19718 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19719 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
19720 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
19721 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
19722 /* Logic ops, types optional & ignored. */
19723 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19724 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19725 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19726 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19727 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19728 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19729 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19730 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19731 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
19732 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
19733 /* Bitfield ops, untyped. */
19734 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19735 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19736 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19737 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19738 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19739 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19740 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
19741 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19742 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19743 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19744 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19745 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19746 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19747 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
19748 back to neon_dyadic_if_su. */
19749 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19750 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19751 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19752 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19753 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19754 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19755 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19756 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19757 /* Comparison. Type I8 I16 I32 F32. */
19758 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
19759 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
19760 /* As above, D registers only. */
19761 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19762 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19763 /* Int and float variants, signedness unimportant. */
19764 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19765 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19766 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
19767 /* Add/sub take types I8 I16 I32 I64 F32. */
19768 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19769 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19770 /* vtst takes sizes 8, 16, 32. */
19771 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
19772 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
19773 /* VMUL takes I8 I16 I32 F32 P8. */
19774 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
19775 /* VQD{R}MULH takes S16 S32. */
19776 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19777 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19778 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19779 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19780 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19781 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19782 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19783 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19784 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19785 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19786 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19787 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19788 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19789 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19790 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19791 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19792
19793 /* Two address, int/float. Types S8 S16 S32 F32. */
19794 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
19795 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
19796
19797 /* Data processing with two registers and a shift amount. */
19798 /* Right shifts, and variants with rounding.
19799 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
19800 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19801 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19802 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19803 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19804 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19805 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19806 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19807 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19808 /* Shift and insert. Sizes accepted 8 16 32 64. */
19809 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
19810 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
19811 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
19812 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
19813 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
19814 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
19815 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
19816 /* Right shift immediate, saturating & narrowing, with rounding variants.
19817 Types accepted S16 S32 S64 U16 U32 U64. */
19818 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19819 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19820 /* As above, unsigned. Types accepted S16 S32 S64. */
19821 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19822 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19823 /* Right shift narrowing. Types accepted I16 I32 I64. */
19824 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19825 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19826 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
19827 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
19828 /* CVT with optional immediate for fixed-point variant. */
19829 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
19830
19831 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
19832 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
19833
19834 /* Data processing, three registers of different lengths. */
19835 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
19836 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
19837 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
19838 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
19839 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
19840 /* If not scalar, fall back to neon_dyadic_long.
19841 Vector types as above, scalar types S16 S32 U16 U32. */
19842 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19843 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19844 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
19845 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19846 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19847 /* Dyadic, narrowing insns. Types I16 I32 I64. */
19848 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19849 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19850 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19851 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19852 /* Saturating doubling multiplies. Types S16 S32. */
19853 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19854 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19855 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19856 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
19857 S16 S32 U16 U32. */
19858 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
19859
19860 /* Extract. Size 8. */
19861 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
19862 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
19863
19864 /* Two registers, miscellaneous. */
19865 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
19866 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
19867 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
19868 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
19869 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
19870 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
19871 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
19872 /* Vector replicate. Sizes 8 16 32. */
19873 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
19874 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
19875 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
19876 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
19877 /* VMOVN. Types I16 I32 I64. */
19878 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
19879 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
19880 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
19881 /* VQMOVUN. Types S16 S32 S64. */
19882 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
19883 /* VZIP / VUZP. Sizes 8 16 32. */
19884 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
19885 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
19886 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
19887 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
19888 /* VQABS / VQNEG. Types S8 S16 S32. */
19889 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19890 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
19891 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19892 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
19893 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
19894 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
19895 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
19896 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
19897 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
19898 /* Reciprocal estimates. Types U32 F32. */
19899 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
19900 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
19901 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
19902 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
19903 /* VCLS. Types S8 S16 S32. */
19904 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
19905 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
19906 /* VCLZ. Types I8 I16 I32. */
19907 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
19908 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
19909 /* VCNT. Size 8. */
19910 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
19911 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
19912 /* Two address, untyped. */
19913 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
19914 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
19915 /* VTRN. Sizes 8 16 32. */
19916 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
19917 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
19918
19919 /* Table lookup. Size 8. */
19920 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19921 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19922
19923 #undef THUMB_VARIANT
19924 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
19925 #undef ARM_VARIANT
19926 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
19927
19928 /* Neon element/structure load/store. */
19929 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19930 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19931 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19932 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19933 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19934 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19935 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19936 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19937
19938 #undef THUMB_VARIANT
19939 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
19940 #undef ARM_VARIANT
19941 #define ARM_VARIANT & fpu_vfp_ext_v3xd
19942 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
19943 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19944 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19945 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19946 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19947 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19948 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19949 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19950 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19951
19952 #undef THUMB_VARIANT
19953 #define THUMB_VARIANT & fpu_vfp_ext_v3
19954 #undef ARM_VARIANT
19955 #define ARM_VARIANT & fpu_vfp_ext_v3
19956
19957 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
19958 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19959 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19960 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19961 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19962 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19963 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19964 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19965 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19966
19967 #undef ARM_VARIANT
19968 #define ARM_VARIANT & fpu_vfp_ext_fma
19969 #undef THUMB_VARIANT
19970 #define THUMB_VARIANT & fpu_vfp_ext_fma
19971 /* Mnemonics shared by Neon and VFP. These are included in the
19972 VFP FMA variant; NEON and VFP FMA always includes the NEON
19973 FMA instructions. */
19974 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19975 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19976 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
19977 the v form should always be used. */
19978 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19979 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19980 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19981 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19982 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19983 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19984
19985 #undef THUMB_VARIANT
19986 #undef ARM_VARIANT
19987 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
19988
19989 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19990 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19991 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19992 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19993 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19994 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19995 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
19996 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
19997
19998 #undef ARM_VARIANT
19999 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20000
20001 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20002 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20003 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20004 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20005 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20006 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20007 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20008 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20009 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20010 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20011 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20012 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20013 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20014 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20015 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20016 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20017 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20018 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20019 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20020 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20021 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20022 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20023 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20024 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20025 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20026 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20027 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20028 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20029 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20030 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20031 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20032 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20033 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20034 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20035 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20036 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20037 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20038 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20039 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20040 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20041 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20042 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20043 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20044 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20045 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20046 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20047 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20048 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20049 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20050 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20051 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20052 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20053 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20054 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20055 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20056 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20057 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20058 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20059 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20060 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20061 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20062 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20063 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20064 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20065 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20066 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20067 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20068 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20069 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20070 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20071 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20072 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20073 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20074 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20075 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20076 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20077 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20078 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20079 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20080 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20081 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20082 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20083 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20084 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20085 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20086 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20087 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20088 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20089 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20090 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20091 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20092 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20093 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20094 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20095 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20096 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20097 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20098 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20099 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20100 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20101 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20102 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20103 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20104 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20105 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20106 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20107 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20108 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20109 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20110 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20111 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20112 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20113 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20114 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20115 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20116 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20117 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20118 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20119 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20120 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20121 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20122 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20123 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20124 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20125 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20126 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20127 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20128 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20129 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20130 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20131 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20132 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20133 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20134 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20135 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20136 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20137 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20138 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20139 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20140 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20141 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20142 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20143 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20144 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20145 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20146 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20147 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20148 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20149 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20150 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20151 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20152 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20153 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20154 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20155 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20156 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20157 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20158 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20159 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20160 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20161 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20162 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20163
20164 #undef ARM_VARIANT
20165 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20166
20167 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20168 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20169 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20170 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20171 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20172 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20173 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20174 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20175 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20176 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20177 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20178 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20179 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20180 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20181 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20182 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20183 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20184 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20185 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20186 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20187 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20188 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20189 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20190 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20191 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20192 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20193 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20194 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20195 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20196 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20197 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20198 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20199 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20200 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20201 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20202 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20203 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20204 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20205 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20206 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20207 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20208 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20209 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20210 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20211 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20212 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20213 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20214 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20215 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20216 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20217 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20218 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20219 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20220 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20221 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20222 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20223 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20224
20225 #undef ARM_VARIANT
20226 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20227
20228 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20229 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20230 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20231 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20232 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20233 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20234 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20235 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20236 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20237 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20238 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20239 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20240 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20241 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20242 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20243 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20244 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20245 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20246 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20247 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20248 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20249 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20250 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20251 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20252 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20253 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20254 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20255 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20256 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20257 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20258 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20259 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20260 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20261 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20262 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20263 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20264 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20265 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20266 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20267 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20268 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20269 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20270 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20271 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20272 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20273 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20274 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20275 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20276 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20277 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20278 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20279 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20280 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20281 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20282 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20283 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20284 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20285 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20286 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20287 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20288 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20289 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20290 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20291 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20292 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20293 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20294 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20295 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20296 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20297 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20298 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20299 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20300 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20301 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20302 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20303 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20304 };
20305 #undef ARM_VARIANT
20306 #undef THUMB_VARIANT
20307 #undef TCE
20308 #undef TUE
20309 #undef TUF
20310 #undef TCC
20311 #undef cCE
20312 #undef cCL
20313 #undef C3E
20314 #undef CE
20315 #undef CM
20316 #undef UE
20317 #undef UF
20318 #undef UT
20319 #undef NUF
20320 #undef nUF
20321 #undef NCE
20322 #undef nCE
20323 #undef OPS0
20324 #undef OPS1
20325 #undef OPS2
20326 #undef OPS3
20327 #undef OPS4
20328 #undef OPS5
20329 #undef OPS6
20330 #undef do_0
20331 \f
20332 /* MD interface: bits in the object file. */
20333
20334 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20335 for use in the a.out file, and stores them in the array pointed to by buf.
20336 This knows about the endian-ness of the target machine and does
20337 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20338 2 (short) and 4 (long) Floating numbers are put out as a series of
20339 LITTLENUMS (shorts, here at least). */
20340
20341 void
20342 md_number_to_chars (char * buf, valueT val, int n)
20343 {
20344 if (target_big_endian)
20345 number_to_chars_bigendian (buf, val, n);
20346 else
20347 number_to_chars_littleendian (buf, val, n);
20348 }
20349
20350 static valueT
20351 md_chars_to_number (char * buf, int n)
20352 {
20353 valueT result = 0;
20354 unsigned char * where = (unsigned char *) buf;
20355
20356 if (target_big_endian)
20357 {
20358 while (n--)
20359 {
20360 result <<= 8;
20361 result |= (*where++ & 255);
20362 }
20363 }
20364 else
20365 {
20366 while (n--)
20367 {
20368 result <<= 8;
20369 result |= (where[n] & 255);
20370 }
20371 }
20372
20373 return result;
20374 }
20375
20376 /* MD interface: Sections. */
20377
20378 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20379 that an rs_machine_dependent frag may reach. */
20380
unsigned int
arm_frag_max_var (fragS *fragp)
{
  /* We only use rs_machine_dependent for variable-size Thumb instructions,
     which are either THUMB_SIZE (2) or INSN_SIZE (4).

     Note that we generate relaxable instructions even for cases that don't
     really need it, like an immediate that's a trivial constant.  So we're
     overestimating the instruction size for some of those cases.  Rather
     than putting more intelligence here, it would probably be better to
     avoid generating a relaxation frag in the first place when it can be
     determined up front that a short instruction will suffice.  */

  /* No other frag type should ever reach this hook.  */
  gas_assert (fragp->fr_type == rs_machine_dependent);
  /* Worst case: the wide (32-bit) Thumb encoding.  */
  return INSN_SIZE;
}
20397
20398 /* Estimate the size of a frag before relaxing. Assume everything fits in
20399 2 bytes. */
20400
int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  /* Start every relaxable Thumb insn at its narrow (2-byte) size;
     arm_relax_frag will grow it to 4 bytes when necessary.  */
  fragp->fr_var = 2;
  return 2;
}
20408
20409 /* Convert a machine dependent frag. */
20410
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  buf = fragp->fr_literal + fragp->fr_fix;

  /* Read back the narrow (16-bit) opcode originally emitted; its
     register fields are transplanted into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the T_MNEM_* code recorded at assembly time.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      /* Loads and stores: fr_var == 4 means the relaxer chose the wide
	 encoding, so rewrite the insn in place and pick the 32-bit
	 offset relocation; otherwise keep the narrow form.  */
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      /* SP- and PC-relative forms keep Rt in bits 8-10.  */
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      /* Register forms: move Rt and Rn into the wide layout.  */
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Narrow ADR is relative to the aligned PC; fold the 4-byte
	     pipeline offset into the addend here.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs put the destination at bit 8; cmp/cmn place the
	     first operand in the Rn field at bit 16.  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Carry the condition field over into bits 22-25.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy Rd and Rn from the narrow encoding.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 set means the S-suffixed (flag-setting) variant.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2)
      && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
20582
20583 /* Return the size of a relaxable immediate operand instruction.
20584 SHIFT and SIZE specify the form of the allowable immediate. */
20585 static int
20586 relax_immediate (fragS *fragp, int size, int shift)
20587 {
20588 offsetT offset;
20589 offsetT mask;
20590 offsetT low;
20591
20592 /* ??? Should be able to do better than this. */
20593 if (fragp->fr_symbol)
20594 return 4;
20595
20596 low = (1 << shift) - 1;
20597 mask = (1 << (shift + size)) - (1 << shift);
20598 offset = fragp->fr_offset;
20599 /* Force misaligned offsets to 32-bit variant. */
20600 if (offset & low)
20601 return 4;
20602 if (offset & ~mask)
20603 return 4;
20604 return 2;
20605 }
20606
20607 /* Get the address of a symbol during relaxation. */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  /* NOTE: callers must guarantee fr_symbol is non-NULL; it is
     dereferenced unconditionally here.  */
  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the accumulated stretch toward zero to the
		 alignment boundary the intervening frag enforces.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the stretch if we actually reached SYM_FRAG.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
20656
20657 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
20658 load. */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.
     The NULL check also protects relaxed_symbol_addr, which requires a
     non-NULL symbol.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The base is the insn's address plus 4, rounded down to a word
     boundary (Thumb ADR semantics).  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  /* Narrow ADR encodes an unsigned word offset of at most 255 * 4.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
20683
20684 /* Return the size of a relaxable add/sub immediate instruction. */
20685 static int
20686 relax_addsub (fragS *fragp, asection *sec)
20687 {
20688 char *buf;
20689 int op;
20690
20691 buf = fragp->fr_literal + fragp->fr_fix;
20692 op = bfd_get_16(sec->owner, buf);
20693 if ((op & 0xf) == ((op >> 4) & 0xf))
20694 return relax_immediate (fragp, 8, 0);
20695 else
20696 return relax_immediate (fragp, 3, 0);
20697 }
20698
20699 /* Return TRUE iff the definition of symbol S could be pre-empted
20700 (overridden) at link or load time. */
static bfd_boolean
symbol_preemptible (symbolS *s)
{
  /* Weak symbols can always be pre-empted.  */
  if (S_IS_WEAK (s))
    return TRUE;

  /* Non-global symbols cannot be pre-empted.  */
  if (! S_IS_EXTERNAL (s))
    return FALSE;

#ifdef OBJ_ELF
  /* In ELF, a global symbol can be marked protected, or private.  In that
     case it can't be pre-empted (other definitions in the same link unit
     would violate the ODR).  */
  if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
    return FALSE;
#endif

  /* Other global symbols might be pre-empted.  */
  return TRUE;
}
20723
20724 /* Return the size of a relaxable branch instruction. BITS is the
20725 size of the offset field in the narrow instruction. */
20726
20727 static int
20728 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
20729 {
20730 addressT addr;
20731 offsetT val;
20732 offsetT limit;
20733
20734 /* Assume worst case for symbols not known to be in the same section. */
20735 if (!S_IS_DEFINED (fragp->fr_symbol)
20736 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20737 || S_IS_WEAK (fragp->fr_symbol))
20738 return 4;
20739
20740 #ifdef OBJ_ELF
20741 /* A branch to a function in ARM state will require interworking. */
20742 if (S_IS_DEFINED (fragp->fr_symbol)
20743 && ARM_IS_FUNC (fragp->fr_symbol))
20744 return 4;
20745 #endif
20746
20747 if (symbol_preemptible (fragp->fr_symbol))
20748 return 4;
20749
20750 val = relaxed_symbol_addr (fragp, stretch);
20751 addr = fragp->fr_address + fragp->fr_fix + 4;
20752 val -= addr;
20753
20754 /* Offset is a signed value *2 */
20755 limit = 1 << bits;
20756 if (val >= limit || val < -limit)
20757 return 4;
20758 return 2;
20759 }
20760
20761
20762 /* Relax a machine dependent frag. This returns the amount by which
20763 the current size of the frag should change. */
20764
int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the T_MNEM_* code recorded at assembly time; the
     size/shift arguments below describe each narrow encoding's
     immediate field.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
20840
20841 /* Round up a section size to the appropriate boundary. */
20842
valueT
md_section_align (segT	 segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
    }
#endif

  /* Other object formats: SIZE is returned unchanged.  */
  return size;
}
20864
20865 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
20866 of an rs_align_code fragment. */
20867
void
arm_handle_align (fragS * fragP)
{
  /* Canonical no-op encodings, indexed by [architecture][endianness].  */
  static char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this alignment frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  /* Choose the no-op flavour from the ARM/Thumb mode recorded in the
     frag and the selected CPU's capabilities.  */
  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* Pad the leading misaligned bytes with zeroes, marking them as
	 data for the ELF mapping symbols.  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
20984
20985 /* Called from md_do_align. Used to create an alignment
20986 frag in a code section. */
20987
20988 void
20989 arm_frag_align_code (int n, int max)
20990 {
20991 char * p;
20992
20993 /* We assume that there will never be a requirement
20994 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
20995 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
20996 {
20997 char err_msg[128];
20998
20999 sprintf (err_msg,
21000 _("alignments greater than %d bytes not supported in .text sections."),
21001 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21002 as_fatal ("%s", err_msg);
21003 }
21004
21005 p = frag_var (rs_align_code,
21006 MAX_MEM_FOR_RS_ALIGN_CODE,
21007 1,
21008 (relax_substateT) max,
21009 (symbolS *) NULL,
21010 (offsetT) n,
21011 (char *) NULL);
21012 *p = 0;
21013 }
21014
21015 /* Perform target specific initialisation of a frag.
21016 Note - despite the name this initialisation is not done when the frag
21017 is created, but only when its type is assigned. A frag can be created
21018 and used a long time before its type is set, so beware of assuming that
21019 this initialisationis performed first. */
21020
21021 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  MODE_RECORDED
     flags the field as valid for later consumers (arm_handle_align).  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21028
21029 #else /* OBJ_ELF is defined. */
void
arm_init_frag (fragS * fragP, int max_chars)
{
  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  /* Record a mapping symbol for alignment frags.  We will delete this
     later if the alignment ends up empty.  */
  switch (fragP->fr_type)
    {
    case rs_align:
    case rs_align_test:
    case rs_fill:
      /* Alignment padding and fills are data, not code.  */
      mapping_state_2 (MAP_DATA, max_chars);
      break;
    case rs_align_code:
      /* Code alignment gets a $a or $t symbol matching the mode.  */
      mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
      break;
    default:
      break;
    }
}
21054
21055 /* When we change sections we need to issue a new mapping symbol. */
21056
void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
21065
21066 int
21067 arm_elf_section_type (const char * str, size_t len)
21068 {
21069 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21070 return SHT_ARM_EXIDX;
21071
21072 return -1;
21073 }
21074 \f
21075 /* Code to deal with unwinding tables. */
21076
21077 static void add_unwind_adjustsp (offsetT);
21078
21079 /* Generate any deferred unwind frame offset. */
21080
static void
flush_pending_unwind (void)
{
  offsetT offset;

  /* Clear the pending offset before emitting, since add_unwind_adjustsp
     ends up back in add_unwind_opcode which checks pending_offset.  */
  offset = unwind.pending_offset;
  unwind.pending_offset = 0;
  if (offset != 0)
    add_unwind_adjustsp (offset);
}
21091
21092 /* Add an opcode to this list for this function. Two-byte opcodes should
21093 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21094 order. */
21095
21096 static void
21097 add_unwind_opcode (valueT op, int length)
21098 {
21099 /* Add any deferred stack adjustment. */
21100 if (unwind.pending_offset)
21101 flush_pending_unwind ();
21102
21103 unwind.sp_restored = 0;
21104
21105 if (unwind.opcode_count + length > unwind.opcode_alloc)
21106 {
21107 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21108 if (unwind.opcodes)
21109 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
21110 unwind.opcode_alloc);
21111 else
21112 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
21113 }
21114 while (length > 0)
21115 {
21116 length--;
21117 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21118 op >>= 8;
21119 unwind.opcode_count++;
21120 }
21121 }
21122
21123 /* Add unwind opcodes to adjust the stack pointer. */
21124
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  Bytes go in highest-first because the opcode
	 list is reversed when the table entry is emitted.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f (sp -= 0x100) opcodes until the
	 remainder fits a single short opcode.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21184
21185 /* Finish the list of unwind opcodes for this function. */
static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
21204
21205
21206 /* Start an exception table entry. If idx is nonzero this is an index table
21207 entry. */
21208
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  /* Index tables go in .ARM.exidx*, unwind data in .ARM.extab*.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is the prefix plus the text section's
     name (empty for plain ".text").  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = (char *) xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
21281
21282
21283 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21284 personality routine data. Returns zero, or the index table value for
21285 an inline entry. */
21286
static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.	*/
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.	 */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.	 */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.	*/
      size = unwind.opcode_count + 1;
    }

  /* Round up to a whole number of 32-bit words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.	*/
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.	*/
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.	*/
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.	*/
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
21451
21452
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  /* On entry to a function the CFA is defined as the stack pointer
     with a zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
21460 #endif /* OBJ_ELF */
21461
21462 /* Convert REGNAME to a DWARF-2 register number. */
21463
21464 int
21465 tc_arm_regname_to_dw2regnum (char *regname)
21466 {
21467 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
21468 if (reg != FAIL)
21469 return reg;
21470
21471 /* PR 16694: Allow VFP registers as well. */
21472 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
21473 if (reg != FAIL)
21474 return 64 + reg;
21475
21476 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
21477 if (reg != FAIL)
21478 return reg + 256;
21479
21480 return -1;
21481 }
21482
21483 #ifdef TE_PE
21484 void
21485 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
21486 {
21487 expressionS exp;
21488
21489 exp.X_op = O_secrel;
21490 exp.X_add_symbol = symbol;
21491 exp.X_add_number = 0;
21492 emit_expr (&exp, size);
21493 }
21494 #endif
21495
21496 /* MD interface: Symbol and relocation handling. */
21497
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  /* The raw address of the fixup itself.  */
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

      /* Thumb PC-relative loads: +4 pipeline offset, then the PC is
	 aligned down to a word boundary before use.  */
    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* BASE may have been zeroed above in anticipation of a reloc.
	 When this branch will be resolved locally against an ARM-state
	 function on a v5T+ core, restore the real fixup address.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* As for BRANCH23 above, but the locally-resolved target must be
	 a Thumb-state function.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      /* Same pattern, but here the locally-resolved target is a
	 Thumb-state function.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
21625
21626 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21627 Otherwise we have no need to default values of symbols. */
21628
21629 symbolS *
21630 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
21631 {
21632 #ifdef OBJ_ELF
21633 if (name[0] == '_' && name[1] == 'G'
21634 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21635 {
21636 if (!GOT_symbol)
21637 {
21638 if (symbol_find (name))
21639 as_bad (_("GOT already in the symbol table"));
21640
21641 GOT_symbol = symbol_new (name, undefined_section,
21642 (valueT) 0, & zero_address_frag);
21643 }
21644
21645 return GOT_symbol;
21646 }
21647 #endif
21648
21649 return NULL;
21650 }
21651
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.  On success, returns the low half (8-bit value plus
   rotation, ready to OR into the instruction) and stores the matching
   high half through HIGHPART; otherwise returns FAIL.  */

static unsigned int
validate_immediate_twopart (unsigned int val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try each even rotation (only even rotate amounts are encodable)
     until one leaves a non-zero low byte; that byte becomes the first
     immediate.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	/* What remains after removing the low byte must itself fit in
	   a single byte, rotated 8, 16 or 24 bits further on, to form
	   the second immediate.  The "<< 7" places the (even) rotation
	   amount, halved, at bit 8 - the modified-immediate rotate
	   field.  */
	if (a & 0xff00)
	  {
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* The low byte was non-zero and bytes 1 and 2 are clear,
	       so the remainder, if any, must be in the top byte.  */
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	return (a & 0xff) | (i << 7);
      }

  /* VAL is zero, or no rotation produced a usable split.  */
  return FAIL;
}
21690
21691 static int
21692 validate_offset_imm (unsigned int val, int hwse)
21693 {
21694 if ((hwse && val > 255) || val > 4095)
21695 return FAIL;
21696 return val;
21697 }
21698
21699 /* Subroutine of md_apply_fix. Do those data_ops which can take a
21700 negative immediate constant by altering the instruction. A bit of
21701 a hack really.
21702 MOV <-> MVN
21703 AND <-> BIC
21704 ADC <-> SBC
21705 by inverting the second operand, and
21706 ADD <-> SUB
21707 CMP <-> CMN
21708 by negating the second operand. */
21709
21710 static int
21711 negate_data_op (unsigned long * instruction,
21712 unsigned long value)
21713 {
21714 int op, new_inst;
21715 unsigned long negated, inverted;
21716
21717 negated = encode_arm_immediate (-value);
21718 inverted = encode_arm_immediate (~value);
21719
21720 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
21721 switch (op)
21722 {
21723 /* First negates. */
21724 case OPCODE_SUB: /* ADD <-> SUB */
21725 new_inst = OPCODE_ADD;
21726 value = negated;
21727 break;
21728
21729 case OPCODE_ADD:
21730 new_inst = OPCODE_SUB;
21731 value = negated;
21732 break;
21733
21734 case OPCODE_CMP: /* CMP <-> CMN */
21735 new_inst = OPCODE_CMN;
21736 value = negated;
21737 break;
21738
21739 case OPCODE_CMN:
21740 new_inst = OPCODE_CMP;
21741 value = negated;
21742 break;
21743
21744 /* Now Inverted ops. */
21745 case OPCODE_MOV: /* MOV <-> MVN */
21746 new_inst = OPCODE_MVN;
21747 value = inverted;
21748 break;
21749
21750 case OPCODE_MVN:
21751 new_inst = OPCODE_MOV;
21752 value = inverted;
21753 break;
21754
21755 case OPCODE_AND: /* AND <-> BIC */
21756 new_inst = OPCODE_BIC;
21757 value = inverted;
21758 break;
21759
21760 case OPCODE_BIC:
21761 new_inst = OPCODE_AND;
21762 value = inverted;
21763 break;
21764
21765 case OPCODE_ADC: /* ADC <-> SBC */
21766 new_inst = OPCODE_SBC;
21767 value = inverted;
21768 break;
21769
21770 case OPCODE_SBC:
21771 new_inst = OPCODE_ADC;
21772 value = inverted;
21773 break;
21774
21775 /* We cannot do anything. */
21776 default:
21777 return FAIL;
21778 }
21779
21780 if (value == (unsigned) FAIL)
21781 return FAIL;
21782
21783 *instruction &= OPCODE_MASK;
21784 *instruction |= new_inst << DATA_OP_SHIFT;
21785 return value;
21786 }
21787
21788 /* Like negate_data_op, but for Thumb-2. */
21789
21790 static unsigned int
21791 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
21792 {
21793 int op, new_inst;
21794 int rd;
21795 unsigned int negated, inverted;
21796
21797 negated = encode_thumb32_immediate (-value);
21798 inverted = encode_thumb32_immediate (~value);
21799
21800 rd = (*instruction >> 8) & 0xf;
21801 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
21802 switch (op)
21803 {
21804 /* ADD <-> SUB. Includes CMP <-> CMN. */
21805 case T2_OPCODE_SUB:
21806 new_inst = T2_OPCODE_ADD;
21807 value = negated;
21808 break;
21809
21810 case T2_OPCODE_ADD:
21811 new_inst = T2_OPCODE_SUB;
21812 value = negated;
21813 break;
21814
21815 /* ORR <-> ORN. Includes MOV <-> MVN. */
21816 case T2_OPCODE_ORR:
21817 new_inst = T2_OPCODE_ORN;
21818 value = inverted;
21819 break;
21820
21821 case T2_OPCODE_ORN:
21822 new_inst = T2_OPCODE_ORR;
21823 value = inverted;
21824 break;
21825
21826 /* AND <-> BIC. TST has no inverted equivalent. */
21827 case T2_OPCODE_AND:
21828 new_inst = T2_OPCODE_BIC;
21829 if (rd == 15)
21830 value = FAIL;
21831 else
21832 value = inverted;
21833 break;
21834
21835 case T2_OPCODE_BIC:
21836 new_inst = T2_OPCODE_AND;
21837 value = inverted;
21838 break;
21839
21840 /* ADC <-> SBC */
21841 case T2_OPCODE_ADC:
21842 new_inst = T2_OPCODE_SBC;
21843 value = inverted;
21844 break;
21845
21846 case T2_OPCODE_SBC:
21847 new_inst = T2_OPCODE_ADC;
21848 value = inverted;
21849 break;
21850
21851 /* We cannot do anything. */
21852 default:
21853 return FAIL;
21854 }
21855
21856 if (value == (unsigned int)FAIL)
21857 return FAIL;
21858
21859 *instruction &= T2_OPCODE_MASK;
21860 *instruction |= new_inst << T2_DATA_OP_SHIFT;
21861 return value;
21862 }
21863
21864 /* Read a 32-bit thumb instruction from buf. */
21865 static unsigned long
21866 get_thumb32_insn (char * buf)
21867 {
21868 unsigned long insn;
21869 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
21870 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21871
21872 return insn;
21873 }
21874
21875
21876 /* We usually want to set the low bit on the address of thumb function
21877 symbols. In particular .word foo - . should have the low bit set.
21878 Generic code tries to fold the difference of two symbols to
21879 a constant. Prevent this and force a relocation when the first symbols
21880 is a thumb function. */
21881
21882 bfd_boolean
21883 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
21884 {
21885 if (op == O_subtract
21886 && l->X_op == O_symbol
21887 && r->X_op == O_symbol
21888 && THUMB_IS_FUNC (l->X_add_symbol))
21889 {
21890 l->X_op = O_subtract;
21891 l->X_op_symbol = r->X_add_symbol;
21892 l->X_add_number -= r->X_add_number;
21893 return TRUE;
21894 }
21895
21896 /* Process as normal. */
21897 return FALSE;
21898 }
21899
/* Encode Thumb2 unconditional branches and calls.  The immediate
   fields are laid out identically for the two.  */

static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the byte offset into the fields of the encoding: sign bit S,
     intermediate bits I1/I2, a 10-bit high part and an 11-bit low
     part.  Bit 0 of VALUE is discarded (instructions are at least
     2-byte aligned).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  /* First halfword carries S and the high immediate.  */
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* Second halfword stores J1 = NOT (I1 EOR S) at bit 13 and
     J2 = NOT (I2 EOR S) at bit 11; the final XOR with T2I1I2MASK
     performs the NOT.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
21924
21925 void
21926 md_apply_fix (fixS * fixP,
21927 valueT * valP,
21928 segT seg)
21929 {
21930 offsetT value = * valP;
21931 offsetT newval;
21932 unsigned int newimm;
21933 unsigned long temp;
21934 int sign;
21935 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
21936
21937 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
21938
21939 /* Note whether this will delete the relocation. */
21940
21941 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
21942 fixP->fx_done = 1;
21943
21944 /* On a 64-bit host, silently truncate 'value' to 32 bits for
21945 consistency with the behaviour on 32-bit hosts. Remember value
21946 for emit_reloc. */
21947 value &= 0xffffffff;
21948 value ^= 0x80000000;
21949 value -= 0x80000000;
21950
21951 *valP = value;
21952 fixP->fx_addnumber = value;
21953
21954 /* Same treatment for fixP->fx_offset. */
21955 fixP->fx_offset &= 0xffffffff;
21956 fixP->fx_offset ^= 0x80000000;
21957 fixP->fx_offset -= 0x80000000;
21958
21959 switch (fixP->fx_r_type)
21960 {
21961 case BFD_RELOC_NONE:
21962 /* This will need to go in the object file. */
21963 fixP->fx_done = 0;
21964 break;
21965
21966 case BFD_RELOC_ARM_IMMEDIATE:
21967 /* We claim that this fixup has been processed here,
21968 even if in fact we generate an error because we do
21969 not have a reloc for it, so tc_gen_reloc will reject it. */
21970 fixP->fx_done = 1;
21971
21972 if (fixP->fx_addsy)
21973 {
21974 const char *msg = 0;
21975
21976 if (! S_IS_DEFINED (fixP->fx_addsy))
21977 msg = _("undefined symbol %s used as an immediate value");
21978 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21979 msg = _("symbol %s is in a different section");
21980 else if (S_IS_WEAK (fixP->fx_addsy))
21981 msg = _("symbol %s is weak and may be overridden later");
21982
21983 if (msg)
21984 {
21985 as_bad_where (fixP->fx_file, fixP->fx_line,
21986 msg, S_GET_NAME (fixP->fx_addsy));
21987 break;
21988 }
21989 }
21990
21991 temp = md_chars_to_number (buf, INSN_SIZE);
21992
21993 /* If the offset is negative, we should use encoding A2 for ADR. */
21994 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
21995 newimm = negate_data_op (&temp, value);
21996 else
21997 {
21998 newimm = encode_arm_immediate (value);
21999
22000 /* If the instruction will fail, see if we can fix things up by
22001 changing the opcode. */
22002 if (newimm == (unsigned int) FAIL)
22003 newimm = negate_data_op (&temp, value);
22004 }
22005
22006 if (newimm == (unsigned int) FAIL)
22007 {
22008 as_bad_where (fixP->fx_file, fixP->fx_line,
22009 _("invalid constant (%lx) after fixup"),
22010 (unsigned long) value);
22011 break;
22012 }
22013
22014 newimm |= (temp & 0xfffff000);
22015 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22016 break;
22017
22018 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22019 {
22020 unsigned int highpart = 0;
22021 unsigned int newinsn = 0xe1a00000; /* nop. */
22022
22023 if (fixP->fx_addsy)
22024 {
22025 const char *msg = 0;
22026
22027 if (! S_IS_DEFINED (fixP->fx_addsy))
22028 msg = _("undefined symbol %s used as an immediate value");
22029 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22030 msg = _("symbol %s is in a different section");
22031 else if (S_IS_WEAK (fixP->fx_addsy))
22032 msg = _("symbol %s is weak and may be overridden later");
22033
22034 if (msg)
22035 {
22036 as_bad_where (fixP->fx_file, fixP->fx_line,
22037 msg, S_GET_NAME (fixP->fx_addsy));
22038 break;
22039 }
22040 }
22041
22042 newimm = encode_arm_immediate (value);
22043 temp = md_chars_to_number (buf, INSN_SIZE);
22044
22045 /* If the instruction will fail, see if we can fix things up by
22046 changing the opcode. */
22047 if (newimm == (unsigned int) FAIL
22048 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22049 {
22050 /* No ? OK - try using two ADD instructions to generate
22051 the value. */
22052 newimm = validate_immediate_twopart (value, & highpart);
22053
22054 /* Yes - then make sure that the second instruction is
22055 also an add. */
22056 if (newimm != (unsigned int) FAIL)
22057 newinsn = temp;
22058 /* Still No ? Try using a negated value. */
22059 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22060 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22061 /* Otherwise - give up. */
22062 else
22063 {
22064 as_bad_where (fixP->fx_file, fixP->fx_line,
22065 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22066 (long) value);
22067 break;
22068 }
22069
22070 /* Replace the first operand in the 2nd instruction (which
22071 is the PC) with the destination register. We have
22072 already added in the PC in the first instruction and we
22073 do not want to do it again. */
22074 newinsn &= ~ 0xf0000;
22075 newinsn |= ((newinsn & 0x0f000) << 4);
22076 }
22077
22078 newimm |= (temp & 0xfffff000);
22079 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22080
22081 highpart |= (newinsn & 0xfffff000);
22082 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22083 }
22084 break;
22085
22086 case BFD_RELOC_ARM_OFFSET_IMM:
22087 if (!fixP->fx_done && seg->use_rela_p)
22088 value = 0;
22089
22090 case BFD_RELOC_ARM_LITERAL:
22091 sign = value > 0;
22092
22093 if (value < 0)
22094 value = - value;
22095
22096 if (validate_offset_imm (value, 0) == FAIL)
22097 {
22098 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22099 as_bad_where (fixP->fx_file, fixP->fx_line,
22100 _("invalid literal constant: pool needs to be closer"));
22101 else
22102 as_bad_where (fixP->fx_file, fixP->fx_line,
22103 _("bad immediate value for offset (%ld)"),
22104 (long) value);
22105 break;
22106 }
22107
22108 newval = md_chars_to_number (buf, INSN_SIZE);
22109 if (value == 0)
22110 newval &= 0xfffff000;
22111 else
22112 {
22113 newval &= 0xff7ff000;
22114 newval |= value | (sign ? INDEX_UP : 0);
22115 }
22116 md_number_to_chars (buf, newval, INSN_SIZE);
22117 break;
22118
22119 case BFD_RELOC_ARM_OFFSET_IMM8:
22120 case BFD_RELOC_ARM_HWLITERAL:
22121 sign = value > 0;
22122
22123 if (value < 0)
22124 value = - value;
22125
22126 if (validate_offset_imm (value, 1) == FAIL)
22127 {
22128 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22129 as_bad_where (fixP->fx_file, fixP->fx_line,
22130 _("invalid literal constant: pool needs to be closer"));
22131 else
22132 as_bad_where (fixP->fx_file, fixP->fx_line,
22133 _("bad immediate value for 8-bit offset (%ld)"),
22134 (long) value);
22135 break;
22136 }
22137
22138 newval = md_chars_to_number (buf, INSN_SIZE);
22139 if (value == 0)
22140 newval &= 0xfffff0f0;
22141 else
22142 {
22143 newval &= 0xff7ff0f0;
22144 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22145 }
22146 md_number_to_chars (buf, newval, INSN_SIZE);
22147 break;
22148
22149 case BFD_RELOC_ARM_T32_OFFSET_U8:
22150 if (value < 0 || value > 1020 || value % 4 != 0)
22151 as_bad_where (fixP->fx_file, fixP->fx_line,
22152 _("bad immediate value for offset (%ld)"), (long) value);
22153 value /= 4;
22154
22155 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22156 newval |= value;
22157 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22158 break;
22159
22160 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22161 /* This is a complicated relocation used for all varieties of Thumb32
22162 load/store instruction with immediate offset:
22163
22164 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22165 *4, optional writeback(W)
22166 (doubleword load/store)
22167
22168 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22169 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22170 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22171 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22172 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22173
22174 Uppercase letters indicate bits that are already encoded at
22175 this point. Lowercase letters are our problem. For the
22176 second block of instructions, the secondary opcode nybble
22177 (bits 8..11) is present, and bit 23 is zero, even if this is
22178 a PC-relative operation. */
22179 newval = md_chars_to_number (buf, THUMB_SIZE);
22180 newval <<= 16;
22181 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22182
22183 if ((newval & 0xf0000000) == 0xe0000000)
22184 {
22185 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22186 if (value >= 0)
22187 newval |= (1 << 23);
22188 else
22189 value = -value;
22190 if (value % 4 != 0)
22191 {
22192 as_bad_where (fixP->fx_file, fixP->fx_line,
22193 _("offset not a multiple of 4"));
22194 break;
22195 }
22196 value /= 4;
22197 if (value > 0xff)
22198 {
22199 as_bad_where (fixP->fx_file, fixP->fx_line,
22200 _("offset out of range"));
22201 break;
22202 }
22203 newval &= ~0xff;
22204 }
22205 else if ((newval & 0x000f0000) == 0x000f0000)
22206 {
22207 /* PC-relative, 12-bit offset. */
22208 if (value >= 0)
22209 newval |= (1 << 23);
22210 else
22211 value = -value;
22212 if (value > 0xfff)
22213 {
22214 as_bad_where (fixP->fx_file, fixP->fx_line,
22215 _("offset out of range"));
22216 break;
22217 }
22218 newval &= ~0xfff;
22219 }
22220 else if ((newval & 0x00000100) == 0x00000100)
22221 {
22222 /* Writeback: 8-bit, +/- offset. */
22223 if (value >= 0)
22224 newval |= (1 << 9);
22225 else
22226 value = -value;
22227 if (value > 0xff)
22228 {
22229 as_bad_where (fixP->fx_file, fixP->fx_line,
22230 _("offset out of range"));
22231 break;
22232 }
22233 newval &= ~0xff;
22234 }
22235 else if ((newval & 0x00000f00) == 0x00000e00)
22236 {
22237 /* T-instruction: positive 8-bit offset. */
22238 if (value < 0 || value > 0xff)
22239 {
22240 as_bad_where (fixP->fx_file, fixP->fx_line,
22241 _("offset out of range"));
22242 break;
22243 }
22244 newval &= ~0xff;
22245 newval |= value;
22246 }
22247 else
22248 {
22249 /* Positive 12-bit or negative 8-bit offset. */
22250 int limit;
22251 if (value >= 0)
22252 {
22253 newval |= (1 << 23);
22254 limit = 0xfff;
22255 }
22256 else
22257 {
22258 value = -value;
22259 limit = 0xff;
22260 }
22261 if (value > limit)
22262 {
22263 as_bad_where (fixP->fx_file, fixP->fx_line,
22264 _("offset out of range"));
22265 break;
22266 }
22267 newval &= ~limit;
22268 }
22269
22270 newval |= value;
22271 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
22272 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
22273 break;
22274
22275 case BFD_RELOC_ARM_SHIFT_IMM:
22276 newval = md_chars_to_number (buf, INSN_SIZE);
22277 if (((unsigned long) value) > 32
22278 || (value == 32
22279 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
22280 {
22281 as_bad_where (fixP->fx_file, fixP->fx_line,
22282 _("shift expression is too large"));
22283 break;
22284 }
22285
22286 if (value == 0)
22287 /* Shifts of zero must be done as lsl. */
22288 newval &= ~0x60;
22289 else if (value == 32)
22290 value = 0;
22291 newval &= 0xfffff07f;
22292 newval |= (value & 0x1f) << 7;
22293 md_number_to_chars (buf, newval, INSN_SIZE);
22294 break;
22295
22296 case BFD_RELOC_ARM_T32_IMMEDIATE:
22297 case BFD_RELOC_ARM_T32_ADD_IMM:
22298 case BFD_RELOC_ARM_T32_IMM12:
22299 case BFD_RELOC_ARM_T32_ADD_PC12:
22300 /* We claim that this fixup has been processed here,
22301 even if in fact we generate an error because we do
22302 not have a reloc for it, so tc_gen_reloc will reject it. */
22303 fixP->fx_done = 1;
22304
22305 if (fixP->fx_addsy
22306 && ! S_IS_DEFINED (fixP->fx_addsy))
22307 {
22308 as_bad_where (fixP->fx_file, fixP->fx_line,
22309 _("undefined symbol %s used as an immediate value"),
22310 S_GET_NAME (fixP->fx_addsy));
22311 break;
22312 }
22313
22314 newval = md_chars_to_number (buf, THUMB_SIZE);
22315 newval <<= 16;
22316 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
22317
22318 newimm = FAIL;
22319 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22320 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22321 {
22322 newimm = encode_thumb32_immediate (value);
22323 if (newimm == (unsigned int) FAIL)
22324 newimm = thumb32_negate_data_op (&newval, value);
22325 }
22326 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
22327 && newimm == (unsigned int) FAIL)
22328 {
22329 /* Turn add/sum into addw/subw. */
22330 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22331 newval = (newval & 0xfeffffff) | 0x02000000;
22332 /* No flat 12-bit imm encoding for addsw/subsw. */
22333 if ((newval & 0x00100000) == 0)
22334 {
22335 /* 12 bit immediate for addw/subw. */
22336 if (value < 0)
22337 {
22338 value = -value;
22339 newval ^= 0x00a00000;
22340 }
22341 if (value > 0xfff)
22342 newimm = (unsigned int) FAIL;
22343 else
22344 newimm = value;
22345 }
22346 }
22347
22348 if (newimm == (unsigned int)FAIL)
22349 {
22350 as_bad_where (fixP->fx_file, fixP->fx_line,
22351 _("invalid constant (%lx) after fixup"),
22352 (unsigned long) value);
22353 break;
22354 }
22355
22356 newval |= (newimm & 0x800) << 15;
22357 newval |= (newimm & 0x700) << 4;
22358 newval |= (newimm & 0x0ff);
22359
22360 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
22361 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
22362 break;
22363
22364 case BFD_RELOC_ARM_SMC:
22365 if (((unsigned long) value) > 0xffff)
22366 as_bad_where (fixP->fx_file, fixP->fx_line,
22367 _("invalid smc expression"));
22368 newval = md_chars_to_number (buf, INSN_SIZE);
22369 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22370 md_number_to_chars (buf, newval, INSN_SIZE);
22371 break;
22372
22373 case BFD_RELOC_ARM_HVC:
22374 if (((unsigned long) value) > 0xffff)
22375 as_bad_where (fixP->fx_file, fixP->fx_line,
22376 _("invalid hvc expression"));
22377 newval = md_chars_to_number (buf, INSN_SIZE);
22378 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22379 md_number_to_chars (buf, newval, INSN_SIZE);
22380 break;
22381
22382 case BFD_RELOC_ARM_SWI:
22383 if (fixP->tc_fix_data != 0)
22384 {
22385 if (((unsigned long) value) > 0xff)
22386 as_bad_where (fixP->fx_file, fixP->fx_line,
22387 _("invalid swi expression"));
22388 newval = md_chars_to_number (buf, THUMB_SIZE);
22389 newval |= value;
22390 md_number_to_chars (buf, newval, THUMB_SIZE);
22391 }
22392 else
22393 {
22394 if (((unsigned long) value) > 0x00ffffff)
22395 as_bad_where (fixP->fx_file, fixP->fx_line,
22396 _("invalid swi expression"));
22397 newval = md_chars_to_number (buf, INSN_SIZE);
22398 newval |= value;
22399 md_number_to_chars (buf, newval, INSN_SIZE);
22400 }
22401 break;
22402
22403 case BFD_RELOC_ARM_MULTI:
22404 if (((unsigned long) value) > 0xffff)
22405 as_bad_where (fixP->fx_file, fixP->fx_line,
22406 _("invalid expression in load/store multiple"));
22407 newval = value | md_chars_to_number (buf, INSN_SIZE);
22408 md_number_to_chars (buf, newval, INSN_SIZE);
22409 break;
22410
22411 #ifdef OBJ_ELF
22412 case BFD_RELOC_ARM_PCREL_CALL:
22413
22414 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22415 && fixP->fx_addsy
22416 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22417 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22418 && THUMB_IS_FUNC (fixP->fx_addsy))
22419 /* Flip the bl to blx. This is a simple flip
22420 bit here because we generate PCREL_CALL for
22421 unconditional bls. */
22422 {
22423 newval = md_chars_to_number (buf, INSN_SIZE);
22424 newval = newval | 0x10000000;
22425 md_number_to_chars (buf, newval, INSN_SIZE);
22426 temp = 1;
22427 fixP->fx_done = 1;
22428 }
22429 else
22430 temp = 3;
22431 goto arm_branch_common;
22432
22433 case BFD_RELOC_ARM_PCREL_JUMP:
22434 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22435 && fixP->fx_addsy
22436 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22437 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22438 && THUMB_IS_FUNC (fixP->fx_addsy))
22439 {
22440 /* This would map to a bl<cond>, b<cond>,
22441 b<always> to a Thumb function. We
22442 need to force a relocation for this particular
22443 case. */
22444 newval = md_chars_to_number (buf, INSN_SIZE);
22445 fixP->fx_done = 0;
22446 }
22447
22448 case BFD_RELOC_ARM_PLT32:
22449 #endif
22450 case BFD_RELOC_ARM_PCREL_BRANCH:
22451 temp = 3;
22452 goto arm_branch_common;
22453
22454 case BFD_RELOC_ARM_PCREL_BLX:
22455
22456 temp = 1;
22457 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22458 && fixP->fx_addsy
22459 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22460 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22461 && ARM_IS_FUNC (fixP->fx_addsy))
22462 {
22463 /* Flip the blx to a bl and warn. */
22464 const char *name = S_GET_NAME (fixP->fx_addsy);
22465 newval = 0xeb000000;
22466 as_warn_where (fixP->fx_file, fixP->fx_line,
22467 _("blx to '%s' an ARM ISA state function changed to bl"),
22468 name);
22469 md_number_to_chars (buf, newval, INSN_SIZE);
22470 temp = 3;
22471 fixP->fx_done = 1;
22472 }
22473
22474 #ifdef OBJ_ELF
22475 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22476 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
22477 #endif
22478
22479 arm_branch_common:
22480 /* We are going to store value (shifted right by two) in the
22481 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22482 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
22483 also be be clear. */
22484 if (value & temp)
22485 as_bad_where (fixP->fx_file, fixP->fx_line,
22486 _("misaligned branch destination"));
22487 if ((value & (offsetT)0xfe000000) != (offsetT)0
22488 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
22489 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22490
22491 if (fixP->fx_done || !seg->use_rela_p)
22492 {
22493 newval = md_chars_to_number (buf, INSN_SIZE);
22494 newval |= (value >> 2) & 0x00ffffff;
22495 /* Set the H bit on BLX instructions. */
22496 if (temp == 1)
22497 {
22498 if (value & 2)
22499 newval |= 0x01000000;
22500 else
22501 newval &= ~0x01000000;
22502 }
22503 md_number_to_chars (buf, newval, INSN_SIZE);
22504 }
22505 break;
22506
22507 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
22508 /* CBZ can only branch forward. */
22509
22510 /* Attempts to use CBZ to branch to the next instruction
22511 (which, strictly speaking, are prohibited) will be turned into
22512 no-ops.
22513
22514 FIXME: It may be better to remove the instruction completely and
22515 perform relaxation. */
22516 if (value == -2)
22517 {
22518 newval = md_chars_to_number (buf, THUMB_SIZE);
22519 newval = 0xbf00; /* NOP encoding T1 */
22520 md_number_to_chars (buf, newval, THUMB_SIZE);
22521 }
22522 else
22523 {
22524 if (value & ~0x7e)
22525 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22526
22527 if (fixP->fx_done || !seg->use_rela_p)
22528 {
22529 newval = md_chars_to_number (buf, THUMB_SIZE);
22530 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
22531 md_number_to_chars (buf, newval, THUMB_SIZE);
22532 }
22533 }
22534 break;
22535
22536 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
22537 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
22538 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22539
22540 if (fixP->fx_done || !seg->use_rela_p)
22541 {
22542 newval = md_chars_to_number (buf, THUMB_SIZE);
22543 newval |= (value & 0x1ff) >> 1;
22544 md_number_to_chars (buf, newval, THUMB_SIZE);
22545 }
22546 break;
22547
22548 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
22549 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
22550 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22551
22552 if (fixP->fx_done || !seg->use_rela_p)
22553 {
22554 newval = md_chars_to_number (buf, THUMB_SIZE);
22555 newval |= (value & 0xfff) >> 1;
22556 md_number_to_chars (buf, newval, THUMB_SIZE);
22557 }
22558 break;
22559
22560 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22561 if (fixP->fx_addsy
22562 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22563 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22564 && ARM_IS_FUNC (fixP->fx_addsy)
22565 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22566 {
22567 /* Force a relocation for a branch 20 bits wide. */
22568 fixP->fx_done = 0;
22569 }
22570 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
22571 as_bad_where (fixP->fx_file, fixP->fx_line,
22572 _("conditional branch out of range"));
22573
22574 if (fixP->fx_done || !seg->use_rela_p)
22575 {
22576 offsetT newval2;
22577 addressT S, J1, J2, lo, hi;
22578
22579 S = (value & 0x00100000) >> 20;
22580 J2 = (value & 0x00080000) >> 19;
22581 J1 = (value & 0x00040000) >> 18;
22582 hi = (value & 0x0003f000) >> 12;
22583 lo = (value & 0x00000ffe) >> 1;
22584
22585 newval = md_chars_to_number (buf, THUMB_SIZE);
22586 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22587 newval |= (S << 10) | hi;
22588 newval2 |= (J1 << 13) | (J2 << 11) | lo;
22589 md_number_to_chars (buf, newval, THUMB_SIZE);
22590 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22591 }
22592 break;
22593
22594 case BFD_RELOC_THUMB_PCREL_BLX:
22595 /* If there is a blx from a thumb state function to
22596 another thumb function flip this to a bl and warn
22597 about it. */
22598
22599 if (fixP->fx_addsy
22600 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22601 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22602 && THUMB_IS_FUNC (fixP->fx_addsy))
22603 {
22604 const char *name = S_GET_NAME (fixP->fx_addsy);
22605 as_warn_where (fixP->fx_file, fixP->fx_line,
22606 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22607 name);
22608 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22609 newval = newval | 0x1000;
22610 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22611 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22612 fixP->fx_done = 1;
22613 }
22614
22615
22616 goto thumb_bl_common;
22617
22618 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22619 /* A bl from Thumb state ISA to an internal ARM state function
22620 is converted to a blx. */
22621 if (fixP->fx_addsy
22622 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22623 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22624 && ARM_IS_FUNC (fixP->fx_addsy)
22625 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22626 {
22627 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22628 newval = newval & ~0x1000;
22629 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22630 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
22631 fixP->fx_done = 1;
22632 }
22633
22634 thumb_bl_common:
22635
22636 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22637 /* For a BLX instruction, make sure that the relocation is rounded up
22638 to a word boundary. This follows the semantics of the instruction
22639 which specifies that bit 1 of the target address will come from bit
22640 1 of the base address. */
22641 value = (value + 3) & ~ 3;
22642
22643 #ifdef OBJ_ELF
22644 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
22645 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22646 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22647 #endif
22648
22649 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
22650 {
22651 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
22652 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22653 else if ((value & ~0x1ffffff)
22654 && ((value & ~0x1ffffff) != ~0x1ffffff))
22655 as_bad_where (fixP->fx_file, fixP->fx_line,
22656 _("Thumb2 branch out of range"));
22657 }
22658
22659 if (fixP->fx_done || !seg->use_rela_p)
22660 encode_thumb2_b_bl_offset (buf, value);
22661
22662 break;
22663
22664 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22665 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
22666 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22667
22668 if (fixP->fx_done || !seg->use_rela_p)
22669 encode_thumb2_b_bl_offset (buf, value);
22670
22671 break;
22672
22673 case BFD_RELOC_8:
22674 if (fixP->fx_done || !seg->use_rela_p)
22675 *buf = value;
22676 break;
22677
22678 case BFD_RELOC_16:
22679 if (fixP->fx_done || !seg->use_rela_p)
22680 md_number_to_chars (buf, value, 2);
22681 break;
22682
22683 #ifdef OBJ_ELF
22684 case BFD_RELOC_ARM_TLS_CALL:
22685 case BFD_RELOC_ARM_THM_TLS_CALL:
22686 case BFD_RELOC_ARM_TLS_DESCSEQ:
22687 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22688 case BFD_RELOC_ARM_TLS_GOTDESC:
22689 case BFD_RELOC_ARM_TLS_GD32:
22690 case BFD_RELOC_ARM_TLS_LE32:
22691 case BFD_RELOC_ARM_TLS_IE32:
22692 case BFD_RELOC_ARM_TLS_LDM32:
22693 case BFD_RELOC_ARM_TLS_LDO32:
22694 S_SET_THREAD_LOCAL (fixP->fx_addsy);
22695 break;
22696
22697 case BFD_RELOC_ARM_GOT32:
22698 case BFD_RELOC_ARM_GOTOFF:
22699 break;
22700
22701 case BFD_RELOC_ARM_GOT_PREL:
22702 if (fixP->fx_done || !seg->use_rela_p)
22703 md_number_to_chars (buf, value, 4);
22704 break;
22705
22706 case BFD_RELOC_ARM_TARGET2:
22707 /* TARGET2 is not partial-inplace, so we need to write the
22708 addend here for REL targets, because it won't be written out
22709 during reloc processing later. */
22710 if (fixP->fx_done || !seg->use_rela_p)
22711 md_number_to_chars (buf, fixP->fx_offset, 4);
22712 break;
22713 #endif
22714
22715 case BFD_RELOC_RVA:
22716 case BFD_RELOC_32:
22717 case BFD_RELOC_ARM_TARGET1:
22718 case BFD_RELOC_ARM_ROSEGREL32:
22719 case BFD_RELOC_ARM_SBREL32:
22720 case BFD_RELOC_32_PCREL:
22721 #ifdef TE_PE
22722 case BFD_RELOC_32_SECREL:
22723 #endif
22724 if (fixP->fx_done || !seg->use_rela_p)
22725 #ifdef TE_WINCE
22726 /* For WinCE we only do this for pcrel fixups. */
22727 if (fixP->fx_done || fixP->fx_pcrel)
22728 #endif
22729 md_number_to_chars (buf, value, 4);
22730 break;
22731
22732 #ifdef OBJ_ELF
22733 case BFD_RELOC_ARM_PREL31:
22734 if (fixP->fx_done || !seg->use_rela_p)
22735 {
22736 newval = md_chars_to_number (buf, 4) & 0x80000000;
22737 if ((value ^ (value >> 1)) & 0x40000000)
22738 {
22739 as_bad_where (fixP->fx_file, fixP->fx_line,
22740 _("rel31 relocation overflow"));
22741 }
22742 newval |= value & 0x7fffffff;
22743 md_number_to_chars (buf, newval, 4);
22744 }
22745 break;
22746 #endif
22747
22748 case BFD_RELOC_ARM_CP_OFF_IMM:
22749 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22750 if (value < -1023 || value > 1023 || (value & 3))
22751 as_bad_where (fixP->fx_file, fixP->fx_line,
22752 _("co-processor offset out of range"));
22753 cp_off_common:
22754 sign = value > 0;
22755 if (value < 0)
22756 value = -value;
22757 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22758 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22759 newval = md_chars_to_number (buf, INSN_SIZE);
22760 else
22761 newval = get_thumb32_insn (buf);
22762 if (value == 0)
22763 newval &= 0xffffff00;
22764 else
22765 {
22766 newval &= 0xff7fff00;
22767 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
22768 }
22769 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22770 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22771 md_number_to_chars (buf, newval, INSN_SIZE);
22772 else
22773 put_thumb32_insn (buf, newval);
22774 break;
22775
22776 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
22777 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
22778 if (value < -255 || value > 255)
22779 as_bad_where (fixP->fx_file, fixP->fx_line,
22780 _("co-processor offset out of range"));
22781 value *= 4;
22782 goto cp_off_common;
22783
22784 case BFD_RELOC_ARM_THUMB_OFFSET:
22785 newval = md_chars_to_number (buf, THUMB_SIZE);
22786 /* Exactly what ranges, and where the offset is inserted depends
22787 on the type of instruction, we can establish this from the
22788 top 4 bits. */
22789 switch (newval >> 12)
22790 {
22791 case 4: /* PC load. */
22792 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
22793 forced to zero for these loads; md_pcrel_from has already
22794 compensated for this. */
22795 if (value & 3)
22796 as_bad_where (fixP->fx_file, fixP->fx_line,
22797 _("invalid offset, target not word aligned (0x%08lX)"),
22798 (((unsigned long) fixP->fx_frag->fr_address
22799 + (unsigned long) fixP->fx_where) & ~3)
22800 + (unsigned long) value);
22801
22802 if (value & ~0x3fc)
22803 as_bad_where (fixP->fx_file, fixP->fx_line,
22804 _("invalid offset, value too big (0x%08lX)"),
22805 (long) value);
22806
22807 newval |= value >> 2;
22808 break;
22809
22810 case 9: /* SP load/store. */
22811 if (value & ~0x3fc)
22812 as_bad_where (fixP->fx_file, fixP->fx_line,
22813 _("invalid offset, value too big (0x%08lX)"),
22814 (long) value);
22815 newval |= value >> 2;
22816 break;
22817
22818 case 6: /* Word load/store. */
22819 if (value & ~0x7c)
22820 as_bad_where (fixP->fx_file, fixP->fx_line,
22821 _("invalid offset, value too big (0x%08lX)"),
22822 (long) value);
22823 newval |= value << 4; /* 6 - 2. */
22824 break;
22825
22826 case 7: /* Byte load/store. */
22827 if (value & ~0x1f)
22828 as_bad_where (fixP->fx_file, fixP->fx_line,
22829 _("invalid offset, value too big (0x%08lX)"),
22830 (long) value);
22831 newval |= value << 6;
22832 break;
22833
22834 case 8: /* Halfword load/store. */
22835 if (value & ~0x3e)
22836 as_bad_where (fixP->fx_file, fixP->fx_line,
22837 _("invalid offset, value too big (0x%08lX)"),
22838 (long) value);
22839 newval |= value << 5; /* 6 - 1. */
22840 break;
22841
22842 default:
22843 as_bad_where (fixP->fx_file, fixP->fx_line,
22844 "Unable to process relocation for thumb opcode: %lx",
22845 (unsigned long) newval);
22846 break;
22847 }
22848 md_number_to_chars (buf, newval, THUMB_SIZE);
22849 break;
22850
22851 case BFD_RELOC_ARM_THUMB_ADD:
22852 /* This is a complicated relocation, since we use it for all of
22853 the following immediate relocations:
22854
22855 3bit ADD/SUB
22856 8bit ADD/SUB
22857 9bit ADD/SUB SP word-aligned
22858 10bit ADD PC/SP word-aligned
22859
22860 The type of instruction being processed is encoded in the
22861 instruction field:
22862
22863 0x8000 SUB
22864 0x00F0 Rd
22865 0x000F Rs
22866 */
22867 newval = md_chars_to_number (buf, THUMB_SIZE);
22868 {
22869 int rd = (newval >> 4) & 0xf;
22870 int rs = newval & 0xf;
22871 int subtract = !!(newval & 0x8000);
22872
22873 /* Check for HI regs, only very restricted cases allowed:
22874 Adjusting SP, and using PC or SP to get an address. */
22875 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
22876 || (rs > 7 && rs != REG_SP && rs != REG_PC))
22877 as_bad_where (fixP->fx_file, fixP->fx_line,
22878 _("invalid Hi register with immediate"));
22879
22880 /* If value is negative, choose the opposite instruction. */
22881 if (value < 0)
22882 {
22883 value = -value;
22884 subtract = !subtract;
22885 if (value < 0)
22886 as_bad_where (fixP->fx_file, fixP->fx_line,
22887 _("immediate value out of range"));
22888 }
22889
22890 if (rd == REG_SP)
22891 {
22892 if (value & ~0x1fc)
22893 as_bad_where (fixP->fx_file, fixP->fx_line,
22894 _("invalid immediate for stack address calculation"));
22895 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
22896 newval |= value >> 2;
22897 }
22898 else if (rs == REG_PC || rs == REG_SP)
22899 {
22900 if (subtract || value & ~0x3fc)
22901 as_bad_where (fixP->fx_file, fixP->fx_line,
22902 _("invalid immediate for address calculation (value = 0x%08lX)"),
22903 (unsigned long) value);
22904 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
22905 newval |= rd << 8;
22906 newval |= value >> 2;
22907 }
22908 else if (rs == rd)
22909 {
22910 if (value & ~0xff)
22911 as_bad_where (fixP->fx_file, fixP->fx_line,
22912 _("immediate value out of range"));
22913 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
22914 newval |= (rd << 8) | value;
22915 }
22916 else
22917 {
22918 if (value & ~0x7)
22919 as_bad_where (fixP->fx_file, fixP->fx_line,
22920 _("immediate value out of range"));
22921 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
22922 newval |= rd | (rs << 3) | (value << 6);
22923 }
22924 }
22925 md_number_to_chars (buf, newval, THUMB_SIZE);
22926 break;
22927
22928 case BFD_RELOC_ARM_THUMB_IMM:
22929 newval = md_chars_to_number (buf, THUMB_SIZE);
22930 if (value < 0 || value > 255)
22931 as_bad_where (fixP->fx_file, fixP->fx_line,
22932 _("invalid immediate: %ld is out of range"),
22933 (long) value);
22934 newval |= value;
22935 md_number_to_chars (buf, newval, THUMB_SIZE);
22936 break;
22937
22938 case BFD_RELOC_ARM_THUMB_SHIFT:
22939 /* 5bit shift value (0..32). LSL cannot take 32. */
22940 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
22941 temp = newval & 0xf800;
22942 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
22943 as_bad_where (fixP->fx_file, fixP->fx_line,
22944 _("invalid shift value: %ld"), (long) value);
22945 /* Shifts of zero must be encoded as LSL. */
22946 if (value == 0)
22947 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
22948 /* Shifts of 32 are encoded as zero. */
22949 else if (value == 32)
22950 value = 0;
22951 newval |= value << 6;
22952 md_number_to_chars (buf, newval, THUMB_SIZE);
22953 break;
22954
22955 case BFD_RELOC_VTABLE_INHERIT:
22956 case BFD_RELOC_VTABLE_ENTRY:
22957 fixP->fx_done = 0;
22958 return;
22959
22960 case BFD_RELOC_ARM_MOVW:
22961 case BFD_RELOC_ARM_MOVT:
22962 case BFD_RELOC_ARM_THUMB_MOVW:
22963 case BFD_RELOC_ARM_THUMB_MOVT:
22964 if (fixP->fx_done || !seg->use_rela_p)
22965 {
22966 /* REL format relocations are limited to a 16-bit addend. */
22967 if (!fixP->fx_done)
22968 {
22969 if (value < -0x8000 || value > 0x7fff)
22970 as_bad_where (fixP->fx_file, fixP->fx_line,
22971 _("offset out of range"));
22972 }
22973 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22974 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22975 {
22976 value >>= 16;
22977 }
22978
22979 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22980 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22981 {
22982 newval = get_thumb32_insn (buf);
22983 newval &= 0xfbf08f00;
22984 newval |= (value & 0xf000) << 4;
22985 newval |= (value & 0x0800) << 15;
22986 newval |= (value & 0x0700) << 4;
22987 newval |= (value & 0x00ff);
22988 put_thumb32_insn (buf, newval);
22989 }
22990 else
22991 {
22992 newval = md_chars_to_number (buf, 4);
22993 newval &= 0xfff0f000;
22994 newval |= value & 0x0fff;
22995 newval |= (value & 0xf000) << 4;
22996 md_number_to_chars (buf, newval, 4);
22997 }
22998 }
22999 return;
23000
23001 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23002 case BFD_RELOC_ARM_ALU_PC_G0:
23003 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23004 case BFD_RELOC_ARM_ALU_PC_G1:
23005 case BFD_RELOC_ARM_ALU_PC_G2:
23006 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23007 case BFD_RELOC_ARM_ALU_SB_G0:
23008 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23009 case BFD_RELOC_ARM_ALU_SB_G1:
23010 case BFD_RELOC_ARM_ALU_SB_G2:
23011 gas_assert (!fixP->fx_done);
23012 if (!seg->use_rela_p)
23013 {
23014 bfd_vma insn;
23015 bfd_vma encoded_addend;
23016 bfd_vma addend_abs = abs (value);
23017
23018 /* Check that the absolute value of the addend can be
23019 expressed as an 8-bit constant plus a rotation. */
23020 encoded_addend = encode_arm_immediate (addend_abs);
23021 if (encoded_addend == (unsigned int) FAIL)
23022 as_bad_where (fixP->fx_file, fixP->fx_line,
23023 _("the offset 0x%08lX is not representable"),
23024 (unsigned long) addend_abs);
23025
23026 /* Extract the instruction. */
23027 insn = md_chars_to_number (buf, INSN_SIZE);
23028
23029 /* If the addend is positive, use an ADD instruction.
23030 Otherwise use a SUB. Take care not to destroy the S bit. */
23031 insn &= 0xff1fffff;
23032 if (value < 0)
23033 insn |= 1 << 22;
23034 else
23035 insn |= 1 << 23;
23036
23037 /* Place the encoded addend into the first 12 bits of the
23038 instruction. */
23039 insn &= 0xfffff000;
23040 insn |= encoded_addend;
23041
23042 /* Update the instruction. */
23043 md_number_to_chars (buf, insn, INSN_SIZE);
23044 }
23045 break;
23046
23047 case BFD_RELOC_ARM_LDR_PC_G0:
23048 case BFD_RELOC_ARM_LDR_PC_G1:
23049 case BFD_RELOC_ARM_LDR_PC_G2:
23050 case BFD_RELOC_ARM_LDR_SB_G0:
23051 case BFD_RELOC_ARM_LDR_SB_G1:
23052 case BFD_RELOC_ARM_LDR_SB_G2:
23053 gas_assert (!fixP->fx_done);
23054 if (!seg->use_rela_p)
23055 {
23056 bfd_vma insn;
23057 bfd_vma addend_abs = abs (value);
23058
23059 /* Check that the absolute value of the addend can be
23060 encoded in 12 bits. */
23061 if (addend_abs >= 0x1000)
23062 as_bad_where (fixP->fx_file, fixP->fx_line,
23063 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23064 (unsigned long) addend_abs);
23065
23066 /* Extract the instruction. */
23067 insn = md_chars_to_number (buf, INSN_SIZE);
23068
23069 /* If the addend is negative, clear bit 23 of the instruction.
23070 Otherwise set it. */
23071 if (value < 0)
23072 insn &= ~(1 << 23);
23073 else
23074 insn |= 1 << 23;
23075
23076 /* Place the absolute value of the addend into the first 12 bits
23077 of the instruction. */
23078 insn &= 0xfffff000;
23079 insn |= addend_abs;
23080
23081 /* Update the instruction. */
23082 md_number_to_chars (buf, insn, INSN_SIZE);
23083 }
23084 break;
23085
23086 case BFD_RELOC_ARM_LDRS_PC_G0:
23087 case BFD_RELOC_ARM_LDRS_PC_G1:
23088 case BFD_RELOC_ARM_LDRS_PC_G2:
23089 case BFD_RELOC_ARM_LDRS_SB_G0:
23090 case BFD_RELOC_ARM_LDRS_SB_G1:
23091 case BFD_RELOC_ARM_LDRS_SB_G2:
23092 gas_assert (!fixP->fx_done);
23093 if (!seg->use_rela_p)
23094 {
23095 bfd_vma insn;
23096 bfd_vma addend_abs = abs (value);
23097
23098 /* Check that the absolute value of the addend can be
23099 encoded in 8 bits. */
23100 if (addend_abs >= 0x100)
23101 as_bad_where (fixP->fx_file, fixP->fx_line,
23102 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23103 (unsigned long) addend_abs);
23104
23105 /* Extract the instruction. */
23106 insn = md_chars_to_number (buf, INSN_SIZE);
23107
23108 /* If the addend is negative, clear bit 23 of the instruction.
23109 Otherwise set it. */
23110 if (value < 0)
23111 insn &= ~(1 << 23);
23112 else
23113 insn |= 1 << 23;
23114
23115 /* Place the first four bits of the absolute value of the addend
23116 into the first 4 bits of the instruction, and the remaining
23117 four into bits 8 .. 11. */
23118 insn &= 0xfffff0f0;
23119 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23120
23121 /* Update the instruction. */
23122 md_number_to_chars (buf, insn, INSN_SIZE);
23123 }
23124 break;
23125
23126 case BFD_RELOC_ARM_LDC_PC_G0:
23127 case BFD_RELOC_ARM_LDC_PC_G1:
23128 case BFD_RELOC_ARM_LDC_PC_G2:
23129 case BFD_RELOC_ARM_LDC_SB_G0:
23130 case BFD_RELOC_ARM_LDC_SB_G1:
23131 case BFD_RELOC_ARM_LDC_SB_G2:
23132 gas_assert (!fixP->fx_done);
23133 if (!seg->use_rela_p)
23134 {
23135 bfd_vma insn;
23136 bfd_vma addend_abs = abs (value);
23137
23138 /* Check that the absolute value of the addend is a multiple of
23139 four and, when divided by four, fits in 8 bits. */
23140 if (addend_abs & 0x3)
23141 as_bad_where (fixP->fx_file, fixP->fx_line,
23142 _("bad offset 0x%08lX (must be word-aligned)"),
23143 (unsigned long) addend_abs);
23144
23145 if ((addend_abs >> 2) > 0xff)
23146 as_bad_where (fixP->fx_file, fixP->fx_line,
23147 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23148 (unsigned long) addend_abs);
23149
23150 /* Extract the instruction. */
23151 insn = md_chars_to_number (buf, INSN_SIZE);
23152
23153 /* If the addend is negative, clear bit 23 of the instruction.
23154 Otherwise set it. */
23155 if (value < 0)
23156 insn &= ~(1 << 23);
23157 else
23158 insn |= 1 << 23;
23159
23160 /* Place the addend (divided by four) into the first eight
23161 bits of the instruction. */
23162 insn &= 0xfffffff0;
23163 insn |= addend_abs >> 2;
23164
23165 /* Update the instruction. */
23166 md_number_to_chars (buf, insn, INSN_SIZE);
23167 }
23168 break;
23169
23170 case BFD_RELOC_ARM_V4BX:
23171 /* This will need to go in the object file. */
23172 fixP->fx_done = 0;
23173 break;
23174
23175 case BFD_RELOC_UNUSED:
23176 default:
23177 as_bad_where (fixP->fx_file, fixP->fx_line,
23178 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
23179 }
23180 }
23181
23182 /* Translate internal representation of relocation info to BFD target
23183 format. */
23184
23185 arelent *
23186 tc_gen_reloc (asection *section, fixS *fixp)
23187 {
23188 arelent * reloc;
23189 bfd_reloc_code_real_type code;
23190
23191 reloc = (arelent *) xmalloc (sizeof (arelent));
23192
23193 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
23194 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
23195 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
23196
23197 if (fixp->fx_pcrel)
23198 {
23199 if (section->use_rela_p)
23200 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
23201 else
23202 fixp->fx_offset = reloc->address;
23203 }
23204 reloc->addend = fixp->fx_offset;
23205
23206 switch (fixp->fx_r_type)
23207 {
23208 case BFD_RELOC_8:
23209 if (fixp->fx_pcrel)
23210 {
23211 code = BFD_RELOC_8_PCREL;
23212 break;
23213 }
23214
23215 case BFD_RELOC_16:
23216 if (fixp->fx_pcrel)
23217 {
23218 code = BFD_RELOC_16_PCREL;
23219 break;
23220 }
23221
23222 case BFD_RELOC_32:
23223 if (fixp->fx_pcrel)
23224 {
23225 code = BFD_RELOC_32_PCREL;
23226 break;
23227 }
23228
23229 case BFD_RELOC_ARM_MOVW:
23230 if (fixp->fx_pcrel)
23231 {
23232 code = BFD_RELOC_ARM_MOVW_PCREL;
23233 break;
23234 }
23235
23236 case BFD_RELOC_ARM_MOVT:
23237 if (fixp->fx_pcrel)
23238 {
23239 code = BFD_RELOC_ARM_MOVT_PCREL;
23240 break;
23241 }
23242
23243 case BFD_RELOC_ARM_THUMB_MOVW:
23244 if (fixp->fx_pcrel)
23245 {
23246 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
23247 break;
23248 }
23249
23250 case BFD_RELOC_ARM_THUMB_MOVT:
23251 if (fixp->fx_pcrel)
23252 {
23253 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
23254 break;
23255 }
23256
23257 case BFD_RELOC_NONE:
23258 case BFD_RELOC_ARM_PCREL_BRANCH:
23259 case BFD_RELOC_ARM_PCREL_BLX:
23260 case BFD_RELOC_RVA:
23261 case BFD_RELOC_THUMB_PCREL_BRANCH7:
23262 case BFD_RELOC_THUMB_PCREL_BRANCH9:
23263 case BFD_RELOC_THUMB_PCREL_BRANCH12:
23264 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23265 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23266 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23267 case BFD_RELOC_VTABLE_ENTRY:
23268 case BFD_RELOC_VTABLE_INHERIT:
23269 #ifdef TE_PE
23270 case BFD_RELOC_32_SECREL:
23271 #endif
23272 code = fixp->fx_r_type;
23273 break;
23274
23275 case BFD_RELOC_THUMB_PCREL_BLX:
23276 #ifdef OBJ_ELF
23277 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23278 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
23279 else
23280 #endif
23281 code = BFD_RELOC_THUMB_PCREL_BLX;
23282 break;
23283
23284 case BFD_RELOC_ARM_LITERAL:
23285 case BFD_RELOC_ARM_HWLITERAL:
23286 /* If this is called then the a literal has
23287 been referenced across a section boundary. */
23288 as_bad_where (fixp->fx_file, fixp->fx_line,
23289 _("literal referenced across section boundary"));
23290 return NULL;
23291
23292 #ifdef OBJ_ELF
23293 case BFD_RELOC_ARM_TLS_CALL:
23294 case BFD_RELOC_ARM_THM_TLS_CALL:
23295 case BFD_RELOC_ARM_TLS_DESCSEQ:
23296 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23297 case BFD_RELOC_ARM_GOT32:
23298 case BFD_RELOC_ARM_GOTOFF:
23299 case BFD_RELOC_ARM_GOT_PREL:
23300 case BFD_RELOC_ARM_PLT32:
23301 case BFD_RELOC_ARM_TARGET1:
23302 case BFD_RELOC_ARM_ROSEGREL32:
23303 case BFD_RELOC_ARM_SBREL32:
23304 case BFD_RELOC_ARM_PREL31:
23305 case BFD_RELOC_ARM_TARGET2:
23306 case BFD_RELOC_ARM_TLS_LE32:
23307 case BFD_RELOC_ARM_TLS_LDO32:
23308 case BFD_RELOC_ARM_PCREL_CALL:
23309 case BFD_RELOC_ARM_PCREL_JUMP:
23310 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23311 case BFD_RELOC_ARM_ALU_PC_G0:
23312 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23313 case BFD_RELOC_ARM_ALU_PC_G1:
23314 case BFD_RELOC_ARM_ALU_PC_G2:
23315 case BFD_RELOC_ARM_LDR_PC_G0:
23316 case BFD_RELOC_ARM_LDR_PC_G1:
23317 case BFD_RELOC_ARM_LDR_PC_G2:
23318 case BFD_RELOC_ARM_LDRS_PC_G0:
23319 case BFD_RELOC_ARM_LDRS_PC_G1:
23320 case BFD_RELOC_ARM_LDRS_PC_G2:
23321 case BFD_RELOC_ARM_LDC_PC_G0:
23322 case BFD_RELOC_ARM_LDC_PC_G1:
23323 case BFD_RELOC_ARM_LDC_PC_G2:
23324 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23325 case BFD_RELOC_ARM_ALU_SB_G0:
23326 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23327 case BFD_RELOC_ARM_ALU_SB_G1:
23328 case BFD_RELOC_ARM_ALU_SB_G2:
23329 case BFD_RELOC_ARM_LDR_SB_G0:
23330 case BFD_RELOC_ARM_LDR_SB_G1:
23331 case BFD_RELOC_ARM_LDR_SB_G2:
23332 case BFD_RELOC_ARM_LDRS_SB_G0:
23333 case BFD_RELOC_ARM_LDRS_SB_G1:
23334 case BFD_RELOC_ARM_LDRS_SB_G2:
23335 case BFD_RELOC_ARM_LDC_SB_G0:
23336 case BFD_RELOC_ARM_LDC_SB_G1:
23337 case BFD_RELOC_ARM_LDC_SB_G2:
23338 case BFD_RELOC_ARM_V4BX:
23339 code = fixp->fx_r_type;
23340 break;
23341
23342 case BFD_RELOC_ARM_TLS_GOTDESC:
23343 case BFD_RELOC_ARM_TLS_GD32:
23344 case BFD_RELOC_ARM_TLS_IE32:
23345 case BFD_RELOC_ARM_TLS_LDM32:
23346 /* BFD will include the symbol's address in the addend.
23347 But we don't want that, so subtract it out again here. */
23348 if (!S_IS_COMMON (fixp->fx_addsy))
23349 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
23350 code = fixp->fx_r_type;
23351 break;
23352 #endif
23353
23354 case BFD_RELOC_ARM_IMMEDIATE:
23355 as_bad_where (fixp->fx_file, fixp->fx_line,
23356 _("internal relocation (type: IMMEDIATE) not fixed up"));
23357 return NULL;
23358
23359 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23360 as_bad_where (fixp->fx_file, fixp->fx_line,
23361 _("ADRL used for a symbol not defined in the same file"));
23362 return NULL;
23363
23364 case BFD_RELOC_ARM_OFFSET_IMM:
23365 if (section->use_rela_p)
23366 {
23367 code = fixp->fx_r_type;
23368 break;
23369 }
23370
23371 if (fixp->fx_addsy != NULL
23372 && !S_IS_DEFINED (fixp->fx_addsy)
23373 && S_IS_LOCAL (fixp->fx_addsy))
23374 {
23375 as_bad_where (fixp->fx_file, fixp->fx_line,
23376 _("undefined local label `%s'"),
23377 S_GET_NAME (fixp->fx_addsy));
23378 return NULL;
23379 }
23380
23381 as_bad_where (fixp->fx_file, fixp->fx_line,
23382 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
23383 return NULL;
23384
23385 default:
23386 {
23387 char * type;
23388
23389 switch (fixp->fx_r_type)
23390 {
23391 case BFD_RELOC_NONE: type = "NONE"; break;
23392 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
23393 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
23394 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
23395 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
23396 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
23397 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
23398 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
23399 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
23400 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
23401 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
23402 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
23403 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
23404 default: type = _("<unknown>"); break;
23405 }
23406 as_bad_where (fixp->fx_file, fixp->fx_line,
23407 _("cannot represent %s relocation in this object file format"),
23408 type);
23409 return NULL;
23410 }
23411 }
23412
23413 #ifdef OBJ_ELF
23414 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
23415 && GOT_symbol
23416 && fixp->fx_addsy == GOT_symbol)
23417 {
23418 code = BFD_RELOC_ARM_GOTPC;
23419 reloc->addend = fixp->fx_offset = reloc->address;
23420 }
23421 #endif
23422
23423 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
23424
23425 if (reloc->howto == NULL)
23426 {
23427 as_bad_where (fixp->fx_file, fixp->fx_line,
23428 _("cannot represent %s relocation in this object file format"),
23429 bfd_get_reloc_code_name (code));
23430 return NULL;
23431 }
23432
23433 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
23434 vtable entry to be used in the relocation's section offset. */
23435 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23436 reloc->address = fixp->fx_offset;
23437
23438 return reloc;
23439 }
23440
23441 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23442
23443 void
23444 cons_fix_new_arm (fragS * frag,
23445 int where,
23446 int size,
23447 expressionS * exp,
23448 bfd_reloc_code_real_type reloc)
23449 {
23450 int pcrel = 0;
23451
23452 /* Pick a reloc.
23453 FIXME: @@ Should look at CPU word size. */
23454 switch (size)
23455 {
23456 case 1:
23457 reloc = BFD_RELOC_8;
23458 break;
23459 case 2:
23460 reloc = BFD_RELOC_16;
23461 break;
23462 case 4:
23463 default:
23464 reloc = BFD_RELOC_32;
23465 break;
23466 case 8:
23467 reloc = BFD_RELOC_64;
23468 break;
23469 }
23470
23471 #ifdef TE_PE
23472 if (exp->X_op == O_secrel)
23473 {
23474 exp->X_op = O_symbol;
23475 reloc = BFD_RELOC_32_SECREL;
23476 }
23477 #endif
23478
23479 fix_new_exp (frag, where, size, exp, pcrel, reloc);
23480 }
23481
23482 #if defined (OBJ_COFF)
23483 void
23484 arm_validate_fix (fixS * fixP)
23485 {
23486 /* If the destination of the branch is a defined symbol which does not have
23487 the THUMB_FUNC attribute, then we must be calling a function which has
23488 the (interfacearm) attribute. We look for the Thumb entry point to that
23489 function and change the branch to refer to that function instead. */
23490 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
23491 && fixP->fx_addsy != NULL
23492 && S_IS_DEFINED (fixP->fx_addsy)
23493 && ! THUMB_IS_FUNC (fixP->fx_addsy))
23494 {
23495 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
23496 }
23497 }
23498 #endif
23499
23500
23501 int
23502 arm_force_relocation (struct fix * fixp)
23503 {
23504 #if defined (OBJ_COFF) && defined (TE_PE)
23505 if (fixp->fx_r_type == BFD_RELOC_RVA)
23506 return 1;
23507 #endif
23508
23509 /* In case we have a call or a branch to a function in ARM ISA mode from
23510 a thumb function or vice-versa force the relocation. These relocations
23511 are cleared off for some cores that might have blx and simple transformations
23512 are possible. */
23513
23514 #ifdef OBJ_ELF
23515 switch (fixp->fx_r_type)
23516 {
23517 case BFD_RELOC_ARM_PCREL_JUMP:
23518 case BFD_RELOC_ARM_PCREL_CALL:
23519 case BFD_RELOC_THUMB_PCREL_BLX:
23520 if (THUMB_IS_FUNC (fixp->fx_addsy))
23521 return 1;
23522 break;
23523
23524 case BFD_RELOC_ARM_PCREL_BLX:
23525 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23526 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23527 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23528 if (ARM_IS_FUNC (fixp->fx_addsy))
23529 return 1;
23530 break;
23531
23532 default:
23533 break;
23534 }
23535 #endif
23536
23537 /* Resolve these relocations even if the symbol is extern or weak.
23538 Technically this is probably wrong due to symbol preemption.
23539 In practice these relocations do not have enough range to be useful
23540 at dynamic link time, and some code (e.g. in the Linux kernel)
23541 expects these references to be resolved. */
23542 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
23543 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
23544 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
23545 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
23546 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23547 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
23548 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
23549 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
23550 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23551 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
23552 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
23553 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
23554 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
23555 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
23556 return 0;
23557
23558 /* Always leave these relocations for the linker. */
23559 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23560 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23561 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23562 return 1;
23563
23564 /* Always generate relocations against function symbols. */
23565 if (fixp->fx_r_type == BFD_RELOC_32
23566 && fixp->fx_addsy
23567 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
23568 return 1;
23569
23570 return generic_force_reloc (fixp);
23571 }
23572
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Return TRUE if the fix may be adjusted to be against a section
   symbol, FALSE if it must stay against the original symbol.

   Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  /* No symbol attached: nothing prevents adjustment.  */
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  /* Keep plain references to Thumb functions so interworking
     information survives to link time.  */
  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (	fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
23643
#ifdef OBJ_ELF

/* Return the name of the BFD target vector to use, selected by the
   configured target environment and the current endianness.  */
const char *
elf32_arm_target_format (void)
{
#ifdef TE_SYMBIAN
  return (target_big_endian
	  ? "elf32-bigarm-symbian"
	  : "elf32-littlearm-symbian");
#elif defined (TE_VXWORKS)
  return (target_big_endian
	  ? "elf32-bigarm-vxworks"
	  : "elf32-littlearm-vxworks");
#elif defined (TE_NACL)
  return (target_big_endian
	  ? "elf32-bigarm-nacl"
	  : "elf32-littlearm-nacl");
#else
  if (target_big_endian)
    return "elf32-bigarm";
  else
    return "elf32-littlearm";
#endif
}

/* Symbol-frobbing hook: simply defer to the generic ELF handling.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
#endif
23676
23677 /* MD interface: Finalization. */
23678
/* End-of-assembly cleanup: verify IT-block closure and flush every
   pending literal pool to the end of its section.  Presumably installed
   as the md_cleanup hook — confirm in tc-arm.h.  */
void
arm_cleanup (void)
{
  literal_pool * pool;

  /* Ensure that all the IT blocks are properly closed.  */
  check_it_blocks_finished ();

  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
#ifdef OBJ_ELF
      arm_elf_change_section ();
#endif
      /* Emit the pool's contents, as the .ltorg directive would.  */
      s_ltorg (0);
    }
}
23697
#ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called once per section via bfd_map_over_sections.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without a frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly at the start of NEXT; scan forward over empty
	 frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
#endif
23764
23765 /* Adjust the symbol table. This marks Thumb symbols as distinct from
23766 ARM ones. */
23767
/* Adjust the symbol table just before writing: mark Thumb symbols so
   they can be told apart from ARM ones.  Only one of the COFF and ELF
   branches below is compiled in for any given target.  */
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (	S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Flag interworking-capable symbols in the COFF symbol entry.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d etc.) are left untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
23846
23847 /* MD interface: Initialization. */
23848
23849 static void
23850 set_constant_flonums (void)
23851 {
23852 int i;
23853
23854 for (i = 0; i < NUM_FLOAT_VALS; i++)
23855 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
23856 abort ();
23857 }
23858
23859 /* Auto-select Thumb mode if it's the only available instruction set for the
23860 given architecture. */
23861
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* If the selected architecture lacks the ARM (v1) instruction set,
     Thumb is the only choice, so switch to 16-bit mode now.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
23868
/* MD interface: one-time back-end initialisation, run after command
   line parsing.  Builds the opcode/condition/shift/PSR/register/reloc/
   barrier hash tables, resolves the CPU and FPU selection (new-style
   options take precedence over legacy ones), records object file
   flags, and sets the BFD machine number.  */
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
  else if (no_cpu_selected ())
    selected_cpu = cpu_default;
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
24095
24096 /* Command line processing. */
24097
24098 /* md_parse_option
24099 Invocation line includes a switch not recognized by the base assembler.
24100 See if it's a processor-specific option.
24101
24102 This routine is somewhat complicated by the need for backwards
24103 compatibility (since older releases of gcc can't be changed).
24104 The new options try to make the interface as compatible as
24105 possible with GCC.
24106
24107 New options (supported) are:
24108
24109 -mcpu=<cpu name> Assemble for selected processor
24110 -march=<architecture name> Assemble for selected architecture
24111 -mfpu=<fpu architecture> Assemble for selected FPU.
24112 -EB/-mbig-endian Big-endian
24113 -EL/-mlittle-endian Little-endian
24114 -k Generate PIC code
24115 -mthumb Start in Thumb mode
24116 -mthumb-interwork Code supports ARM/Thumb interworking
24117
24118 -m[no-]warn-deprecated Warn about deprecated features
24119
24120 For now we will also provide support for:
24121
24122 -mapcs-32 32-bit Program counter
24123 -mapcs-26 26-bit Program counter
      -mapcs-float		   Floats passed in FP registers
24125 -mapcs-reentrant Reentrant code
24126 -matpcs
24127 (sometime these will probably be replaced with -mapcs=<list of options>
24128 and -matpcs=<list of options>)
24129
24130 The remaining options are only supported for back-wards compatibility.
24131 Cpu variants, the arm part is optional:
24132 -m[arm]1 Currently not supported.
24133 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24134 -m[arm]3 Arm 3 processor
24135 -m[arm]6[xx], Arm 6 processors
24136 -m[arm]7[xx][t][[d]m] Arm 7 processors
24137 -m[arm]8[10] Arm 8 processors
24138 -m[arm]9[20][tdmi] Arm 9 processors
24139 -mstrongarm[110[0]] StrongARM processors
24140 -mxscale XScale processors
24141 -m[arm]v[2345[t[e]]] Arm architectures
24142 -mall All (except the ARM1)
24143 FP variants:
24144 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24145 -mfpe-old (No float load/store multiples)
24146 -mvfpxd VFP Single precision
24147 -mvfp All VFP
24148 -mno-fpu Disable all floating point instructions
24149
24150 The following CPU names are recognized:
24151 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24152 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24153 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24154 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24155 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24156 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24157 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24158
24159 */
24160
/* Short options recognised by the ARM back end: -m<arg> and -k.  */
const char * md_shortopts = "m:k";

/* Long-option codes.  -EB/-EL are only offered when the target (or a
   bi-endian build) supports the corresponding byte order.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
24188
/* Describes one simple -m style option: setting *VAR to VALUE when the
   option is seen.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.  */
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.  */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},

  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   &warn_on_deprecated, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
24223
/* Describes one deprecated CPU/architecture/FPU option: setting *VAR
   to VALUE and printing the DEPRECATED message naming the modern
   -mcpu=/-march=/-mfpu= replacement.  */
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set	**var;		/* Variable to change.  */
  const arm_feature_set	value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};

const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
24344
/* Describes one entry in the -mcpu= processor table.  */
struct arm_cpu_option_table
{
  char *name;			/* CPU name as written on the command line.  */
  size_t name_len;		/* Length of NAME, precomputed for matching.  */
  const arm_feature_set	value;	/* Feature set the CPU provides.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...  */
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
24357
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Each entry gives the feature set selected by -mcpu=NAME, the FPU that
   is assumed when the user does not give -mfpu=, and (optionally) a
   canonical CPU name used for the Tag_CPU_name build attribute; a NULL
   canonical name means "NAME converted to upper case".  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm250",	ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm600",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm610",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm620",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dm",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7di",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7dmi",	ARM_ARCH_V3M,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm700i",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm720t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm740t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm710c",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7100",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7500fe",	ARM_ARCH_V3,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm810",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm920",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    "ARM920T"),
  ARM_CPU_OPT ("arm920t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm922t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm940t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("arm9tdmi",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  ARM_CPU_OPT ("fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,    NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  ARM_CPU_OPT ("arm9e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2,
								 "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js",	ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2,
								 "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore",	ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL),
  ARM_CPU_OPT ("arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5",	ARM_ARCH_V7A_MP_SEC,
						 FPU_NONE,	  "Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8",	ARM_ARCH_V7A_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9",	ARM_ARCH_V7A_MP_SEC,
						 ARM_FEATURE_COPROC (FPU_VFP_V3
							| FPU_NEON_EXT_V1),
								  "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a12",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A12"),
  ARM_CPU_OPT ("cortex-a15",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A15"),
  ARM_CPU_OPT ("cortex-a17",	ARM_ARCH_V7VE,	 FPU_ARCH_NEON_VFP_V4,
								  "Cortex-A17"),
  ARM_CPU_OPT ("cortex-a53",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A53"),
  ARM_CPU_OPT ("cortex-a57",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A57"),
  ARM_CPU_OPT ("cortex-a72",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Cortex-A72"),
  ARM_CPU_OPT ("cortex-r4",	ARM_ARCH_V7R,	 FPU_NONE,	  "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16,
								  "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5",	ARM_ARCH_V7R_IDIV,
						 FPU_NONE,	  "Cortex-R5"),
  ARM_CPU_OPT ("cortex-r7",	ARM_ARCH_V7R_IDIV,
						 FPU_ARCH_VFP_V3D16,
								  "Cortex-R7"),
  ARM_CPU_OPT ("cortex-m7",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M7"),
  ARM_CPU_OPT ("cortex-m4",	ARM_ARCH_V7EM,	 FPU_NONE,	  "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3",	ARM_ARCH_V7M,	 FPU_NONE,	  "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus",	ARM_ARCH_V6SM,	 FPU_NONE,	  "Cortex-M0+"),
  ARM_CPU_OPT ("exynos-m1",	ARM_ARCH_V8A,	 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
								  "Samsung " \
								  "Exynos M1"),
  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200",	ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312",	ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
						 FPU_ARCH_MAVERICK, "ARM920T"),
  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4",   ARM_FEATURE_CORE_LOW (ARM_AEXT_V7A | ARM_EXT_MP
						      | ARM_EXT_SEC),
						FPU_ARCH_VFP_V3D16, NULL),
  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE_LOW (ARM_AEXT_V7A | ARM_EXT_MP
							| ARM_EXT_SEC),
					       FPU_ARCH_NEON_VFP_V4, NULL),
  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 1"),
  ARM_CPU_OPT ("xgene2",        ARM_ARCH_V8A,    FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	                                                          "APM X-Gene 2"),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
24515
/* An entry in the -march= option table: the architecture name (with its
   precomputed length), the feature set it selects and the FPU assumed
   by default for that architecture.  */
struct arm_arch_option_table
{
  char *name;		/* Architecture name as accepted on the command line.  */
  size_t name_len;	/* Length of NAME, precomputed by ARM_ARCH_OPT.  */
  const arm_feature_set value;
  const arm_feature_set default_fpu;
};
24523
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		ARM_ANY,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	ARM_ARCH_V1,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	ARM_ARCH_V2,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	ARM_ARCH_V2S,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	ARM_ARCH_V3,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	ARM_ARCH_V3M,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	ARM_ARCH_V4,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	ARM_ARCH_V4xM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	ARM_ARCH_V4T,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	ARM_ARCH_V4TxM,	 FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	ARM_ARCH_V5,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	ARM_ARCH_V5T,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	ARM_ARCH_V5TxM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te",	ARM_ARCH_V5TE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp",	ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j",	ARM_ARCH_V6,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k",	ARM_ARCH_V6K,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z",	ARM_ARCH_V6Z,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2",	ARM_ARCH_V6KT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2",	ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2",	ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m",	ARM_ARCH_V6M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	ARM_ARCH_V6SM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7",	ARM_ARCH_V7,	 FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve",	ARM_ARCH_V7VE,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a",	ARM_ARCH_V7A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r",	ARM_ARCH_V7R,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m",	ARM_ARCH_V7M,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m",	ARM_ARCH_V7EM,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a",	ARM_ARCH_V8A,	 FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale",	ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
24575
/* ISA extensions in the co-processor and main instruction set space.  */
struct arm_option_extension_value_table
{
  char *name;		/* Extension name, as given after '+'.  */
  size_t name_len;	/* Length of NAME, precomputed by ARM_EXT_OPT.  */
  const arm_feature_set merge_value;	/* Features added by "+<name>".  */
  const arm_feature_set clear_value;	/* Features removed by "+no<name>".  */
  const arm_feature_set allowed_archs;	/* Architectures the extension applies to.  */
};
24585
24586 /* The following table must be in alphabetical order with a NULL last entry.
24587 */
24588 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
24589 static const struct arm_option_extension_value_table arm_extensions[] =
24590 {
24591 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
24592 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
24593 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24594 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
24595 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
24596 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
24597 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
24598 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
24599 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
24600 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
24601 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
24602 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ANY),
24603 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
24604 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ANY),
24605 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
24606 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ANY),
24607 ARM_EXT_OPT ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
24608 ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
24609 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
24610 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
24611 ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
24612 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
24613 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
24614 ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
24615 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
24616 ARM_EXT_OPT ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
24617 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
24618 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V7A)),
24619 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
24620 | ARM_EXT_DIV),
24621 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
24622 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
24623 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
24624 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ANY),
24625 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE }
24626 };
24627 #undef ARM_EXT_OPT
24628
/* ISA floating-point and Advanced SIMD extensions.  */
struct arm_option_fpu_value_table
{
  char *name;			/* FPU name as accepted by -mfpu=.  */
  const arm_feature_set value;	/* Feature set the FPU provides.  */
};
24635
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatbility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",              FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {NULL,		ARM_ARCH_NONE}
};
24682
/* A simple name -> integer value mapping, used for -mfloat-abi= and
   -meabi= option arguments.  */
struct arm_option_value_table
{
  char *name;
  long value;
};
24688
/* Recognized arguments for the -mfloat-abi= option.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
24696
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
24707
/* A command line option that takes a textual sub-argument, parsed by a
   dedicated callback (see ARM_LONG_OPTS below).  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.	*/
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
24715
24716 static bfd_boolean
24717 arm_parse_extension (char *str, const arm_feature_set **opt_p)
24718 {
24719 arm_feature_set *ext_set = (arm_feature_set *)
24720 xmalloc (sizeof (arm_feature_set));
24721
24722 /* We insist on extensions being specified in alphabetical order, and with
24723 extensions being added before being removed. We achieve this by having
24724 the global ARM_EXTENSIONS table in alphabetical order, and using the
24725 ADDING_VALUE variable to indicate whether we are adding an extension (1)
24726 or removing it (0) and only allowing it to change in the order
24727 -1 -> 1 -> 0. */
24728 const struct arm_option_extension_value_table * opt = NULL;
24729 int adding_value = -1;
24730
24731 /* Copy the feature set, so that we can modify it. */
24732 *ext_set = **opt_p;
24733 *opt_p = ext_set;
24734
24735 while (str != NULL && *str != 0)
24736 {
24737 char *ext;
24738 size_t len;
24739
24740 if (*str != '+')
24741 {
24742 as_bad (_("invalid architectural extension"));
24743 return FALSE;
24744 }
24745
24746 str++;
24747 ext = strchr (str, '+');
24748
24749 if (ext != NULL)
24750 len = ext - str;
24751 else
24752 len = strlen (str);
24753
24754 if (len >= 2 && strncmp (str, "no", 2) == 0)
24755 {
24756 if (adding_value != 0)
24757 {
24758 adding_value = 0;
24759 opt = arm_extensions;
24760 }
24761
24762 len -= 2;
24763 str += 2;
24764 }
24765 else if (len > 0)
24766 {
24767 if (adding_value == -1)
24768 {
24769 adding_value = 1;
24770 opt = arm_extensions;
24771 }
24772 else if (adding_value != 1)
24773 {
24774 as_bad (_("must specify extensions to add before specifying "
24775 "those to remove"));
24776 return FALSE;
24777 }
24778 }
24779
24780 if (len == 0)
24781 {
24782 as_bad (_("missing architectural extension"));
24783 return FALSE;
24784 }
24785
24786 gas_assert (adding_value != -1);
24787 gas_assert (opt != NULL);
24788
24789 /* Scan over the options table trying to find an exact match. */
24790 for (; opt->name != NULL; opt++)
24791 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24792 {
24793 /* Check we can apply the extension to this architecture. */
24794 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
24795 {
24796 as_bad (_("extension does not apply to the base architecture"));
24797 return FALSE;
24798 }
24799
24800 /* Add or remove the extension. */
24801 if (adding_value)
24802 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
24803 else
24804 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
24805
24806 break;
24807 }
24808
24809 if (opt->name == NULL)
24810 {
24811 /* Did we fail to find an extension because it wasn't specified in
24812 alphabetical order, or because it does not exist? */
24813
24814 for (opt = arm_extensions; opt->name != NULL; opt++)
24815 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24816 break;
24817
24818 if (opt->name == NULL)
24819 as_bad (_("unknown architectural extension `%s'"), str);
24820 else
24821 as_bad (_("architectural extensions must be specified in "
24822 "alphabetical order"));
24823
24824 return FALSE;
24825 }
24826 else
24827 {
24828 /* We should skip the extension we've just matched the next time
24829 round. */
24830 opt++;
24831 }
24832
24833 str = ext;
24834 };
24835
24836 return TRUE;
24837 }
24838
24839 static bfd_boolean
24840 arm_parse_cpu (char *str)
24841 {
24842 const struct arm_cpu_option_table *opt;
24843 char *ext = strchr (str, '+');
24844 size_t len;
24845
24846 if (ext != NULL)
24847 len = ext - str;
24848 else
24849 len = strlen (str);
24850
24851 if (len == 0)
24852 {
24853 as_bad (_("missing cpu name `%s'"), str);
24854 return FALSE;
24855 }
24856
24857 for (opt = arm_cpus; opt->name != NULL; opt++)
24858 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24859 {
24860 mcpu_cpu_opt = &opt->value;
24861 mcpu_fpu_opt = &opt->default_fpu;
24862 if (opt->canonical_name)
24863 strcpy (selected_cpu_name, opt->canonical_name);
24864 else
24865 {
24866 size_t i;
24867
24868 for (i = 0; i < len; i++)
24869 selected_cpu_name[i] = TOUPPER (opt->name[i]);
24870 selected_cpu_name[i] = 0;
24871 }
24872
24873 if (ext != NULL)
24874 return arm_parse_extension (ext, &mcpu_cpu_opt);
24875
24876 return TRUE;
24877 }
24878
24879 as_bad (_("unknown cpu `%s'"), str);
24880 return FALSE;
24881 }
24882
24883 static bfd_boolean
24884 arm_parse_arch (char *str)
24885 {
24886 const struct arm_arch_option_table *opt;
24887 char *ext = strchr (str, '+');
24888 size_t len;
24889
24890 if (ext != NULL)
24891 len = ext - str;
24892 else
24893 len = strlen (str);
24894
24895 if (len == 0)
24896 {
24897 as_bad (_("missing architecture name `%s'"), str);
24898 return FALSE;
24899 }
24900
24901 for (opt = arm_archs; opt->name != NULL; opt++)
24902 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24903 {
24904 march_cpu_opt = &opt->value;
24905 march_fpu_opt = &opt->default_fpu;
24906 strcpy (selected_cpu_name, opt->name);
24907
24908 if (ext != NULL)
24909 return arm_parse_extension (ext, &march_cpu_opt);
24910
24911 return TRUE;
24912 }
24913
24914 as_bad (_("unknown architecture `%s'\n"), str);
24915 return FALSE;
24916 }
24917
24918 static bfd_boolean
24919 arm_parse_fpu (char * str)
24920 {
24921 const struct arm_option_fpu_value_table * opt;
24922
24923 for (opt = arm_fpus; opt->name != NULL; opt++)
24924 if (streq (opt->name, str))
24925 {
24926 mfpu_opt = &opt->value;
24927 return TRUE;
24928 }
24929
24930 as_bad (_("unknown floating point format `%s'\n"), str);
24931 return FALSE;
24932 }
24933
24934 static bfd_boolean
24935 arm_parse_float_abi (char * str)
24936 {
24937 const struct arm_option_value_table * opt;
24938
24939 for (opt = arm_float_abis; opt->name != NULL; opt++)
24940 if (streq (opt->name, str))
24941 {
24942 mfloat_abi_opt = opt->value;
24943 return TRUE;
24944 }
24945
24946 as_bad (_("unknown floating point abi `%s'\n"), str);
24947 return FALSE;
24948 }
24949
#ifdef OBJ_ELF
/* Parse the argument of the -meabi= command line option: look STR up
   in ARM_EABIS and record the matching flags in meabi_flags.  Returns
   TRUE on success, FALSE (after a diagnostic) otherwise.  */

static bfd_boolean
arm_parse_eabi (char * str)
{
  const struct arm_option_value_table *entry;

  for (entry = arm_eabis; entry->name != NULL; entry++)
    {
      if (!streq (entry->name, str))
	continue;

      meabi_flags = entry->value;
      return TRUE;
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
24966
24967 static bfd_boolean
24968 arm_parse_it_mode (char * str)
24969 {
24970 bfd_boolean ret = TRUE;
24971
24972 if (streq ("arm", str))
24973 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
24974 else if (streq ("thumb", str))
24975 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
24976 else if (streq ("always", str))
24977 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
24978 else if (streq ("never", str))
24979 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
24980 else
24981 {
24982 as_bad (_("unknown implicit IT mode `%s', should be "\
24983 "arm, thumb, always, or never."), str);
24984 ret = FALSE;
24985 }
24986
24987 return ret;
24988 }
24989
24990 static bfd_boolean
24991 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
24992 {
24993 codecomposer_syntax = TRUE;
24994 arm_comment_chars[0] = ';';
24995 arm_line_separator_chars[0] = 0;
24996 return TRUE;
24997 }
24998
/* Command line options that take a textual argument; matched by prefix
   in md_parse_option, which then hands the remainder of the argument to
   the entry's callback.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
25019
/* GAS hook: handle command line option C with argument ARG (which may
   be NULL).  After the explicit cases, the option is matched against
   the simple (arm_opts), legacy (arm_legacy_opts) and long
   (arm_long_opts) tables in turn.  Returns non-zero if the option was
   recognized, 0 otherwise.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple options: single flag character plus optional suffix.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Obsolete cpu/fpu selection options kept for compatibility.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser with the text following the
		 matched prefix.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
25110
/* GAS hook: print the ARM-specific command line options (from both the
   simple and long option tables, plus the hard-wired ones) to FP for
   --help output.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
25140
25141
25142 #ifdef OBJ_ELF
/* Pairs an EABI Tag_CPU_arch value (VAL) with the architecture feature
   set (FLAGS) it corresponds to.  */
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;
25148
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  */
/* Note: "sorted least features first" means feature-subset order, not
   numeric VAL order — e.g. V6K (9) precedes V6Z (7) because
   aeabi_set_public_attributes keeps the VAL of the LAST entry whose
   features are all present, clearing them as it goes.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {9, ARM_ARCH_V6K},
    {7, ARM_ARCH_V6Z},
    {11, ARM_ARCH_V6M},
    {12, ARM_ARCH_V6SM},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7VE},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {14, ARM_ARCH_V8A},
    {0, ARM_ARCH_NONE}
};
25171
25172 /* Set an attribute if it has not already been set by the user. */
25173 static void
25174 aeabi_set_attribute_int (int tag, int value)
25175 {
25176 if (tag < 1
25177 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25178 || !attributes_set_explicitly[tag])
25179 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
25180 }
25181
25182 static void
25183 aeabi_set_attribute_string (int tag, const char *value)
25184 {
25185 if (tag < 1
25186 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25187 || !attributes_set_explicitly[tag])
25188 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25189 }
25190
25191 /* Set the public EABI object attributes. */
25192 void
25193 aeabi_set_public_attributes (void)
25194 {
25195 int arch;
25196 char profile;
25197 int virt_sec = 0;
25198 int fp16_optional = 0;
25199 arm_feature_set flags;
25200 arm_feature_set tmp;
25201 const cpu_arch_ver_table *p;
25202
25203 /* Choose the architecture based on the capabilities of the requested cpu
25204 (if any) and/or the instructions actually used. */
25205 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
25206 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
25207 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
25208
25209 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
25210 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
25211
25212 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
25213 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
25214
25215 selected_cpu = flags;
25216
25217 /* Allow the user to override the reported architecture. */
25218 if (object_arch)
25219 {
25220 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
25221 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
25222 }
25223
25224 /* We need to make sure that the attributes do not identify us as v6S-M
25225 when the only v6S-M feature in use is the Operating System Extensions. */
25226 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
25227 if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
25228 ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
25229
25230 tmp = flags;
25231 arch = 0;
25232 for (p = cpu_arch_ver; p->val; p++)
25233 {
25234 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
25235 {
25236 arch = p->val;
25237 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
25238 }
25239 }
25240
25241 /* The table lookup above finds the last architecture to contribute
25242 a new feature. Unfortunately, Tag13 is a subset of the union of
25243 v6T2 and v7-M, so it is never seen as contributing a new feature.
25244 We can not search for the last entry which is entirely used,
25245 because if no CPU is specified we build up only those flags
25246 actually used. Perhaps we should separate out the specified
25247 and implicit cases. Avoid taking this path for -march=all by
25248 checking for contradictory v7-A / v7-M features. */
25249 if (arch == 10
25250 && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
25251 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
25252 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
25253 arch = 13;
25254
25255 /* Tag_CPU_name. */
25256 if (selected_cpu_name[0])
25257 {
25258 char *q;
25259
25260 q = selected_cpu_name;
25261 if (strncmp (q, "armv", 4) == 0)
25262 {
25263 int i;
25264
25265 q += 4;
25266 for (i = 0; q[i]; i++)
25267 q[i] = TOUPPER (q[i]);
25268 }
25269 aeabi_set_attribute_string (Tag_CPU_name, q);
25270 }
25271
25272 /* Tag_CPU_arch. */
25273 aeabi_set_attribute_int (Tag_CPU_arch, arch);
25274
25275 /* Tag_CPU_arch_profile. */
25276 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
25277 profile = 'A';
25278 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
25279 profile = 'R';
25280 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
25281 profile = 'M';
25282 else
25283 profile = '\0';
25284
25285 if (profile != '\0')
25286 aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
25287
25288 /* Tag_ARM_ISA_use. */
25289 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
25290 || arch == 0)
25291 aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
25292
25293 /* Tag_THUMB_ISA_use. */
25294 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
25295 || arch == 0)
25296 aeabi_set_attribute_int (Tag_THUMB_ISA_use,
25297 ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
25298
25299 /* Tag_VFP_arch. */
25300 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
25301 aeabi_set_attribute_int (Tag_VFP_arch,
25302 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
25303 ? 7 : 8);
25304 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
25305 aeabi_set_attribute_int (Tag_VFP_arch,
25306 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
25307 ? 5 : 6);
25308 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
25309 {
25310 fp16_optional = 1;
25311 aeabi_set_attribute_int (Tag_VFP_arch, 3);
25312 }
25313 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
25314 {
25315 aeabi_set_attribute_int (Tag_VFP_arch, 4);
25316 fp16_optional = 1;
25317 }
25318 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
25319 aeabi_set_attribute_int (Tag_VFP_arch, 2);
25320 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
25321 || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
25322 aeabi_set_attribute_int (Tag_VFP_arch, 1);
25323
25324 /* Tag_ABI_HardFP_use. */
25325 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
25326 && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
25327 aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
25328
25329 /* Tag_WMMX_arch. */
25330 if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
25331 aeabi_set_attribute_int (Tag_WMMX_arch, 2);
25332 else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
25333 aeabi_set_attribute_int (Tag_WMMX_arch, 1);
25334
25335 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
25336 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
25337 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
25338 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
25339 {
25340 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
25341 {
25342 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
25343 }
25344 else
25345 {
25346 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
25347 fp16_optional = 1;
25348 }
25349 }
25350
25351 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
25352 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
25353 aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
25354
25355 /* Tag_DIV_use.
25356
25357 We set Tag_DIV_use to two when integer divide instructions have been used
25358 in ARM state, or when Thumb integer divide instructions have been used,
25359 but we have no architecture profile set, nor have we any ARM instructions.
25360
25361 For ARMv8 we set the tag to 0 as integer divide is implied by the base
25362 architecture.
25363
25364 For new architectures we will have to check these tests. */
25365 gas_assert (arch <= TAG_CPU_ARCH_V8);
25366 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
25367 aeabi_set_attribute_int (Tag_DIV_use, 0);
25368 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
25369 || (profile == '\0'
25370 && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
25371 && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
25372 aeabi_set_attribute_int (Tag_DIV_use, 2);
25373
25374 /* Tag_MP_extension_use. */
25375 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
25376 aeabi_set_attribute_int (Tag_MPextension_use, 1);
25377
25378 /* Tag Virtualization_use. */
25379 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
25380 virt_sec |= 1;
25381 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
25382 virt_sec |= 2;
25383 if (virt_sec != 0)
25384 aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
25385 }
25386
25387 /* Add the default contents for the .ARM.attributes section. */
25388 void
25389 arm_md_end (void)
25390 {
25391 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25392 return;
25393
25394 aeabi_set_public_attributes ();
25395 }
25396 #endif /* OBJ_ELF */
25397
25398
25399 /* Parse a .cpu directive. */
25400
25401 static void
25402 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
25403 {
25404 const struct arm_cpu_option_table *opt;
25405 char *name;
25406 char saved_char;
25407
25408 name = input_line_pointer;
25409 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25410 input_line_pointer++;
25411 saved_char = *input_line_pointer;
25412 *input_line_pointer = 0;
25413
25414 /* Skip the first "all" entry. */
25415 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
25416 if (streq (opt->name, name))
25417 {
25418 mcpu_cpu_opt = &opt->value;
25419 selected_cpu = opt->value;
25420 if (opt->canonical_name)
25421 strcpy (selected_cpu_name, opt->canonical_name);
25422 else
25423 {
25424 int i;
25425 for (i = 0; opt->name[i]; i++)
25426 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25427
25428 selected_cpu_name[i] = 0;
25429 }
25430 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25431 *input_line_pointer = saved_char;
25432 demand_empty_rest_of_line ();
25433 return;
25434 }
25435 as_bad (_("unknown cpu `%s'"), name);
25436 *input_line_pointer = saved_char;
25437 ignore_rest_of_line ();
25438 }
25439
25440
25441 /* Parse a .arch directive. */
25442
25443 static void
25444 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
25445 {
25446 const struct arm_arch_option_table *opt;
25447 char saved_char;
25448 char *name;
25449
25450 name = input_line_pointer;
25451 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25452 input_line_pointer++;
25453 saved_char = *input_line_pointer;
25454 *input_line_pointer = 0;
25455
25456 /* Skip the first "all" entry. */
25457 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25458 if (streq (opt->name, name))
25459 {
25460 mcpu_cpu_opt = &opt->value;
25461 selected_cpu = opt->value;
25462 strcpy (selected_cpu_name, opt->name);
25463 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25464 *input_line_pointer = saved_char;
25465 demand_empty_rest_of_line ();
25466 return;
25467 }
25468
25469 as_bad (_("unknown architecture `%s'\n"), name);
25470 *input_line_pointer = saved_char;
25471 ignore_rest_of_line ();
25472 }
25473
25474
25475 /* Parse a .object_arch directive. */
25476
25477 static void
25478 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
25479 {
25480 const struct arm_arch_option_table *opt;
25481 char saved_char;
25482 char *name;
25483
25484 name = input_line_pointer;
25485 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25486 input_line_pointer++;
25487 saved_char = *input_line_pointer;
25488 *input_line_pointer = 0;
25489
25490 /* Skip the first "all" entry. */
25491 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25492 if (streq (opt->name, name))
25493 {
25494 object_arch = &opt->value;
25495 *input_line_pointer = saved_char;
25496 demand_empty_rest_of_line ();
25497 return;
25498 }
25499
25500 as_bad (_("unknown architecture `%s'\n"), name);
25501 *input_line_pointer = saved_char;
25502 ignore_rest_of_line ();
25503 }
25504
25505 /* Parse a .arch_extension directive. */
25506
25507 static void
25508 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
25509 {
25510 const struct arm_option_extension_value_table *opt;
25511 char saved_char;
25512 char *name;
25513 int adding_value = 1;
25514
25515 name = input_line_pointer;
25516 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25517 input_line_pointer++;
25518 saved_char = *input_line_pointer;
25519 *input_line_pointer = 0;
25520
25521 if (strlen (name) >= 2
25522 && strncmp (name, "no", 2) == 0)
25523 {
25524 adding_value = 0;
25525 name += 2;
25526 }
25527
25528 for (opt = arm_extensions; opt->name != NULL; opt++)
25529 if (streq (opt->name, name))
25530 {
25531 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
25532 {
25533 as_bad (_("architectural extension `%s' is not allowed for the "
25534 "current base architecture"), name);
25535 break;
25536 }
25537
25538 if (adding_value)
25539 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
25540 opt->merge_value);
25541 else
25542 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);
25543
25544 mcpu_cpu_opt = &selected_cpu;
25545 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25546 *input_line_pointer = saved_char;
25547 demand_empty_rest_of_line ();
25548 return;
25549 }
25550
25551 if (opt->name == NULL)
25552 as_bad (_("unknown architecture extension `%s'\n"), name);
25553
25554 *input_line_pointer = saved_char;
25555 ignore_rest_of_line ();
25556 }
25557
25558 /* Parse a .fpu directive. */
25559
25560 static void
25561 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
25562 {
25563 const struct arm_option_fpu_value_table *opt;
25564 char saved_char;
25565 char *name;
25566
25567 name = input_line_pointer;
25568 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25569 input_line_pointer++;
25570 saved_char = *input_line_pointer;
25571 *input_line_pointer = 0;
25572
25573 for (opt = arm_fpus; opt->name != NULL; opt++)
25574 if (streq (opt->name, name))
25575 {
25576 mfpu_opt = &opt->value;
25577 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25578 *input_line_pointer = saved_char;
25579 demand_empty_rest_of_line ();
25580 return;
25581 }
25582
25583 as_bad (_("unknown floating point format `%s'\n"), name);
25584 *input_line_pointer = saved_char;
25585 ignore_rest_of_line ();
25586 }
25587
/* Copy symbol information: propagate the ARM target-private flag word
   from symbol SRC to symbol DEST.  (ARM_GET_FLAG yields an lvalue for
   the per-symbol flags - presumably including the Thumb-function
   marker; confirm against the macro's definition.)  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
25595
25596 #ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  Accepts the symbolic
   Tag_* spellings (e.g. "Tag_CPU_arch") and maps each to its numeric
   build-attribute tag.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  /* Name -> tag-value table.  The T macro stringizes each enumerator,
     so an entry's string can never get out of sync with its value.  */
  static const struct
  {
    const char * name;
    const int    tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* A linear scan is fine here: the table is small and this only runs
     while parsing a directive.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
25670
25671
/* Apply sym value for relocations only in the case that they are for
   local symbols in the same segment as the fixup and you have the
   respective architectural feature for blx and simple switches.

   Returns non-zero when FIXP's symbol value should be folded into the
   instruction now; zero when resolution must be left to the linker.
   Requires v5T, the architecture level that introduced BLX.  */
int
arm_apply_sym_value (struct fix * fixP, segT this_seg)
{
  if (fixP->fx_addsy
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
      /* PR 17444: If the local symbol is in a different section then a reloc
	 will always be generated for it, so applying the symbol value now
	 will result in a double offset being stored in the relocation.  */
      && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
    {
      switch (fixP->fx_r_type)
	{
	/* NOTE(review): each reloc type is paired with the opposite
	   instruction-set state of the target (ARM_IS_FUNC vs
	   THUMB_IS_FUNC) - presumably the cases where the interworking
	   form of the branch is already settled; confirm against the
	   corresponding handling in md_apply_fix.  */
	case BFD_RELOC_ARM_PCREL_BLX:
	case BFD_RELOC_THUMB_PCREL_BRANCH23:
	  if (ARM_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	case BFD_RELOC_ARM_PCREL_CALL:
	case BFD_RELOC_THUMB_PCREL_BLX:
	  if (THUMB_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	default:
	  break;
	}

    }
  return 0;
}
25707 #endif /* OBJ_ELF */
This page took 0.595568 seconds and 4 git commands to generate.