2a7f649872920e4d8afa6b4248057613e3c26177
[deliverable/binutils-gdb.git] / gas / config / tc-arm.c
1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2016 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9 This file is part of GAS, the GNU Assembler.
10
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
14 any later version.
15
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 02110-1301, USA. */
25
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35
36 #ifdef OBJ_ELF
37 #include "elf/arm.h"
38 #include "dw2gencfi.h"
39 #endif
40
41 #include "dwarf2dbg.h"
42
43 #ifdef OBJ_ELF
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
46
/* This structure holds the unwinding state.  */

static struct
{
  /* Start-of-function symbol (presumably set when .fnstart is seen --
     TODO confirm against the .fnstart handler).  */
  symbolS * proc_start;
  /* Symbol of the unwind table entry generated for this function.  */
  symbolS * table_entry;
  /* Explicitly named personality routine, if any.  */
  symbolS * personality_routine;
  /* Index of a pre-defined personality routine; meaning of the
     "unset" value not visible here -- verify against users.  */
  int personality_index;
  /* The segment containing the function.  */
  segT saved_seg;
  subsegT saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  int opcode_count;
  int opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT fp_offset;
  int fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned sp_restored:1;
} unwind;
77
78 #endif /* OBJ_ELF */
79
/* Results from operand parsing worker functions.  As the names suggest,
   the distinct "no backtrack" failure lets a worker reject an operand in
   a way that vetoes trying any alternative parse of the same text.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
88
/* Float ABI variants; the user's selection is recorded in
   mfloat_abi_opt below (-1 when none was chosen).  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
95
96 /* Types of processor to assemble for. */
97 #ifndef CPU_DEFAULT
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
101
102 If you have a target that requires a default CPU option then the you
103 should define CPU_DEFAULT here. */
104 #endif
105
106 #ifndef FPU_DEFAULT
107 # ifdef TE_LINUX
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
110 # ifdef OBJ_ELF
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
112 # else
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
115 # endif
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
118 # else
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
121 # endif
122 #endif /* ifndef FPU_DEFAULT */
123
124 #define streq(a, b) (strcmp (a, b) == 0)
125
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
129
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
139
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
142
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
145 assembly flags. */
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
148
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
155
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
166
167 #ifdef CPU_DEFAULT
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
169 #endif
170
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE_CORE_LOW (ARM_EXT_V6M);
189 static const arm_feature_set arm_ext_v6_notm =
190 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
191 static const arm_feature_set arm_ext_v6_dsp =
192 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
193 static const arm_feature_set arm_ext_barrier =
194 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
195 static const arm_feature_set arm_ext_msr =
196 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
197 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
198 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
199 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
200 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
201 static const arm_feature_set arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
202 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
203 static const arm_feature_set arm_ext_m =
204 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, ARM_EXT2_V8M);
205 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
206 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
207 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
208 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
209 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
210 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
211 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
212 static const arm_feature_set arm_ext_v6t2_v8m =
213 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
214 /* Instructions shared between ARMv8-A and ARMv8-M. */
215 static const arm_feature_set arm_ext_atomics =
216 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
217 static const arm_feature_set arm_ext_v8_2 =
218 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
219 /* FP16 instructions. */
220 static const arm_feature_set arm_ext_fp16 =
221 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
222
223 static const arm_feature_set arm_arch_any = ARM_ANY;
224 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1, -1);
225 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
226 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
227 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
228
229 static const arm_feature_set arm_cext_iwmmxt2 =
230 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
231 static const arm_feature_set arm_cext_iwmmxt =
232 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
233 static const arm_feature_set arm_cext_xscale =
234 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
235 static const arm_feature_set arm_cext_maverick =
236 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
237 static const arm_feature_set fpu_fpa_ext_v1 =
238 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
239 static const arm_feature_set fpu_fpa_ext_v2 =
240 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
241 static const arm_feature_set fpu_vfp_ext_v1xd =
242 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
243 static const arm_feature_set fpu_vfp_ext_v1 =
244 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
245 static const arm_feature_set fpu_vfp_ext_v2 =
246 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
247 static const arm_feature_set fpu_vfp_ext_v3xd =
248 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
249 static const arm_feature_set fpu_vfp_ext_v3 =
250 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
251 static const arm_feature_set fpu_vfp_ext_d32 =
252 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
253 static const arm_feature_set fpu_neon_ext_v1 =
254 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
255 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
256 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
257 static const arm_feature_set fpu_vfp_fp16 =
258 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
259 static const arm_feature_set fpu_neon_ext_fma =
260 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
261 static const arm_feature_set fpu_vfp_ext_fma =
262 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
263 static const arm_feature_set fpu_vfp_ext_armv8 =
264 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
265 static const arm_feature_set fpu_vfp_ext_armv8xd =
266 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
267 static const arm_feature_set fpu_neon_ext_armv8 =
268 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
269 static const arm_feature_set fpu_crypto_ext_armv8 =
270 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
271 static const arm_feature_set crc_ext_armv8 =
272 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
273 static const arm_feature_set fpu_neon_ext_v8_1 =
274 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8 | FPU_NEON_EXT_RDMA);
275
276 static int mfloat_abi_opt = -1;
277 /* Record user cpu selection for object attributes. */
278 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
279 /* Must be long enough to hold any of the names in arm_cpus. */
280 static char selected_cpu_name[20];
281
282 extern FLONUM_TYPE generic_floating_point_number;
283
/* Return TRUE if no CPU was selected on the command line, i.e.
   selected_cpu still holds its ARM_ARCH_NONE initial value.  */
static bfd_boolean
no_cpu_selected (void)
{
  return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
}
290
291 #ifdef OBJ_ELF
292 # ifdef EABI_DEFAULT
293 static int meabi_flags = EABI_DEFAULT;
294 # else
295 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
296 # endif
297
298 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
299
/* Return TRUE when assembling for an EABI version of at least
   EF_ARM_EABI_VER4, as recorded in meabi_flags.  */
bfd_boolean
arm_is_eabi (void)
{
  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
}
305 #endif
306
307 #ifdef OBJ_ELF
308 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
309 symbolS * GOT_symbol;
310 #endif
311
312 /* 0: assemble for ARM,
313 1: assemble for Thumb,
314 2: assemble for Thumb even though target CPU does not support thumb
315 instructions. */
316 static int thumb_mode = 0;
317 /* A value distinct from the possible values for thumb_mode that we
318 can use to record whether thumb_mode has been copied into the
319 tc_frag_data field of a frag. */
320 #define MODE_RECORDED (1 << 4)
321
322 /* Specifies the intrinsic IT insn behavior mode. */
323 enum implicit_it_mode
324 {
325 IMPLICIT_IT_MODE_NEVER = 0x00,
326 IMPLICIT_IT_MODE_ARM = 0x01,
327 IMPLICIT_IT_MODE_THUMB = 0x02,
328 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
329 };
330 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
331
332 /* If unified_syntax is true, we are processing the new unified
333 ARM/Thumb syntax. Important differences from the old ARM mode:
334
335 - Immediate operands do not require a # prefix.
336 - Conditional affixes always appear at the end of the
337 instruction. (For backward compatibility, those instructions
338 that formerly had them in the middle, continue to accept them
339 there.)
340 - The IT instruction may appear, and if it does is validated
341 against subsequent conditional affixes. It does not generate
342 machine code.
343
344 Important differences from the old Thumb mode:
345
346 - Immediate operands do not require a # prefix.
347 - Most of the V6T2 instructions are only available in unified mode.
348 - The .N and .W suffixes are recognized and honored (it is an error
349 if they cannot be honored).
350 - All instructions set the flags if and only if they have an 's' affix.
351 - Conditional affixes may be used. They are validated against
352 preceding IT instructions. Unlike ARM mode, you cannot use a
353 conditional affix except in the scope of an IT instruction. */
354
355 static bfd_boolean unified_syntax = FALSE;
356
357 /* An immediate operand can start with #, and ld*, st*, pld operands
358 can contain [ and ]. We need to tell APP not to elide whitespace
359 before a [, which can appear as the first operand for pld.
360 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
361 const char arm_symbol_chars[] = "#[]{}";
362
363 enum neon_el_type
364 {
365 NT_invtype,
366 NT_untyped,
367 NT_integer,
368 NT_float,
369 NT_poly,
370 NT_signed,
371 NT_unsigned
372 };
373
374 struct neon_type_el
375 {
376 enum neon_el_type type;
377 unsigned size;
378 };
379
380 #define NEON_MAX_TYPE_ELS 4
381
382 struct neon_type
383 {
384 struct neon_type_el el[NEON_MAX_TYPE_ELS];
385 unsigned elems;
386 };
387
/* Classification of an instruction with respect to Thumb IT blocks,
   used when tracking/validating conditional execution state.  */
enum it_instruction_type
{
   OUTSIDE_IT_INSN,		/* Must not appear inside an IT block.  */
   INSIDE_IT_INSN,		/* Must appear inside an IT block.  */
   INSIDE_IT_LAST_INSN,		/* Must be the last insn of an IT block.  */
   IF_INSIDE_IT_LAST_INSN,	/* Either outside or inside;
				   if inside, should be the last one.  */
   NEUTRAL_IT_INSN,		/* This could be either inside or outside,
				   i.e. BKPT and NOP.  */
   IT_INSN			/* The IT insn has been parsed.  */
};
399
400 /* The maximum number of operands we need. */
401 #define ARM_IT_MAX_OPERANDS 6
402
403 struct arm_it
404 {
405 const char * error;
406 unsigned long instruction;
407 int size;
408 int size_req;
409 int cond;
410 /* "uncond_value" is set to the value in place of the conditional field in
411 unconditional versions of the instruction, or -1 if nothing is
412 appropriate. */
413 int uncond_value;
414 struct neon_type vectype;
415 /* This does not indicate an actual NEON instruction, only that
416 the mnemonic accepts neon-style type suffixes. */
417 int is_neon;
418 /* Set to the opcode if the instruction needs relaxation.
419 Zero if the instruction is not relaxed. */
420 unsigned long relax;
421 struct
422 {
423 bfd_reloc_code_real_type type;
424 expressionS exp;
425 int pc_rel;
426 } reloc;
427
428 enum it_instruction_type it_insn_type;
429
430 struct
431 {
432 unsigned reg;
433 signed int imm;
434 struct neon_type_el vectype;
435 unsigned present : 1; /* Operand present. */
436 unsigned isreg : 1; /* Operand was a register. */
437 unsigned immisreg : 1; /* .imm field is a second register. */
438 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
439 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
440 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
441 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
442 instructions. This allows us to disambiguate ARM <-> vector insns. */
443 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
444 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
445 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
446 unsigned issingle : 1; /* Operand is VFP single-precision register. */
447 unsigned hasreloc : 1; /* Operand has relocation suffix. */
448 unsigned writeback : 1; /* Operand has trailing ! */
449 unsigned preind : 1; /* Preindexed address. */
450 unsigned postind : 1; /* Postindexed address. */
451 unsigned negative : 1; /* Index register was negated. */
452 unsigned shifted : 1; /* Shift applied to operation. */
453 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
454 } operands[ARM_IT_MAX_OPERANDS];
455 };
456
457 static struct arm_it inst;
458
459 #define NUM_FLOAT_VALS 8
460
461 const char * fp_const[] =
462 {
463 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
464 };
465
466 /* Number of littlenums required to hold an extended precision number. */
467 #define MAX_LITTLENUMS 6
468
469 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
470
471 #define FAIL (-1)
472 #define SUCCESS (0)
473
474 #define SUFF_S 1
475 #define SUFF_D 2
476 #define SUFF_E 3
477 #define SUFF_P 4
478
479 #define CP_T_X 0x00008000
480 #define CP_T_Y 0x00400000
481
482 #define CONDS_BIT 0x00100000
483 #define LOAD_BIT 0x00100000
484
485 #define DOUBLE_LOAD_FLAG 0x00000001
486
487 struct asm_cond
488 {
489 const char * template_name;
490 unsigned long value;
491 };
492
493 #define COND_ALWAYS 0xE
494
495 struct asm_psr
496 {
497 const char * template_name;
498 unsigned long field;
499 };
500
501 struct asm_barrier_opt
502 {
503 const char * template_name;
504 unsigned long value;
505 const arm_feature_set arch;
506 };
507
508 /* The bit that distinguishes CPSR and SPSR. */
509 #define SPSR_BIT (1 << 22)
510
511 /* The individual PSR flag bits. */
512 #define PSR_c (1 << 16)
513 #define PSR_x (1 << 17)
514 #define PSR_s (1 << 18)
515 #define PSR_f (1 << 19)
516
517 struct reloc_entry
518 {
519 char * name;
520 bfd_reloc_code_real_type reloc;
521 };
522
523 enum vfp_reg_pos
524 {
525 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
526 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
527 };
528
529 enum vfp_ldstm_type
530 {
531 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
532 };
533
534 /* Bits for DEFINED field in neon_typed_alias. */
535 #define NTA_HASTYPE 1
536 #define NTA_HASINDEX 2
537
538 struct neon_typed_alias
539 {
540 unsigned char defined;
541 unsigned char index;
542 struct neon_type_el eltype;
543 };
544
545 /* ARM register categories. This includes coprocessor numbers and various
546 architecture extensions' registers. */
547 enum arm_reg_type
548 {
549 REG_TYPE_RN,
550 REG_TYPE_CP,
551 REG_TYPE_CN,
552 REG_TYPE_FN,
553 REG_TYPE_VFS,
554 REG_TYPE_VFD,
555 REG_TYPE_NQ,
556 REG_TYPE_VFSD,
557 REG_TYPE_NDQ,
558 REG_TYPE_NSDQ,
559 REG_TYPE_VFC,
560 REG_TYPE_MVF,
561 REG_TYPE_MVD,
562 REG_TYPE_MVFX,
563 REG_TYPE_MVDX,
564 REG_TYPE_MVAX,
565 REG_TYPE_DSPSC,
566 REG_TYPE_MMXWR,
567 REG_TYPE_MMXWC,
568 REG_TYPE_MMXWCG,
569 REG_TYPE_XSCALE,
570 REG_TYPE_RNB
571 };
572
573 /* Structure for a hash table entry for a register.
574 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
575 information which states whether a vector type or index is specified (for a
576 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
577 struct reg_entry
578 {
579 const char * name;
580 unsigned int number;
581 unsigned char type;
582 unsigned char builtin;
583 struct neon_typed_alias * neon;
584 };
585
/* Diagnostics used when we don't get a register of the expected type.
   NOTE: indexed by enum arm_reg_type above, so the entries must stay in
   the same order as that enum.  (REG_TYPE_RNB appears to have no entry
   here -- presumably never used in an error path; verify before use.)  */
const char * const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};
611
612 /* Some well known registers that we refer to directly elsewhere. */
613 #define REG_R12 12
614 #define REG_SP 13
615 #define REG_LR 14
616 #define REG_PC 15
617
618 /* ARM instructions take 4bytes in the object file, Thumb instructions
619 take 2: */
620 #define INSN_SIZE 4
621
622 struct asm_opcode
623 {
624 /* Basic string to match. */
625 const char * template_name;
626
627 /* Parameters to instruction. */
628 unsigned int operands[8];
629
630 /* Conditional tag - see opcode_lookup. */
631 unsigned int tag : 4;
632
633 /* Basic instruction code. */
634 unsigned int avalue : 28;
635
636 /* Thumb-format instruction code. */
637 unsigned int tvalue;
638
639 /* Which architecture variant provides this instruction. */
640 const arm_feature_set * avariant;
641 const arm_feature_set * tvariant;
642
643 /* Function to call to encode instruction in ARM format. */
644 void (* aencode) (void);
645
646 /* Function to call to encode instruction in Thumb format. */
647 void (* tencode) (void);
648 };
649
650 /* Defines for various bits that we will want to toggle. */
651 #define INST_IMMEDIATE 0x02000000
652 #define OFFSET_REG 0x02000000
653 #define HWOFFSET_IMM 0x00400000
654 #define SHIFT_BY_REG 0x00000010
655 #define PRE_INDEX 0x01000000
656 #define INDEX_UP 0x00800000
657 #define WRITE_BACK 0x00200000
658 #define LDM_TYPE_2_OR_3 0x00400000
659 #define CPSI_MMOD 0x00020000
660
661 #define LITERAL_MASK 0xf000f000
662 #define OPCODE_MASK 0xfe1fffff
663 #define V4_STR_BIT 0x00000020
664 #define VLDR_VMOV_SAME 0x0040f000
665
666 #define T2_SUBS_PC_LR 0xf3de8f00
667
668 #define DATA_OP_SHIFT 21
669
670 #define T2_OPCODE_MASK 0xfe1fffff
671 #define T2_DATA_OP_SHIFT 21
672
673 #define A_COND_MASK 0xf0000000
674 #define A_PUSH_POP_OP_MASK 0x0fff0000
675
676 /* Opcodes for pushing/popping registers to/from the stack. */
677 #define A1_OPCODE_PUSH 0x092d0000
678 #define A2_OPCODE_PUSH 0x052d0004
679 #define A2_OPCODE_POP 0x049d0004
680
681 /* Codes to distinguish the arithmetic instructions. */
682 #define OPCODE_AND 0
683 #define OPCODE_EOR 1
684 #define OPCODE_SUB 2
685 #define OPCODE_RSB 3
686 #define OPCODE_ADD 4
687 #define OPCODE_ADC 5
688 #define OPCODE_SBC 6
689 #define OPCODE_RSC 7
690 #define OPCODE_TST 8
691 #define OPCODE_TEQ 9
692 #define OPCODE_CMP 10
693 #define OPCODE_CMN 11
694 #define OPCODE_ORR 12
695 #define OPCODE_MOV 13
696 #define OPCODE_BIC 14
697 #define OPCODE_MVN 15
698
699 #define T2_OPCODE_AND 0
700 #define T2_OPCODE_BIC 1
701 #define T2_OPCODE_ORR 2
702 #define T2_OPCODE_ORN 3
703 #define T2_OPCODE_EOR 4
704 #define T2_OPCODE_ADD 8
705 #define T2_OPCODE_ADC 10
706 #define T2_OPCODE_SBC 11
707 #define T2_OPCODE_SUB 13
708 #define T2_OPCODE_RSB 14
709
710 #define T_OPCODE_MUL 0x4340
711 #define T_OPCODE_TST 0x4200
712 #define T_OPCODE_CMN 0x42c0
713 #define T_OPCODE_NEG 0x4240
714 #define T_OPCODE_MVN 0x43c0
715
716 #define T_OPCODE_ADD_R3 0x1800
717 #define T_OPCODE_SUB_R3 0x1a00
718 #define T_OPCODE_ADD_HI 0x4400
719 #define T_OPCODE_ADD_ST 0xb000
720 #define T_OPCODE_SUB_ST 0xb080
721 #define T_OPCODE_ADD_SP 0xa800
722 #define T_OPCODE_ADD_PC 0xa000
723 #define T_OPCODE_ADD_I8 0x3000
724 #define T_OPCODE_SUB_I8 0x3800
725 #define T_OPCODE_ADD_I3 0x1c00
726 #define T_OPCODE_SUB_I3 0x1e00
727
728 #define T_OPCODE_ASR_R 0x4100
729 #define T_OPCODE_LSL_R 0x4080
730 #define T_OPCODE_LSR_R 0x40c0
731 #define T_OPCODE_ROR_R 0x41c0
732 #define T_OPCODE_ASR_I 0x1000
733 #define T_OPCODE_LSL_I 0x0000
734 #define T_OPCODE_LSR_I 0x0800
735
736 #define T_OPCODE_MOV_I8 0x2000
737 #define T_OPCODE_CMP_I8 0x2800
738 #define T_OPCODE_CMP_LR 0x4280
739 #define T_OPCODE_MOV_HR 0x4600
740 #define T_OPCODE_CMP_HR 0x4500
741
742 #define T_OPCODE_LDR_PC 0x4800
743 #define T_OPCODE_LDR_SP 0x9800
744 #define T_OPCODE_STR_SP 0x9000
745 #define T_OPCODE_LDR_IW 0x6800
746 #define T_OPCODE_STR_IW 0x6000
747 #define T_OPCODE_LDR_IH 0x8800
748 #define T_OPCODE_STR_IH 0x8000
749 #define T_OPCODE_LDR_IB 0x7800
750 #define T_OPCODE_STR_IB 0x7000
751 #define T_OPCODE_LDR_RW 0x5800
752 #define T_OPCODE_STR_RW 0x5000
753 #define T_OPCODE_LDR_RH 0x5a00
754 #define T_OPCODE_STR_RH 0x5200
755 #define T_OPCODE_LDR_RB 0x5c00
756 #define T_OPCODE_STR_RB 0x5400
757
758 #define T_OPCODE_PUSH 0xb400
759 #define T_OPCODE_POP 0xbc00
760
761 #define T_OPCODE_BRANCH 0xe000
762
763 #define THUMB_SIZE 2 /* Size of thumb instruction. */
764 #define THUMB_PP_PC_LR 0x0100
765 #define THUMB_LOAD_BIT 0x0800
766 #define THUMB2_LOAD_BIT 0x00100000
767
/* Diagnostic strings for rejected instructions/operands.  These expand
   to bare expressions (no trailing semicolon!) so they can be used both
   as statement RHS and inside larger expressions.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* Fixed: the stray trailing semicolon inside this define made the macro
   expand to "expr;", which breaks any use other than as a complete
   expression statement (e.g. in a conditional or as an argument).  */
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
789
790 static struct hash_control * arm_ops_hsh;
791 static struct hash_control * arm_cond_hsh;
792 static struct hash_control * arm_shift_hsh;
793 static struct hash_control * arm_psr_hsh;
794 static struct hash_control * arm_v7m_psr_hsh;
795 static struct hash_control * arm_reg_hsh;
796 static struct hash_control * arm_reloc_hsh;
797 static struct hash_control * arm_barrier_opt_hsh;
798
799 /* Stuff needed to resolve the label ambiguity
800 As:
801 ...
802 label: <insn>
803 may differ from:
804 ...
805 label:
806 <insn> */
807
808 symbolS * last_label_seen;
809 static int label_is_thumb_function_name = FALSE;
810
811 /* Literal pool structure. Held on a per-section
812 and per-sub-section basis. */
813
814 #define MAX_LITERAL_POOL_SIZE 1024
815 typedef struct literal_pool
816 {
817 expressionS literals [MAX_LITERAL_POOL_SIZE];
818 unsigned int next_free_entry;
819 unsigned int id;
820 symbolS * symbol;
821 segT section;
822 subsegT sub_section;
823 #ifdef OBJ_ELF
824 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
825 #endif
826 struct literal_pool * next;
827 unsigned int alignment;
828 } literal_pool;
829
830 /* Pointer to a linked list of literal pools. */
831 literal_pool * list_of_pools = NULL;
832
833 typedef enum asmfunc_states
834 {
835 OUTSIDE_ASMFUNC,
836 WAITING_ASMFUNC_NAME,
837 WAITING_ENDASMFUNC
838 } asmfunc_states;
839
840 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
841
842 #ifdef OBJ_ELF
843 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
844 #else
845 static struct current_it now_it;
846 #endif
847
/* Return nonzero if condition code COND is compatible with the current
   IT block's condition: they must be equal ignoring the low bit, which
   is the bit distinguishing a condition from its inverse.  */
static inline int
now_it_compatible (int cond)
{
  return (cond & ~1) == (now_it.cc & ~1);
}
853
/* Return nonzero if the instruction being assembled carries a condition
   other than AL (always).  */
static inline int
conditional_insn (void)
{
  return inst.cond != COND_ALWAYS;
}
859
860 static int in_it_block (void);
861
862 static int handle_it_state (void);
863
864 static void force_automatic_it_block_close (void);
865
866 static void it_fsm_post_encode (void);
867
868 #define set_it_insn_type(type) \
869 do \
870 { \
871 inst.it_insn_type = type; \
872 if (handle_it_state () == FAIL) \
873 return; \
874 } \
875 while (0)
876
877 #define set_it_insn_type_nonvoid(type, failret) \
878 do \
879 { \
880 inst.it_insn_type = type; \
881 if (handle_it_state () == FAIL) \
882 return failret; \
883 } \
884 while(0)
885
886 #define set_it_insn_type_last() \
887 do \
888 { \
889 if (inst.cond == COND_ALWAYS) \
890 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
891 else \
892 set_it_insn_type (INSIDE_IT_LAST_INSN); \
893 } \
894 while (0)
895
896 /* Pure syntax. */
897
898 /* This array holds the chars that always start a comment. If the
899 pre-processor is disabled, these aren't very useful. */
900 char arm_comment_chars[] = "@";
901
902 /* This array holds the chars that only start a comment at the beginning of
903 a line. If the line seems to have the form '# 123 filename'
904 .line and .file directives will appear in the pre-processed output. */
905 /* Note that input_file.c hand checks for '#' at the beginning of the
906 first line of the input file. This is because the compiler outputs
907 #NO_APP at the beginning of its output. */
908 /* Also note that comments like this one will always work. */
909 const char line_comment_chars[] = "#";
910
911 char arm_line_separator_chars[] = ";";
912
913 /* Chars that can be used to separate mant
914 from exp in floating point numbers. */
915 const char EXP_CHARS[] = "eE";
916
917 /* Chars that mean this number is a floating point constant. */
918 /* As in 0f12.456 */
919 /* or 0d1.2345e12 */
920
921 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
922
923 /* Prefix characters that indicate the start of an immediate
924 value. */
925 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
926
927 /* Separator character handling. */
928
929 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
930
931 static inline int
932 skip_past_char (char ** str, char c)
933 {
934 /* PR gas/14987: Allow for whitespace before the expected character. */
935 skip_whitespace (*str);
936
937 if (**str == c)
938 {
939 (*str)++;
940 return SUCCESS;
941 }
942 else
943 return FAIL;
944 }
945
/* Operand separator; skip_past_char also tolerates one leading space.  */
#define skip_past_comma(str) skip_past_char (str, ',')
947
948 /* Arithmetic expressions (possibly involving symbols). */
949
950 /* Return TRUE if anything in the expression is a bignum. */
951
952 static int
953 walk_no_bignums (symbolS * sp)
954 {
955 if (symbol_get_value_expression (sp)->X_op == O_big)
956 return 1;
957
958 if (symbol_get_value_expression (sp)->X_add_symbol)
959 {
960 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
961 || (symbol_get_value_expression (sp)->X_op_symbol
962 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
963 }
964
965 return 0;
966 }
967
/* Nonzero while my_get_expression is running; md_operand consults it to
   flag operands it cannot parse as O_illegal.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
977
/* Parse an expression at *STR into EP.  PREFIX_MODE is one of the GE_*
   values above; it controls whether the '#'/'$' immediate prefix is
   required, optional, or absent, and whether bignums are permitted.
   On success return 0 with *STR advanced past the expression; on
   failure return non-zero -- the missing-prefix path returns FAIL
   while the others return 1; callers only test for non-zero -- and set
   inst.error if it was not already set.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* The generic expression parser reads from input_line_pointer, so
     point it at our string for the duration and restore it afterwards.
     in_my_get_expression lets md_operand mark bad operands.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

#ifdef OBJ_AOUT
  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }
#else
  (void) seg;
#endif

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
1063
1064 /* Turn a string in input_line_pointer into a floating point constant
1065 of type TYPE, and store the appropriate bytes in *LITP. The number
1066 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1067 returned, or NULL on OK.
1068
1069 Note that fp constants aren't represent in the normal way on the ARM.
1070 In big endian mode, things are as expected. However, in little endian
1071 mode fp constants are big-endian word-wise, and little-endian byte-wise
1072 within the words. For example, (double) 1.1 in big endian mode is
1073 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1074 the byte sequence 99 99 f1 3f 9a 99 99 99.
1075
1076 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1077
1078 char *
1079 md_atof (int type, char * litP, int * sizeP)
1080 {
1081 int prec;
1082 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1083 char *t;
1084 int i;
1085
1086 switch (type)
1087 {
1088 case 'f':
1089 case 'F':
1090 case 's':
1091 case 'S':
1092 prec = 2;
1093 break;
1094
1095 case 'd':
1096 case 'D':
1097 case 'r':
1098 case 'R':
1099 prec = 4;
1100 break;
1101
1102 case 'x':
1103 case 'X':
1104 prec = 5;
1105 break;
1106
1107 case 'p':
1108 case 'P':
1109 prec = 5;
1110 break;
1111
1112 default:
1113 *sizeP = 0;
1114 return _("Unrecognized or unsupported floating point constant");
1115 }
1116
1117 t = atof_ieee (input_line_pointer, type, words);
1118 if (t)
1119 input_line_pointer = t;
1120 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1121
1122 if (target_big_endian)
1123 {
1124 for (i = 0; i < prec; i++)
1125 {
1126 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1127 litP += sizeof (LITTLENUM_TYPE);
1128 }
1129 }
1130 else
1131 {
1132 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1133 for (i = prec - 1; i >= 0; i--)
1134 {
1135 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1136 litP += sizeof (LITTLENUM_TYPE);
1137 }
1138 else
1139 /* For a 4 byte float the order of elements in `words' is 1 0.
1140 For an 8 byte float the order is 1 0 3 2. */
1141 for (i = 0; i < prec; i += 2)
1142 {
1143 md_number_to_chars (litP, (valueT) words[i + 1],
1144 sizeof (LITTLENUM_TYPE));
1145 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1146 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1147 litP += 2 * sizeof (LITTLENUM_TYPE);
1148 }
1149 }
1150
1151 return NULL;
1152 }
1153
1154 /* We handle all bad expressions here, so that we can report the faulty
1155 instruction in the error message. */
1156 void
1157 md_operand (expressionS * exp)
1158 {
1159 if (in_my_get_expression)
1160 exp->X_op = O_illegal;
1161 }
1162
1163 /* Immediate values. */
1164
1165 /* Generic immediate-value read function for use in directives.
1166 Accepts anything that 'expression' can fold to a constant.
1167 *val receives the number. */
1168 #ifdef OBJ_ELF
1169 static int
1170 immediate_for_directive (int *val)
1171 {
1172 expressionS exp;
1173 exp.X_op = O_illegal;
1174
1175 if (is_immediate_prefix (*input_line_pointer))
1176 {
1177 input_line_pointer++;
1178 expression (&exp);
1179 }
1180
1181 if (exp.X_op != O_constant)
1182 {
1183 as_bad (_("expected #constant"));
1184 ignore_rest_of_line ();
1185 return FAIL;
1186 }
1187 *val = exp.X_add_number;
1188 return SUCCESS;
1189 }
1190 #endif
1191
1192 /* Register parsing. */
1193
1194 /* Generic register parser. CCP points to what should be the
1195 beginning of a register name. If it is indeed a valid register
1196 name, advance CCP over it and return the reg_entry structure;
1197 otherwise return NULL. Does not issue diagnostics. */
1198
static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

  skip_whitespace (start);

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  /* A register name must begin with a letter; subsequent characters may
     be letters, digits or underscores.  */
  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  /* Look the candidate name up in the register hash table.  */
  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  /* Only advance the caller's cursor on success.  */
  *ccp = p;
  return reg;
}
1234
/* Handle the alternative syntaxes accepted for a few register classes.
   START is the unparsed input position, *CCP the cursor to advance,
   REG the entry found by the generic parser (may be NULL).  Returns a
   register number, or FAIL.  */
static int
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
{
  /* Alternative syntaxes are accepted for a few register classes.  */
  switch (type)
    {
    case REG_TYPE_MVF:
    case REG_TYPE_MVD:
    case REG_TYPE_MVFX:
    case REG_TYPE_MVDX:
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	return reg->number;
      break;

    case REG_TYPE_CP:
      /* For backward compatibility, a bare number is valid here.  */
      {
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)
	  return processor;
      }
      /* Fall through.  */

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
	return reg->number;
      break;

    default:
      break;
    }

  return FAIL;
}
1272
1273 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1274 return value is the register number or FAIL. */
1275
1276 static int
1277 arm_reg_parse (char **ccp, enum arm_reg_type type)
1278 {
1279 char *start = *ccp;
1280 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1281 int ret;
1282
1283 /* Do not allow a scalar (reg+index) to parse as a register. */
1284 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1285 return FAIL;
1286
1287 if (reg && reg->type == type)
1288 return reg->number;
1289
1290 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1291 return ret;
1292
1293 *ccp = start;
1294 return FAIL;
1295 }
1296
1297 /* Parse a Neon type specifier. *STR should point at the leading '.'
1298 character. Does no verification at this stage that the type fits the opcode
1299 properly. E.g.,
1300
1301 .i32.i32.s16
1302 .s32.f32
1303 .u16
1304
1305 Can all be legally parsed by this function.
1306
1307 Fills in neon_type struct pointer with parsed information, and updates STR
1308 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1309 type, FAIL if not. */
1310
1311 static int
1312 parse_neon_type (struct neon_type *type, char **str)
1313 {
1314 char *ptr = *str;
1315
1316 if (type)
1317 type->elems = 0;
1318
1319 while (type->elems < NEON_MAX_TYPE_ELS)
1320 {
1321 enum neon_el_type thistype = NT_untyped;
1322 unsigned thissize = -1u;
1323
1324 if (*ptr != '.')
1325 break;
1326
1327 ptr++;
1328
1329 /* Just a size without an explicit type. */
1330 if (ISDIGIT (*ptr))
1331 goto parsesize;
1332
1333 switch (TOLOWER (*ptr))
1334 {
1335 case 'i': thistype = NT_integer; break;
1336 case 'f': thistype = NT_float; break;
1337 case 'p': thistype = NT_poly; break;
1338 case 's': thistype = NT_signed; break;
1339 case 'u': thistype = NT_unsigned; break;
1340 case 'd':
1341 thistype = NT_float;
1342 thissize = 64;
1343 ptr++;
1344 goto done;
1345 default:
1346 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1347 return FAIL;
1348 }
1349
1350 ptr++;
1351
1352 /* .f is an abbreviation for .f32. */
1353 if (thistype == NT_float && !ISDIGIT (*ptr))
1354 thissize = 32;
1355 else
1356 {
1357 parsesize:
1358 thissize = strtoul (ptr, &ptr, 10);
1359
1360 if (thissize != 8 && thissize != 16 && thissize != 32
1361 && thissize != 64)
1362 {
1363 as_bad (_("bad size %d in type specifier"), thissize);
1364 return FAIL;
1365 }
1366 }
1367
1368 done:
1369 if (type)
1370 {
1371 type->el[type->elems].type = thistype;
1372 type->el[type->elems].size = thissize;
1373 type->elems++;
1374 }
1375 }
1376
1377 /* Empty/missing type is not a successful parse. */
1378 if (type->elems == 0)
1379 return FAIL;
1380
1381 *str = ptr;
1382
1383 return SUCCESS;
1384 }
1385
1386 /* Errors may be set multiple times during parsing or bit encoding
1387 (particularly in the Neon bits), but usually the earliest error which is set
1388 will be the most meaningful. Avoid overwriting it with later (cascading)
1389 errors by calling this function. */
1390
1391 static void
1392 first_error (const char *err)
1393 {
1394 if (!inst.error)
1395 inst.error = err;
1396 }
1397
1398 /* Parse a single type, e.g. ".s32", leading period included. */
1399 static int
1400 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1401 {
1402 char *str = *ccp;
1403 struct neon_type optype;
1404
1405 if (*str == '.')
1406 {
1407 if (parse_neon_type (&optype, &str) == SUCCESS)
1408 {
1409 if (optype.elems == 1)
1410 *vectype = optype.el[0];
1411 else
1412 {
1413 first_error (_("only one type should be specified for operand"));
1414 return FAIL;
1415 }
1416 }
1417 else
1418 {
1419 first_error (_("vector type expected"));
1420 return FAIL;
1421 }
1422 }
1423 else
1424 return FAIL;
1425
1426 *ccp = str;
1427
1428 return SUCCESS;
1429 }
1430
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

/* Set when "[]" is parsed after a D register: all lanes.  */
#define NEON_ALL_LANES 15
/* Marker used while parsing interleaved (structure) register lists.  */
#define NEON_INTERLEAVE_LANES 14
1436
1437 /* Parse either a register or a scalar, with an optional type. Return the
1438 register number, and optionally fill in the actual type of the register
1439 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1440 type/index information in *TYPEINFO. */
1441
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with no type or index information attached.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)
    return FAIL;

  /* A typed alias (from .dn/.qn) supplies default type/index info.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides, unless one is already set.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[]" means all lanes, "[n]" a single lane.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD)
	{
	  first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1545
/* Like arm_reg_parse, but allow the following extra features:
1547 - If RTYPE is non-zero, return the (possibly restricted) type of the
1548 register (e.g. Neon double or quad reg when either has been requested).
1549 - If this is a Neon vector type with additional type information, fill
1550 in the struct pointed to by VECTYPE (if non-NULL).
1551 This function will fault on encountering a scalar. */
1552
static int
arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
{
  struct neon_typed_alias atype;
  char *str = *ccp;
  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);

  if (reg == FAIL)
    return FAIL;

  /* Do not allow regname(... to parse as a register.  */
  if (*str == '(')
    return FAIL;

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if ((atype.defined & NTA_HASINDEX) != 0)
    {
      first_error (_("register operand expected, but got scalar"));
      return FAIL;
    }

  if (vectype)
    *vectype = atype.eltype;

  /* Only advance the caller's cursor on success.  */
  *ccp = str;

  return reg;
}
1582
1583 #define NEON_SCALAR_REG(X) ((X) >> 4)
1584 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1585
1586 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1587 have enough information to be able to do a good job bounds-checking. So, we
1588 just do easy checks here, and do further checks later. */
1589
1590 static int
1591 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1592 {
1593 int reg;
1594 char *str = *ccp;
1595 struct neon_typed_alias atype;
1596
1597 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1598
1599 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1600 return FAIL;
1601
1602 if (atype.index == NEON_ALL_LANES)
1603 {
1604 first_error (_("scalar must have an index"));
1605 return FAIL;
1606 }
1607 else if (atype.index >= 64 / elsize)
1608 {
1609 first_error (_("scalar index out of range"));
1610 return FAIL;
1611 }
1612
1613 if (type)
1614 *type = atype.eltype;
1615
1616 *ccp = str;
1617
1618 return reg * 16 + atype.index;
1619 }
1620
1621 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1622
1623 static long
1624 parse_reg_list (char ** strp)
1625 {
1626 char * str = * strp;
1627 long range = 0;
1628 int another_range;
1629
1630 /* We come back here if we get ranges concatenated by '+' or '|'. */
1631 do
1632 {
1633 skip_whitespace (str);
1634
1635 another_range = 0;
1636
1637 if (*str == '{')
1638 {
1639 int in_range = 0;
1640 int cur_reg = -1;
1641
1642 str++;
1643 do
1644 {
1645 int reg;
1646
1647 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1648 {
1649 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1650 return FAIL;
1651 }
1652
1653 if (in_range)
1654 {
1655 int i;
1656
1657 if (reg <= cur_reg)
1658 {
1659 first_error (_("bad range in register list"));
1660 return FAIL;
1661 }
1662
1663 for (i = cur_reg + 1; i < reg; i++)
1664 {
1665 if (range & (1 << i))
1666 as_tsktsk
1667 (_("Warning: duplicated register (r%d) in register list"),
1668 i);
1669 else
1670 range |= 1 << i;
1671 }
1672 in_range = 0;
1673 }
1674
1675 if (range & (1 << reg))
1676 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1677 reg);
1678 else if (reg <= cur_reg)
1679 as_tsktsk (_("Warning: register range not in ascending order"));
1680
1681 range |= 1 << reg;
1682 cur_reg = reg;
1683 }
1684 while (skip_past_comma (&str) != FAIL
1685 || (in_range = 1, *str++ == '-'));
1686 str--;
1687
1688 if (skip_past_char (&str, '}') == FAIL)
1689 {
1690 first_error (_("missing `}'"));
1691 return FAIL;
1692 }
1693 }
1694 else
1695 {
1696 expressionS exp;
1697
1698 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1699 return FAIL;
1700
1701 if (exp.X_op == O_constant)
1702 {
1703 if (exp.X_add_number
1704 != (exp.X_add_number & 0x0000ffff))
1705 {
1706 inst.error = _("invalid register mask");
1707 return FAIL;
1708 }
1709
1710 if ((range & exp.X_add_number) != 0)
1711 {
1712 int regno = range & exp.X_add_number;
1713
1714 regno &= -regno;
1715 regno = (1 << regno) - 1;
1716 as_tsktsk
1717 (_("Warning: duplicated register (r%d) in register list"),
1718 regno);
1719 }
1720
1721 range |= exp.X_add_number;
1722 }
1723 else
1724 {
1725 if (inst.reloc.type != 0)
1726 {
1727 inst.error = _("expression too complex");
1728 return FAIL;
1729 }
1730
1731 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1732 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1733 inst.reloc.pc_rel = 0;
1734 }
1735 }
1736
1737 if (*str == '|' || *str == '+')
1738 {
1739 str++;
1740 another_range = 1;
1741 }
1742 }
1743 while (another_range);
1744
1745 *strp = str;
1746 return range;
1747 }
1748
1749 /* Types of registers in a list. */
1750
enum reg_list_els
{
  REGLIST_VFP_S,	/* Single-precision VFP registers (s0-s31).  */
  REGLIST_VFP_D,	/* Double-precision VFP registers.  */
  REGLIST_NEON_D	/* Neon D registers; Q syntax also accepted.  */
};
1757
1758 /* Parse a VFP register list. If the string is invalid return FAIL.
1759 Otherwise return the number of registers, and set PBASE to the first
1760 register. Parses registers of type ETYPE.
1761 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1762 - Q registers can be used to specify pairs of D registers
1763 - { } can be omitted from around a singleton register list
1764 FIXME: This is not implemented, as it would require backtracking in
1765 some cases, e.g.:
1766 vtbl.8 d3,d4,d5
1767 This could be done (the meaning isn't really ambiguous), but doesn't
1768 fit in well with the current parsing framework.
1769 - 32 D registers may be used (also true for VFPv3).
1770 FIXME: Types are ignored in these register lists, which is probably a
1771 bug. */
1772
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Pick the register class; the D-register limit is resolved below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;
    }

  if (etype != REGLIST_VFP_S)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  base_reg = max_regs;

  do
    {
      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Accumulate every register of the range into the mask.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): the list terminator is skipped here without checking
     that it actually is '}' -- confirm that a malformed terminator
     always fails earlier in arm_typed_reg_parse.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)
    abort ();

  *pbase = base_reg;

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
1936
1937 /* True if two alias types are the same. */
1938
1939 static bfd_boolean
1940 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1941 {
1942 if (!a && !b)
1943 return TRUE;
1944
1945 if (!a || !b)
1946 return FALSE;
1947
1948 if (a->defined != b->defined)
1949 return FALSE;
1950
1951 if ((a->defined & NTA_HASTYPE) != 0
1952 && (a->eltype.type != b->eltype.type
1953 || a->eltype.size != b->eltype.size))
1954 return FALSE;
1955
1956 if ((a->defined & NTA_HASINDEX) != 0
1957 && (a->index != b->index))
1958 return FALSE;
1959
1960 return TRUE;
1961 }
1962
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

/* Accessors for the packed return value described above.  */
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
1974
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First register: remember it and its type so the rest of the
	     list can be checked for consistency against it.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second register: it fixes the stride for the whole list.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.	 */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every D register of the range (two per Q register).  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* An indexed element: all entries must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length into the documented return format.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2127
2128 /* Parse an explicit relocation suffix on an expression. This is
2129 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2130 arm_reloc_hsh contains no entries, so this function can only
2131 succeed if there is no () after the word. Returns -1 on error,
2132 BFD_RELOC_UNUSED if there wasn't any suffix. */
2133
2134 static int
2135 parse_reloc (char **str)
2136 {
2137 struct reloc_entry *r;
2138 char *p, *q;
2139
2140 if (**str != '(')
2141 return BFD_RELOC_UNUSED;
2142
2143 p = *str + 1;
2144 q = p;
2145
2146 while (*q && *q != ')' && *q != ',')
2147 q++;
2148 if (*q != ')')
2149 return -1;
2150
2151 if ((r = (struct reloc_entry *)
2152 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2153 return -1;
2154
2155 *str = q + 1;
2156 return r->reloc;
2157 }
2158
2159 /* Directives: register aliases. */
2160
2161 static struct reg_entry *
2162 insert_reg_alias (char *str, unsigned number, int type)
2163 {
2164 struct reg_entry *new_reg;
2165 const char *name;
2166
2167 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2168 {
2169 if (new_reg->builtin)
2170 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2171
2172 /* Only warn about a redefinition if it's not defined as the
2173 same register. */
2174 else if (new_reg->number != number || new_reg->type != type)
2175 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2176
2177 return NULL;
2178 }
2179
2180 name = xstrdup (str);
2181 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2182
2183 new_reg->name = name;
2184 new_reg->number = number;
2185 new_reg->type = type;
2186 new_reg->builtin = FALSE;
2187 new_reg->neon = NULL;
2188
2189 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2190 abort ();
2191
2192 return new_reg;
2193 }
2194
2195 static void
2196 insert_neon_reg_alias (char *str, int number, int type,
2197 struct neon_typed_alias *atype)
2198 {
2199 struct reg_entry *reg = insert_reg_alias (str, number, type);
2200
2201 if (!reg)
2202 {
2203 first_error (_("attempt to redefine typed alias"));
2204 return;
2205 }
2206
2207 if (atype)
2208 {
2209 reg->neon = (struct neon_typed_alias *)
2210 xmalloc (sizeof (struct neon_typed_alias));
2211 *reg->neon = *atype;
2212 }
2213 }
2214
2215 /* Look for the .req directive. This is of the form:
2216
2217 new_register_name .req existing_register_name
2218
2219 If we find one, or if it looks sufficiently like one that we want to
2220 handle any error here, return TRUE. Otherwise return FALSE. */
2221
2222 static bfd_boolean
2223 create_register_alias (char * newname, char *p)
2224 {
2225 struct reg_entry *old;
2226 char *oldname, *nbuf;
2227 size_t nlen;
2228
2229 /* The input scrubber ensures that whitespace after the mnemonic is
2230 collapsed to single spaces. */
2231 oldname = p;
2232 if (strncmp (oldname, " .req ", 6) != 0)
2233 return FALSE;
2234
2235 oldname += 6;
2236 if (*oldname == '\0')
2237 return FALSE;
2238
2239 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2240 if (!old)
2241 {
2242 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2243 return TRUE;
2244 }
2245
2246 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2247 the desired alias name, and p points to its end. If not, then
2248 the desired alias name is in the global original_case_string. */
2249 #ifdef TC_CASE_SENSITIVE
2250 nlen = p - newname;
2251 #else
2252 newname = original_case_string;
2253 nlen = strlen (newname);
2254 #endif
2255
2256 nbuf = (char *) alloca (nlen + 1);
2257 memcpy (nbuf, newname, nlen);
2258 nbuf[nlen] = '\0';
2259
2260 /* Create aliases under the new name as stated; an all-lowercase
2261 version of the new name; and an all-uppercase version of the new
2262 name. */
2263 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2264 {
2265 for (p = nbuf; *p; p++)
2266 *p = TOUPPER (*p);
2267
2268 if (strncmp (nbuf, newname, nlen))
2269 {
2270 /* If this attempt to create an additional alias fails, do not bother
2271 trying to create the all-lower case alias. We will fail and issue
2272 a second, duplicate error message. This situation arises when the
2273 programmer does something like:
2274 foo .req r0
2275 Foo .req r1
2276 The second .req creates the "Foo" alias but then fails to create
2277 the artificial FOO alias because it has already been created by the
2278 first .req. */
2279 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2280 return TRUE;
2281 }
2282
2283 for (p = nbuf; *p; p++)
2284 *p = TOLOWER (*p);
2285
2286 if (strncmp (nbuf, newname, nlen))
2287 insert_reg_alias (nbuf, old->number, old->type);
2288 }
2289
2290 return TRUE;
2291 }
2292
2293 /* Create a Neon typed/indexed register alias using directives, e.g.:
2294 X .dn d5.s32[1]
2295 Y .qn 6.s16
2296 Z .dn d7
2297 T .dn Z[0]
2298 These typed registers can be used instead of the types specified after the
2299 Neon mnemonic, so long as all operands given have types. Types can also be
2300 specified directly, e.g.:
2301 vadd d0.s32, d1.s32, d2.s32 */
2302
2303 static bfd_boolean
2304 create_neon_reg_alias (char *newname, char *p)
2305 {
2306 enum arm_reg_type basetype;
2307 struct reg_entry *basereg;
2308 struct reg_entry mybasereg;
2309 struct neon_type ntype;
2310 struct neon_typed_alias typeinfo;
2311 char *namebuf, *nameend ATTRIBUTE_UNUSED;
2312 int namelen;
2313
2314 typeinfo.defined = 0;
2315 typeinfo.eltype.type = NT_invtype;
2316 typeinfo.eltype.size = -1;
2317 typeinfo.index = -1;
2318
2319 nameend = p;
2320
2321 if (strncmp (p, " .dn ", 5) == 0)
2322 basetype = REG_TYPE_VFD;
2323 else if (strncmp (p, " .qn ", 5) == 0)
2324 basetype = REG_TYPE_NQ;
2325 else
2326 return FALSE;
2327
2328 p += 5;
2329
2330 if (*p == '\0')
2331 return FALSE;
2332
2333 basereg = arm_reg_parse_multi (&p);
2334
2335 if (basereg && basereg->type != basetype)
2336 {
2337 as_bad (_("bad type for register"));
2338 return FALSE;
2339 }
2340
2341 if (basereg == NULL)
2342 {
2343 expressionS exp;
2344 /* Try parsing as an integer. */
2345 my_get_expression (&exp, &p, GE_NO_PREFIX);
2346 if (exp.X_op != O_constant)
2347 {
2348 as_bad (_("expression must be constant"));
2349 return FALSE;
2350 }
2351 basereg = &mybasereg;
2352 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2353 : exp.X_add_number;
2354 basereg->neon = 0;
2355 }
2356
2357 if (basereg->neon)
2358 typeinfo = *basereg->neon;
2359
2360 if (parse_neon_type (&ntype, &p) == SUCCESS)
2361 {
2362 /* We got a type. */
2363 if (typeinfo.defined & NTA_HASTYPE)
2364 {
2365 as_bad (_("can't redefine the type of a register alias"));
2366 return FALSE;
2367 }
2368
2369 typeinfo.defined |= NTA_HASTYPE;
2370 if (ntype.elems != 1)
2371 {
2372 as_bad (_("you must specify a single type only"));
2373 return FALSE;
2374 }
2375 typeinfo.eltype = ntype.el[0];
2376 }
2377
2378 if (skip_past_char (&p, '[') == SUCCESS)
2379 {
2380 expressionS exp;
2381 /* We got a scalar index. */
2382
2383 if (typeinfo.defined & NTA_HASINDEX)
2384 {
2385 as_bad (_("can't redefine the index of a scalar alias"));
2386 return FALSE;
2387 }
2388
2389 my_get_expression (&exp, &p, GE_NO_PREFIX);
2390
2391 if (exp.X_op != O_constant)
2392 {
2393 as_bad (_("scalar index must be constant"));
2394 return FALSE;
2395 }
2396
2397 typeinfo.defined |= NTA_HASINDEX;
2398 typeinfo.index = exp.X_add_number;
2399
2400 if (skip_past_char (&p, ']') == FAIL)
2401 {
2402 as_bad (_("expecting ]"));
2403 return FALSE;
2404 }
2405 }
2406
2407 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2408 the desired alias name, and p points to its end. If not, then
2409 the desired alias name is in the global original_case_string. */
2410 #ifdef TC_CASE_SENSITIVE
2411 namelen = nameend - newname;
2412 #else
2413 newname = original_case_string;
2414 namelen = strlen (newname);
2415 #endif
2416
2417 namebuf = (char *) alloca (namelen + 1);
2418 strncpy (namebuf, newname, namelen);
2419 namebuf[namelen] = '\0';
2420
2421 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2422 typeinfo.defined != 0 ? &typeinfo : NULL);
2423
2424 /* Insert name in all uppercase. */
2425 for (p = namebuf; *p; p++)
2426 *p = TOUPPER (*p);
2427
2428 if (strncmp (namebuf, newname, namelen))
2429 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2430 typeinfo.defined != 0 ? &typeinfo : NULL);
2431
2432 /* Insert name in all lowercase. */
2433 for (p = namebuf; *p; p++)
2434 *p = TOLOWER (*p);
2435
2436 if (strncmp (namebuf, newname, namelen))
2437 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2438 typeinfo.defined != 0 ? &typeinfo : NULL);
2439
2440 return TRUE;
2441 }
2442
2443 /* Should never be called, as .req goes between the alias and the
2444 register name, not at the beginning of the line. */
2445
static void
s_req (int a ATTRIBUTE_UNUSED)
{
  /* A bare ".req" at the start of a statement is always malformed;
     well-formed uses ("alias .req reg") are intercepted earlier by
     create_register_alias.  */
  as_bad (_("invalid syntax for .req directive"));
}
2451
/* Should never be called, as .dn goes between the alias and the
   register name, not at the beginning of the line.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2457
/* Should never be called, as .qn goes between the alias and the
   register name, not at the beginning of the line.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2463
2464 /* The .unreq directive deletes an alias which was previously defined
2465 by .req. For example:
2466
2467 my_alias .req r11
2468 .unreq my_alias */
2469
2470 static void
2471 s_unreq (int a ATTRIBUTE_UNUSED)
2472 {
2473 char * name;
2474 char saved_char;
2475
2476 name = input_line_pointer;
2477
2478 while (*input_line_pointer != 0
2479 && *input_line_pointer != ' '
2480 && *input_line_pointer != '\n')
2481 ++input_line_pointer;
2482
2483 saved_char = *input_line_pointer;
2484 *input_line_pointer = 0;
2485
2486 if (!*name)
2487 as_bad (_("invalid syntax for .unreq directive"));
2488 else
2489 {
2490 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2491 name);
2492
2493 if (!reg)
2494 as_bad (_("unknown register alias '%s'"), name);
2495 else if (reg->builtin)
2496 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2497 name);
2498 else
2499 {
2500 char * p;
2501 char * nbuf;
2502
2503 hash_delete (arm_reg_hsh, name, FALSE);
2504 free ((char *) reg->name);
2505 if (reg->neon)
2506 free (reg->neon);
2507 free (reg);
2508
2509 /* Also locate the all upper case and all lower case versions.
2510 Do not complain if we cannot find one or the other as it
2511 was probably deleted above. */
2512
2513 nbuf = strdup (name);
2514 for (p = nbuf; *p; p++)
2515 *p = TOUPPER (*p);
2516 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2517 if (reg)
2518 {
2519 hash_delete (arm_reg_hsh, nbuf, FALSE);
2520 free ((char *) reg->name);
2521 if (reg->neon)
2522 free (reg->neon);
2523 free (reg);
2524 }
2525
2526 for (p = nbuf; *p; p++)
2527 *p = TOLOWER (*p);
2528 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2529 if (reg)
2530 {
2531 hash_delete (arm_reg_hsh, nbuf, FALSE);
2532 free ((char *) reg->name);
2533 if (reg->neon)
2534 free (reg->neon);
2535 free (reg);
2536 }
2537
2538 free (nbuf);
2539 }
2540 }
2541
2542 *input_line_pointer = saved_char;
2543 demand_empty_rest_of_line ();
2544 }
2545
2546 /* Directives: Instruction set selection. */
2547
2548 #ifdef OBJ_ELF
2549 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2550 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2551 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2552 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2553
2554 /* Create a new mapping symbol for the transition to STATE. */
2555
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Choose the symbol name mandated by the ARM ELF spec: $a for ARM
     code, $t for Thumb code, $d for data.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mark the symbol's ARM/Thumb-ness to match the region it opens;
     $d symbols carry neither.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* Same-offset duplicate: the newer symbol supersedes the old.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
2629
2630 /* We must sometimes convert a region marked as code to data during
2631 code alignment, if an odd number of bytes have to be padded. The
2632 code mapping symbol is pushed to an aligned address. */
2633
static void
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
{
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
    {
      symbolS *symp = frag->tc_frag_data.last_map;

      if (value == 0)
	{
	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
	}
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
    }

  /* Mark the padding bytes as data ($d), then resume STATE ($a or $t)
     at the aligned address VALUE + BYTES.  */
  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
}
2656
2657 static void mapping_state_2 (enum mstate state, int max_chars);
2658
2659 /* Set the mapping state to STATE. Only call this when about to
2660 emit some STATE bytes to the file. */
2661
2662 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /*  PR gas/12931
	All ARM instructions require 4-byte alignment.
	(Almost) all Thumb instructions require 2-byte alignment.

	When emitting instructions into any section, mark the section
	appropriately.

	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
	but themselves require 2-byte alignment; this applies to some
	PC- relative forms.  However, these cases will involve implicit
	literal pool generation or an explicit .align >=2, both of
	which will cause the section to be marked with sufficient
	alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  mapping_state_2 (state, 0);
}
2695
2696 /* Same as mapping_state, but MAX_CHARS bytes have already been
2697 allocated. Put the mapping symbol that far back. */
2698
2699 static void
2700 mapping_state_2 (enum mstate state, int max_chars)
2701 {
2702 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2703
2704 if (!SEG_NORMAL (now_seg))
2705 return;
2706
2707 if (mapstate == state)
2708 /* The mapping symbol has already been emitted.
2709 There is nothing else to do. */
2710 return;
2711
2712 if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2713 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2714 {
2715 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2716 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2717
2718 if (add_symbol)
2719 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2720 }
2721
2722 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2723 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2724 }
2725 #undef TRANSITION
2726 #else
2727 #define mapping_state(x) ((void)0)
2728 #define mapping_state_2(x, y) ((void)0)
2729 #endif
2730
2731 /* Find the real, Thumb encoded start of a Thumb function. */
2732
2733 #ifdef OBJ_COFF
2734 static symbolS *
2735 find_real_start (symbolS * symbolP)
2736 {
2737 char * real_start;
2738 const char * name = S_GET_NAME (symbolP);
2739 symbolS * new_target;
2740
2741 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2742 #define STUB_NAME ".real_start_of"
2743
2744 if (name == NULL)
2745 abort ();
2746
2747 /* The compiler may generate BL instructions to local labels because
2748 it needs to perform a branch to a far away location. These labels
2749 do not have a corresponding ".real_start_of" label. We check
2750 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2751 the ".real_start_of" convention for nonlocal branches. */
2752 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2753 return symbolP;
2754
2755 real_start = ACONCAT ((STUB_NAME, name, NULL));
2756 new_target = symbol_find (real_start);
2757
2758 if (new_target == NULL)
2759 {
2760 as_warn (_("Failed to find real start of function: %s\n"), name);
2761 new_target = symbolP;
2762 }
2763
2764 return new_target;
2765 }
2766 #endif
2767
2768 static void
2769 opcode_select (int width)
2770 {
2771 switch (width)
2772 {
2773 case 16:
2774 if (! thumb_mode)
2775 {
2776 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2777 as_bad (_("selected processor does not support THUMB opcodes"));
2778
2779 thumb_mode = 1;
2780 /* No need to force the alignment, since we will have been
2781 coming from ARM mode, which is word-aligned. */
2782 record_alignment (now_seg, 1);
2783 }
2784 break;
2785
2786 case 32:
2787 if (thumb_mode)
2788 {
2789 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2790 as_bad (_("selected processor does not support ARM opcodes"));
2791
2792 thumb_mode = 0;
2793
2794 if (!need_pass_2)
2795 frag_align (2, 0, 0);
2796
2797 record_alignment (now_seg, 1);
2798 }
2799 break;
2800
2801 default:
2802 as_bad (_("invalid instruction size selected (%d)"), width);
2803 }
2804 }
2805
/* Implement the ".arm" directive: select 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
2812
/* Implement the ".thumb" directive: select 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
2819
2820 static void
2821 s_code (int unused ATTRIBUTE_UNUSED)
2822 {
2823 int temp;
2824
2825 temp = get_absolute_expression ();
2826 switch (temp)
2827 {
2828 case 16:
2829 case 32:
2830 opcode_select (temp);
2831 break;
2832
2833 default:
2834 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2835 }
2836 }
2837
static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      /* 2 (rather than 1) marks the mode as forced.  */
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}
2854
/* Implement the ".thumb_func" directive: switch to Thumb encoding and
   arrange for the next label to be marked as a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
2864
2865 /* Perform a .set directive, but also mark the alias as
2866 being a thumb function. */
2867
static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char * name;
  char delim;
  char * end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim = get_symbol_name (& name);
  end_name = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily terminate NAME so it can be printed.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Undo the temporary NUL termination of NAME.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse and assign the value expression, as ".set" would.  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
2953
2954 /* Directives: Mode selection. */
2955
2956 /* .syntax [unified|divided] - choose the new unified syntax
2957 (same for Arm and Thumb encoding, modulo slight differences in what
2958 can be represented) or the old divergent syntax for each mode. */
2959 static void
2960 s_syntax (int unused ATTRIBUTE_UNUSED)
2961 {
2962 char *name, delim;
2963
2964 delim = get_symbol_name (& name);
2965
2966 if (!strcasecmp (name, "unified"))
2967 unified_syntax = TRUE;
2968 else if (!strcasecmp (name, "divided"))
2969 unified_syntax = FALSE;
2970 else
2971 {
2972 as_bad (_("unrecognized syntax mode \"%s\""), name);
2973 return;
2974 }
2975 (void) restore_line_pointer (delim);
2976 demand_empty_rest_of_line ();
2977 }
2978
2979 /* Directives: sectioning and alignment. */
2980
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
2993
/* Implement the ".even" directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3005
3006 /* Directives: CodeComposer Studio. */
3007
3008 /* .ref (for CodeComposer Studio syntax only). */
static void
s_ccs_ref (int unused ATTRIBUTE_UNUSED)
{
  /* ".ref" is accepted only for compatibility; its operands are
     simply discarded.  */
  if (codecomposer_syntax)
    ignore_rest_of_line ();
  else
    as_bad (_(".ref pseudo-op only available with -mccs flag."));
}
3017
3018 /* If name is not NULL, then it is used for marking the beginning of a
3019 function, wherease if it is NULL then it means the function end. */
3020 static void
3021 asmfunc_debug (const char * name)
3022 {
3023 static const char * last_name = NULL;
3024
3025 if (name != NULL)
3026 {
3027 gas_assert (last_name == NULL);
3028 last_name = name;
3029
3030 if (debug_type == DEBUG_STABS)
3031 stabs_generate_asm_func (name, name);
3032 }
3033 else
3034 {
3035 gas_assert (last_name != NULL);
3036
3037 if (debug_type == DEBUG_STABS)
3038 stabs_generate_asm_endfunc (last_name, last_name);
3039
3040 last_name = NULL;
3041 }
3042 }
3043
3044 static void
3045 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3046 {
3047 if (codecomposer_syntax)
3048 {
3049 switch (asmfunc_state)
3050 {
3051 case OUTSIDE_ASMFUNC:
3052 asmfunc_state = WAITING_ASMFUNC_NAME;
3053 break;
3054
3055 case WAITING_ASMFUNC_NAME:
3056 as_bad (_(".asmfunc repeated."));
3057 break;
3058
3059 case WAITING_ENDASMFUNC:
3060 as_bad (_(".asmfunc without function."));
3061 break;
3062 }
3063 demand_empty_rest_of_line ();
3064 }
3065 else
3066 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3067 }
3068
3069 static void
3070 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3071 {
3072 if (codecomposer_syntax)
3073 {
3074 switch (asmfunc_state)
3075 {
3076 case OUTSIDE_ASMFUNC:
3077 as_bad (_(".endasmfunc without a .asmfunc."));
3078 break;
3079
3080 case WAITING_ASMFUNC_NAME:
3081 as_bad (_(".endasmfunc without function."));
3082 break;
3083
3084 case WAITING_ENDASMFUNC:
3085 asmfunc_state = OUTSIDE_ASMFUNC;
3086 asmfunc_debug (NULL);
3087 break;
3088 }
3089 demand_empty_rest_of_line ();
3090 }
3091 else
3092 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3093 }
3094
/* Implement the CodeComposer ".def" directive: treated as ".global".  */

static void
s_ccs_def (int name)
{
  if (codecomposer_syntax)
    s_globl (name);
  else
    as_bad (_(".def pseudo-op only available with -mccs flag."));
}
3103
3104 /* Directives: Literal pools. */
3105
3106 static literal_pool *
3107 find_literal_pool (void)
3108 {
3109 literal_pool * pool;
3110
3111 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3112 {
3113 if (pool->section == now_seg
3114 && pool->sub_section == now_subseg)
3115 break;
3116 }
3117
3118 return pool;
3119 }
3120
3121 static literal_pool *
3122 find_or_make_literal_pool (void)
3123 {
3124 /* Next literal pool ID number. */
3125 static unsigned int latest_pool_num = 1;
3126 literal_pool * pool;
3127
3128 pool = find_literal_pool ();
3129
3130 if (pool == NULL)
3131 {
3132 /* Create a new pool. */
3133 pool = (literal_pool *) xmalloc (sizeof (* pool));
3134 if (! pool)
3135 return NULL;
3136
3137 pool->next_free_entry = 0;
3138 pool->section = now_seg;
3139 pool->sub_section = now_subseg;
3140 pool->next = list_of_pools;
3141 pool->symbol = NULL;
3142 pool->alignment = 2;
3143
3144 /* Add it to the list. */
3145 list_of_pools = pool;
3146 }
3147
3148 /* New pools, and emptied pools, will have a NULL symbol. */
3149 if (pool->symbol == NULL)
3150 {
3151 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3152 (valueT) 0, &zero_address_frag);
3153 pool->id = latest_pool_num ++;
3154 }
3155
3156 /* Done. */
3157 return pool;
3158 }
3159
3160 /* Add the literal in the global 'inst'
3161 structure to the relevant literal pool. */
3162
static int
add_to_lit_pool (unsigned int nbytes)
{
/* X_md encoding for pool entries: low byte is the entry size in
   bytes, high bits flag a padding slot inserted for 8-byte
   alignment.  */
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* An 8-byte literal is stored as two 4-byte halves; compute them in
     the correct order for the target endianness.  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.reloc.exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match an existing 4-byte constant...  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.reloc.exp.X_unsigned))
	    break;

	  /* ...or an existing symbolic entry.  */
	  if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	      && (inst.reloc.exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.reloc.exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.reloc.exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.reloc.exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte value can only be reused if it sits at an 8-byte
	 aligned offset and both halves match.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.reloc.exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.reloc.exp.X_unsigned))
	break;

      /* A padding slot left by an earlier 8-byte entry can be reused
	 for a 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.reloc.exp.X_op == O_constant
	        || inst.reloc.exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Insert a 4-byte padding slot to reach 8-byte alignment.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.reloc.exp;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the value as two consecutive 4-byte constants.  */
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.reloc.exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.reloc.exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's operand as a reference to the pool
     symbol plus the entry's byte offset.  */
  inst.reloc.exp.X_op	     = O_symbol;
  inst.reloc.exp.X_add_number = pool_size;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3323
3324 bfd_boolean
3325 tc_start_label_without_colon (void)
3326 {
3327 bfd_boolean ret = TRUE;
3328
3329 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3330 {
3331 const char *label = input_line_pointer;
3332
3333 while (!is_end_of_line[(int) label[-1]])
3334 --label;
3335
3336 if (*label == '.')
3337 {
3338 as_bad (_("Invalid label '%s'"), label);
3339 ret = FALSE;
3340 }
3341
3342 asmfunc_debug (label);
3343
3344 asmfunc_state = WAITING_ENDASMFUNC;
3345 }
3346
3347 return ret;
3348 }
3349
3350 /* Can't use symbol_new here, so have to create a symbol and then at
3351 a later date assign it a value. Thats what these functions do. */
3352
static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Take a private copy of NAME on the notes obstack; the caller's
     buffer may be reused or freed.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    /* It is too late to add a symbol once the table has been frozen.  */
    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  /* Give the object format and the target a chance to adjust the new
     symbol.  */
  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3400
/* Dump the current literal pool (if any) at the present location and
   mark it empty.  Implements the .ltorg and .pool directives.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte keeps the generated name out of the user namespace.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3452
3453 #ifdef OBJ_ELF
3454 /* Forward declarations for functions below, in the MD interface
3455 section. */
3456 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3457 static valueT create_unwind_entry (int);
3458 static void start_unwind_section (const segT, int);
3459 static void add_unwind_opcode (valueT, int);
3460 static void flush_pending_unwind (void);
3461
3462 /* Directives: Data. */
3463
3464 static void
3465 s_arm_elf_cons (int nbytes)
3466 {
3467 expressionS exp;
3468
3469 #ifdef md_flush_pending_output
3470 md_flush_pending_output ();
3471 #endif
3472
3473 if (is_it_end_of_statement ())
3474 {
3475 demand_empty_rest_of_line ();
3476 return;
3477 }
3478
3479 #ifdef md_cons_align
3480 md_cons_align (nbytes);
3481 #endif
3482
3483 mapping_state (MAP_DATA);
3484 do
3485 {
3486 int reloc;
3487 char *base = input_line_pointer;
3488
3489 expression (& exp);
3490
3491 if (exp.X_op != O_symbol)
3492 emit_expr (&exp, (unsigned int) nbytes);
3493 else
3494 {
3495 char *before_reloc = input_line_pointer;
3496 reloc = parse_reloc (&input_line_pointer);
3497 if (reloc == -1)
3498 {
3499 as_bad (_("unrecognized relocation suffix"));
3500 ignore_rest_of_line ();
3501 return;
3502 }
3503 else if (reloc == BFD_RELOC_UNUSED)
3504 emit_expr (&exp, (unsigned int) nbytes);
3505 else
3506 {
3507 reloc_howto_type *howto = (reloc_howto_type *)
3508 bfd_reloc_type_lookup (stdoutput,
3509 (bfd_reloc_code_real_type) reloc);
3510 int size = bfd_get_reloc_size (howto);
3511
3512 if (reloc == BFD_RELOC_ARM_PLT32)
3513 {
3514 as_bad (_("(plt) is only valid on branch targets"));
3515 reloc = BFD_RELOC_UNUSED;
3516 size = 0;
3517 }
3518
3519 if (size > nbytes)
3520 as_bad (_("%s relocations do not fit in %d bytes"),
3521 howto->name, nbytes);
3522 else
3523 {
3524 /* We've parsed an expression stopping at O_symbol.
3525 But there may be more expression left now that we
3526 have parsed the relocation marker. Parse it again.
3527 XXX Surely there is a cleaner way to do this. */
3528 char *p = input_line_pointer;
3529 int offset;
3530 char *save_buf = (char *) alloca (input_line_pointer - base);
3531 memcpy (save_buf, base, input_line_pointer - base);
3532 memmove (base + (input_line_pointer - before_reloc),
3533 base, before_reloc - base);
3534
3535 input_line_pointer = base + (input_line_pointer-before_reloc);
3536 expression (&exp);
3537 memcpy (base, save_buf, p - base);
3538
3539 offset = nbytes - size;
3540 p = frag_more (nbytes);
3541 memset (p, 0, nbytes);
3542 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3543 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3544 }
3545 }
3546 }
3547 }
3548 while (*input_line_pointer++ == ',');
3549
3550 /* Put terminator back into stream. */
3551 input_line_pointer --;
3552 demand_empty_rest_of_line ();
3553 }
3554
3555 /* Emit an expression containing a 32-bit thumb instruction.
3556 Implementation based on put_thumb32_insn. */
3557
3558 static void
3559 emit_thumb32_expr (expressionS * exp)
3560 {
3561 expressionS exp_high = *exp;
3562
3563 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3564 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3565 exp->X_add_number &= 0xffff;
3566 emit_expr (exp, (unsigned int) THUMB_SIZE);
3567 }
3568
3569 /* Guess the instruction size based on the opcode. */
3570
/* Guess the instruction size based on the opcode: 2 for a 16-bit
   Thumb encoding, 4 for a 32-bit one, 0 if it cannot be determined.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int enc = (unsigned int) opcode;

  if (enc < 0xe800u)
    return 2;
  if (enc >= 0xe8000000u)
    return 4;
  return 0;
}
3581
/* Emit the constant in EXP as one instruction of NBYTES (0 means:
   deduce the width from the encoding).  Returns TRUE if anything was
   emitted, FALSE after reporting an error.  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* Plain ".inst": guess the width from the opcode value.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* ".inst.n" with a value that needs 32 bits.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT-block state machine consistent before
		 emitting the raw encoding.  */
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* On little-endian targets a 32-bit Thumb instruction is
		 stored as two halfwords, most significant first.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. " \
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
3626
3627 /* Like s_arm_elf_cons but do not use md_cons_align and
3628 set the mapping state to MAP_ARM/MAP_THUMB. */
3629
3630 static void
3631 s_arm_elf_inst (int nbytes)
3632 {
3633 if (is_it_end_of_statement ())
3634 {
3635 demand_empty_rest_of_line ();
3636 return;
3637 }
3638
3639 /* Calling mapping_state () here will not change ARM/THUMB,
3640 but will ensure not to be in DATA state. */
3641
3642 if (thumb_mode)
3643 mapping_state (MAP_THUMB);
3644 else
3645 {
3646 if (nbytes != 0)
3647 {
3648 as_bad (_("width suffixes are invalid in ARM mode"));
3649 ignore_rest_of_line ();
3650 return;
3651 }
3652
3653 nbytes = 4;
3654
3655 mapping_state (MAP_ARM);
3656 }
3657
3658 do
3659 {
3660 expressionS exp;
3661
3662 expression (& exp);
3663
3664 if (! emit_insn (& exp, nbytes))
3665 {
3666 ignore_rest_of_line ();
3667 return;
3668 }
3669 }
3670 while (*input_line_pointer++ == ',');
3671
3672 /* Put terminator back into stream. */
3673 input_line_pointer --;
3674 demand_empty_rest_of_line ();
3675 }
3676
3677 /* Parse a .rel31 directive. */
3678
3679 static void
3680 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3681 {
3682 expressionS exp;
3683 char *p;
3684 valueT highbit;
3685
3686 highbit = 0;
3687 if (*input_line_pointer == '1')
3688 highbit = 0x80000000;
3689 else if (*input_line_pointer != '0')
3690 as_bad (_("expected 0 or 1"));
3691
3692 input_line_pointer++;
3693 if (*input_line_pointer != ',')
3694 as_bad (_("missing comma"));
3695 input_line_pointer++;
3696
3697 #ifdef md_flush_pending_output
3698 md_flush_pending_output ();
3699 #endif
3700
3701 #ifdef md_cons_align
3702 md_cons_align (4);
3703 #endif
3704
3705 mapping_state (MAP_DATA);
3706
3707 expression (&exp);
3708
3709 p = frag_more (4);
3710 md_number_to_chars (p, highbit, 4);
3711 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3712 BFD_RELOC_ARM_PREL31);
3713
3714 demand_empty_rest_of_line ();
3715 }
3716
3717 /* Directives: AEABI stack-unwind tables. */
3718
3719 /* Parse an unwind_fnstart directive. Simply records the current location. */
3720
3721 static void
3722 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3723 {
3724 demand_empty_rest_of_line ();
3725 if (unwind.proc_start)
3726 {
3727 as_bad (_("duplicate .fnstart directive"));
3728 return;
3729 }
3730
3731 /* Mark the start of the function. */
3732 unwind.proc_start = expr_build_dot ();
3733
3734 /* Reset the rest of the unwind info. */
3735 unwind.opcode_count = 0;
3736 unwind.table_entry = NULL;
3737 unwind.personality_routine = NULL;
3738 unwind.personality_index = -1;
3739 unwind.frame_size = 0;
3740 unwind.fp_offset = 0;
3741 unwind.fp_reg = REG_SP;
3742 unwind.fp_used = 0;
3743 unwind.sp_restored = 0;
3744 }
3745
3746
3747 /* Parse a handlerdata directive. Creates the exception handling table entry
3748 for the function. */
3749
3750 static void
3751 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3752 {
3753 demand_empty_rest_of_line ();
3754 if (!unwind.proc_start)
3755 as_bad (MISSING_FNSTART);
3756
3757 if (unwind.table_entry)
3758 as_bad (_("duplicate .handlerdata directive"));
3759
3760 create_unwind_entry (1);
3761 }
3762
3763 /* Parse an unwind_fnend directive. Generates the index table entry. */
3764
static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  /* WHERE is the offset of the two-word index entry just reserved.  */
  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-sized fix records the reference without patching bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
3832
3833
3834 /* Parse an unwind_cantunwind directive. */
3835
3836 static void
3837 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3838 {
3839 demand_empty_rest_of_line ();
3840 if (!unwind.proc_start)
3841 as_bad (MISSING_FNSTART);
3842
3843 if (unwind.personality_routine || unwind.personality_index != -1)
3844 as_bad (_("personality routine specified for cantunwind frame"));
3845
3846 unwind.personality_index = -2;
3847 }
3848
3849
3850 /* Parse a personalityindex directive. */
3851
3852 static void
3853 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3854 {
3855 expressionS exp;
3856
3857 if (!unwind.proc_start)
3858 as_bad (MISSING_FNSTART);
3859
3860 if (unwind.personality_routine || unwind.personality_index != -1)
3861 as_bad (_("duplicate .personalityindex directive"));
3862
3863 expression (&exp);
3864
3865 if (exp.X_op != O_constant
3866 || exp.X_add_number < 0 || exp.X_add_number > 15)
3867 {
3868 as_bad (_("bad personality routine number"));
3869 ignore_rest_of_line ();
3870 return;
3871 }
3872
3873 unwind.personality_index = exp.X_add_number;
3874
3875 demand_empty_rest_of_line ();
3876 }
3877
3878
3879 /* Parse a personality directive. */
3880
static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* get_symbol_name NUL-terminates the name in the input buffer; C is
     the character that was displaced and P is where it belongs.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Put the displaced character back before parsing continues.  */
  *p = c;
  demand_empty_rest_of_line ();
}
3900
3901
3902 /* Parse a directive saving core registers. */
3903
/* Parse a directive saving core registers.  RANGE is a bitmask with
   bit N set for each saved register rN.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the opcode the movsp emitted and save sp instead of ip.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
3977
3978
3979 /* Parse a directive saving FPA registers. */
3980
3981 static void
3982 s_arm_unwind_save_fpa (int reg)
3983 {
3984 expressionS exp;
3985 int num_regs;
3986 valueT op;
3987
3988 /* Get Number of registers to transfer. */
3989 if (skip_past_comma (&input_line_pointer) != FAIL)
3990 expression (&exp);
3991 else
3992 exp.X_op = O_illegal;
3993
3994 if (exp.X_op != O_constant)
3995 {
3996 as_bad (_("expected , <constant>"));
3997 ignore_rest_of_line ();
3998 return;
3999 }
4000
4001 num_regs = exp.X_add_number;
4002
4003 if (num_regs < 1 || num_regs > 4)
4004 {
4005 as_bad (_("number of registers must be in the range [1:4]"));
4006 ignore_rest_of_line ();
4007 return;
4008 }
4009
4010 demand_empty_rest_of_line ();
4011
4012 if (reg == 4)
4013 {
4014 /* Short form. */
4015 op = 0xb4 | (num_regs - 1);
4016 add_unwind_opcode (op, 1);
4017 }
4018 else
4019 {
4020 /* Long form. */
4021 op = 0xc800 | (reg << 4) | (num_regs - 1);
4022 add_unwind_opcode (op, 2);
4023 }
4024 unwind.frame_size += num_regs * 12;
4025 }
4026
4027
4028 /* Parse a directive saving VFP registers for ARMv6 and above. */
4029
static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* For START == 16 the else branch still gives the correct offset
	 of zero (16 - 16).  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register occupies 8 bytes on the stack.  */
  unwind.frame_size += count * 8;
}
4076
4077
4078 /* Parse a directive saving VFP registers for pre-ARMv6. */
4079
4080 static void
4081 s_arm_unwind_save_vfp (void)
4082 {
4083 int count;
4084 unsigned int reg;
4085 valueT op;
4086
4087 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
4088 if (count == FAIL)
4089 {
4090 as_bad (_("expected register list"));
4091 ignore_rest_of_line ();
4092 return;
4093 }
4094
4095 demand_empty_rest_of_line ();
4096
4097 if (reg == 8)
4098 {
4099 /* Short form. */
4100 op = 0xb8 | (count - 1);
4101 add_unwind_opcode (op, 1);
4102 }
4103 else
4104 {
4105 /* Long form. */
4106 op = 0xb300 | (reg << 4) | (count - 1);
4107 add_unwind_opcode (op, 2);
4108 }
4109 unwind.frame_size += count * 8 + 4;
4110 }
4111
4112
4113 /* Parse a directive saving iWMMXt data registers. */
4114
4115 static void
4116 s_arm_unwind_save_mmxwr (void)
4117 {
4118 int reg;
4119 int hi_reg;
4120 int i;
4121 unsigned mask = 0;
4122 valueT op;
4123
4124 if (*input_line_pointer == '{')
4125 input_line_pointer++;
4126
4127 do
4128 {
4129 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4130
4131 if (reg == FAIL)
4132 {
4133 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4134 goto error;
4135 }
4136
4137 if (mask >> reg)
4138 as_tsktsk (_("register list not in ascending order"));
4139 mask |= 1 << reg;
4140
4141 if (*input_line_pointer == '-')
4142 {
4143 input_line_pointer++;
4144 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4145 if (hi_reg == FAIL)
4146 {
4147 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4148 goto error;
4149 }
4150 else if (reg >= hi_reg)
4151 {
4152 as_bad (_("bad register range"));
4153 goto error;
4154 }
4155 for (; reg < hi_reg; reg++)
4156 mask |= 1 << reg;
4157 }
4158 }
4159 while (skip_past_comma (&input_line_pointer) != FAIL);
4160
4161 skip_past_char (&input_line_pointer, '}');
4162
4163 demand_empty_rest_of_line ();
4164
4165 /* Generate any deferred opcodes because we're going to be looking at
4166 the list. */
4167 flush_pending_unwind ();
4168
4169 for (i = 0; i < 16; i++)
4170 {
4171 if (mask & (1 << i))
4172 unwind.frame_size += 8;
4173 }
4174
4175 /* Attempt to combine with a previous opcode. We do this because gcc
4176 likes to output separate unwind directives for a single block of
4177 registers. */
4178 if (unwind.opcode_count > 0)
4179 {
4180 i = unwind.opcodes[unwind.opcode_count - 1];
4181 if ((i & 0xf8) == 0xc0)
4182 {
4183 i &= 7;
4184 /* Only merge if the blocks are contiguous. */
4185 if (i < 6)
4186 {
4187 if ((mask & 0xfe00) == (1 << 9))
4188 {
4189 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4190 unwind.opcode_count--;
4191 }
4192 }
4193 else if (i == 6 && unwind.opcode_count >= 2)
4194 {
4195 i = unwind.opcodes[unwind.opcode_count - 2];
4196 reg = i >> 4;
4197 i &= 0xf;
4198
4199 op = 0xffff << (reg - 1);
4200 if (reg > 0
4201 && ((mask & op) == (1u << (reg - 1))))
4202 {
4203 op = (1 << (reg + i + 1)) - 1;
4204 op &= ~((1 << reg) - 1);
4205 mask |= op;
4206 unwind.opcode_count -= 2;
4207 }
4208 }
4209 }
4210 }
4211
4212 hi_reg = 15;
4213 /* We want to generate opcodes in the order the registers have been
4214 saved, ie. descending order. */
4215 for (reg = 15; reg >= -1; reg--)
4216 {
4217 /* Save registers in blocks. */
4218 if (reg < 0
4219 || !(mask & (1 << reg)))
4220 {
4221 /* We found an unsaved reg. Generate opcodes to save the
4222 preceding block. */
4223 if (reg != hi_reg)
4224 {
4225 if (reg == 9)
4226 {
4227 /* Short form. */
4228 op = 0xc0 | (hi_reg - 10);
4229 add_unwind_opcode (op, 1);
4230 }
4231 else
4232 {
4233 /* Long form. */
4234 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4235 add_unwind_opcode (op, 2);
4236 }
4237 }
4238 hi_reg = reg - 1;
4239 }
4240 }
4241
4242 return;
4243 error:
4244 ignore_rest_of_line ();
4245 }
4246
4247 static void
4248 s_arm_unwind_save_mmxwcg (void)
4249 {
4250 int reg;
4251 int hi_reg;
4252 unsigned mask = 0;
4253 valueT op;
4254
4255 if (*input_line_pointer == '{')
4256 input_line_pointer++;
4257
4258 skip_whitespace (input_line_pointer);
4259
4260 do
4261 {
4262 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4263
4264 if (reg == FAIL)
4265 {
4266 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4267 goto error;
4268 }
4269
4270 reg -= 8;
4271 if (mask >> reg)
4272 as_tsktsk (_("register list not in ascending order"));
4273 mask |= 1 << reg;
4274
4275 if (*input_line_pointer == '-')
4276 {
4277 input_line_pointer++;
4278 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4279 if (hi_reg == FAIL)
4280 {
4281 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4282 goto error;
4283 }
4284 else if (reg >= hi_reg)
4285 {
4286 as_bad (_("bad register range"));
4287 goto error;
4288 }
4289 for (; reg < hi_reg; reg++)
4290 mask |= 1 << reg;
4291 }
4292 }
4293 while (skip_past_comma (&input_line_pointer) != FAIL);
4294
4295 skip_past_char (&input_line_pointer, '}');
4296
4297 demand_empty_rest_of_line ();
4298
4299 /* Generate any deferred opcodes because we're going to be looking at
4300 the list. */
4301 flush_pending_unwind ();
4302
4303 for (reg = 0; reg < 16; reg++)
4304 {
4305 if (mask & (1 << reg))
4306 unwind.frame_size += 4;
4307 }
4308 op = 0xc700 | mask;
4309 add_unwind_opcode (op, 2);
4310 return;
4311 error:
4312 ignore_rest_of_line ();
4313 }
4314
4315
4316 /* Parse an unwind_save directive.
4317 If the argument is non-zero, this is a .vsave directive. */
4318
4319 static void
4320 s_arm_unwind_save (int arch_v6)
4321 {
4322 char *peek;
4323 struct reg_entry *reg;
4324 bfd_boolean had_brace = FALSE;
4325
4326 if (!unwind.proc_start)
4327 as_bad (MISSING_FNSTART);
4328
4329 /* Figure out what sort of save we have. */
4330 peek = input_line_pointer;
4331
4332 if (*peek == '{')
4333 {
4334 had_brace = TRUE;
4335 peek++;
4336 }
4337
4338 reg = arm_reg_parse_multi (&peek);
4339
4340 if (!reg)
4341 {
4342 as_bad (_("register expected"));
4343 ignore_rest_of_line ();
4344 return;
4345 }
4346
4347 switch (reg->type)
4348 {
4349 case REG_TYPE_FN:
4350 if (had_brace)
4351 {
4352 as_bad (_("FPA .unwind_save does not take a register list"));
4353 ignore_rest_of_line ();
4354 return;
4355 }
4356 input_line_pointer = peek;
4357 s_arm_unwind_save_fpa (reg->number);
4358 return;
4359
4360 case REG_TYPE_RN:
4361 s_arm_unwind_save_core ();
4362 return;
4363
4364 case REG_TYPE_VFD:
4365 if (arch_v6)
4366 s_arm_unwind_save_vfp_armv6 ();
4367 else
4368 s_arm_unwind_save_vfp ();
4369 return;
4370
4371 case REG_TYPE_MMXWR:
4372 s_arm_unwind_save_mmxwr ();
4373 return;
4374
4375 case REG_TYPE_MMXWCG:
4376 s_arm_unwind_save_mmxwcg ();
4377 return;
4378
4379 default:
4380 as_bad (_(".unwind_save does not support this kind of register"));
4381 ignore_rest_of_line ();
4382 }
4383 }
4384
4385
4386 /* Parse an unwind_movsp directive. */
4387
4388 static void
4389 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4390 {
4391 int reg;
4392 valueT op;
4393 int offset;
4394
4395 if (!unwind.proc_start)
4396 as_bad (MISSING_FNSTART);
4397
4398 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4399 if (reg == FAIL)
4400 {
4401 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4402 ignore_rest_of_line ();
4403 return;
4404 }
4405
4406 /* Optional constant. */
4407 if (skip_past_comma (&input_line_pointer) != FAIL)
4408 {
4409 if (immediate_for_directive (&offset) == FAIL)
4410 return;
4411 }
4412 else
4413 offset = 0;
4414
4415 demand_empty_rest_of_line ();
4416
4417 if (reg == REG_SP || reg == REG_PC)
4418 {
4419 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4420 return;
4421 }
4422
4423 if (unwind.fp_reg != REG_SP)
4424 as_bad (_("unexpected .unwind_movsp directive"));
4425
4426 /* Generate opcode to restore the value. */
4427 op = 0x90 | reg;
4428 add_unwind_opcode (op, 1);
4429
4430 /* Record the information for later. */
4431 unwind.fp_reg = reg;
4432 unwind.fp_offset = unwind.frame_size - offset;
4433 unwind.sp_restored = 1;
4434 }
4435
4436 /* Parse an unwind_pad directive. */
4437
4438 static void
4439 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4440 {
4441 int offset;
4442
4443 if (!unwind.proc_start)
4444 as_bad (MISSING_FNSTART);
4445
4446 if (immediate_for_directive (&offset) == FAIL)
4447 return;
4448
4449 if (offset & 3)
4450 {
4451 as_bad (_("stack increment must be multiple of 4"));
4452 ignore_rest_of_line ();
4453 return;
4454 }
4455
4456 /* Don't generate any opcodes, just record the details for later. */
4457 unwind.frame_size += offset;
4458 unwind.pending_offset += offset;
4459
4460 demand_empty_rest_of_line ();
4461 }
4462
4463 /* Parse an unwind_setfp directive. */
4464
4465 static void
4466 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4467 {
4468 int sp_reg;
4469 int fp_reg;
4470 int offset;
4471
4472 if (!unwind.proc_start)
4473 as_bad (MISSING_FNSTART);
4474
4475 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4476 if (skip_past_comma (&input_line_pointer) == FAIL)
4477 sp_reg = FAIL;
4478 else
4479 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4480
4481 if (fp_reg == FAIL || sp_reg == FAIL)
4482 {
4483 as_bad (_("expected <reg>, <reg>"));
4484 ignore_rest_of_line ();
4485 return;
4486 }
4487
4488 /* Optional constant. */
4489 if (skip_past_comma (&input_line_pointer) != FAIL)
4490 {
4491 if (immediate_for_directive (&offset) == FAIL)
4492 return;
4493 }
4494 else
4495 offset = 0;
4496
4497 demand_empty_rest_of_line ();
4498
4499 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4500 {
4501 as_bad (_("register must be either sp or set by a previous"
4502 "unwind_movsp directive"));
4503 return;
4504 }
4505
4506 /* Don't generate any opcodes, just record the information for later. */
4507 unwind.fp_reg = fp_reg;
4508 unwind.fp_used = 1;
4509 if (sp_reg == REG_SP)
4510 unwind.fp_offset = unwind.frame_size - offset;
4511 else
4512 unwind.fp_offset -= offset;
4513 }
4514
4515 /* Parse an unwind_raw directive. */
4516
4517 static void
4518 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4519 {
4520 expressionS exp;
4521 /* This is an arbitrary limit. */
4522 unsigned char op[16];
4523 int count;
4524
4525 if (!unwind.proc_start)
4526 as_bad (MISSING_FNSTART);
4527
4528 expression (&exp);
4529 if (exp.X_op == O_constant
4530 && skip_past_comma (&input_line_pointer) != FAIL)
4531 {
4532 unwind.frame_size += exp.X_add_number;
4533 expression (&exp);
4534 }
4535 else
4536 exp.X_op = O_illegal;
4537
4538 if (exp.X_op != O_constant)
4539 {
4540 as_bad (_("expected <offset>, <opcode>"));
4541 ignore_rest_of_line ();
4542 return;
4543 }
4544
4545 count = 0;
4546
4547 /* Parse the opcode. */
4548 for (;;)
4549 {
4550 if (count >= 16)
4551 {
4552 as_bad (_("unwind opcode too long"));
4553 ignore_rest_of_line ();
4554 }
4555 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4556 {
4557 as_bad (_("invalid unwind opcode"));
4558 ignore_rest_of_line ();
4559 return;
4560 }
4561 op[count++] = exp.X_add_number;
4562
4563 /* Parse the next byte. */
4564 if (skip_past_comma (&input_line_pointer) == FAIL)
4565 break;
4566
4567 expression (&exp);
4568 }
4569
4570 /* Add the opcode bytes in reverse order. */
4571 while (count--)
4572 add_unwind_opcode (op[count], 1);
4573
4574 demand_empty_rest_of_line ();
4575 }
4576
4577
4578 /* Parse a .eabi_attribute directive. */
4579
4580 static void
4581 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4582 {
4583 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4584
4585 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4586 attributes_set_explicitly[tag] = 1;
4587 }
4588
4589 /* Emit a tls fix for the symbol. */
4590
static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* Attach the fix at the current output position; no bytes are
     emitted for the directive itself.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
4612 #endif /* OBJ_ELF */
4613
4614 static void s_arm_arch (int);
4615 static void s_arm_object_arch (int);
4616 static void s_arm_cpu (int);
4617 static void s_arm_fpu (int);
4618 static void s_arm_arch_extension (int);
4619
4620 #ifdef TE_PE
4621
4622 static void
4623 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4624 {
4625 expressionS exp;
4626
4627 do
4628 {
4629 expression (&exp);
4630 if (exp.X_op == O_symbol)
4631 exp.X_op = O_secrel;
4632
4633 emit_expr (&exp, 4);
4634 }
4635 while (*input_line_pointer++ == ',');
4636
4637 input_line_pointer--;
4638 demand_empty_rest_of_line ();
4639 }
4640 #endif /* TE_PE */
4641
4642 /* This table describes all the machine specific pseudo-ops the assembler
4643 has to support. The fields are:
4644 pseudo-op name without dot
4645 function to call to execute this pseudo-op
4646 Integer arg to pass to the function. */
4647
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  /* Instruction-set / state selection.  */
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,   0 },
  { "even",	   s_even,	  0 },
  /* Literal pool management ('.pool' is an alias for '.ltorg').  */
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  /* Target architecture / FPU selection.  */
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	  0 },
  /* ARM EHABI unwinding directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  /* '.save' handles core registers; '.vsave' (arg 1) handles VFP.  */
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute, 0 },
  { "tlsdescseq",	s_arm_tls_descseq, 0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  /* FPA extended-precision and packed-decimal constants.  */
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  { 0, 0, 0 }
};
4721 \f
4722 /* Parser functions used exclusively in instruction operands. */
4723
4724 /* Generic immediate-value read function for use in insn parsing.
4725 STR points to the beginning of the immediate (the leading #);
4726 VAL receives the value; if the value is outside [MIN, MAX]
4727 issue an error. PREFIX_OPT is true if the immediate prefix is
4728 optional. */
4729
4730 static int
4731 parse_immediate (char **str, int *val, int min, int max,
4732 bfd_boolean prefix_opt)
4733 {
4734 expressionS exp;
4735 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4736 if (exp.X_op != O_constant)
4737 {
4738 inst.error = _("constant expression required");
4739 return FAIL;
4740 }
4741
4742 if (exp.X_add_number < min || exp.X_add_number > max)
4743 {
4744 inst.error = _("immediate value out of range");
4745 return FAIL;
4746 }
4747
4748 *val = exp.X_add_number;
4749 return SUCCESS;
4750 }
4751
4752 /* Less-generic immediate-value read function with the possibility of loading a
4753 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4754 instructions. Puts the result directly in inst.operands[i]. */
4755
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  /* Parse into the caller's expression when one is supplied, else into
     a local scratch expression.  */
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits go into .imm ...  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  /* ... and the high 32 bits into .reg, flagged by .regisimm.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number counts the littlenums that make up the
	 value; PARTS is how many littlenums form one 32-bit word.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* ... and the next 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
4824
4825 /* Returns the pseudo-register number of an FPA immediate constant,
4826 or FAIL if there isn't a valid constant here. */
4827
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    /* FPA immediate constants are numbered from 8 upward.  */
	    return i + 8;
	  /* Not followed by end of line: undo the consumption.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  /* expression () operates on input_line_pointer, so temporarily
     redirect it at our string and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* No match: restore input_line_pointer and report failure.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
4917
4918 /* Returns 1 if a number has "quarter-precision" float format
4919 0baBbbbbbc defgh000 00000000 00000000. */
4920
/* Return 1 if IMM has the "quarter-precision" float layout
   0baBbbbbbc defgh000 00000000 00000000, else 0: the low 19 bits must
   be zero and bits 30..25 must be 0b100000 (bit 29 clear) or 0b011111
   (bit 29 set) -- i.e. bit 30 is the complement of bits 29..25.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned exp_pattern = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == exp_pattern;
}
4927
4928
4929 /* Detect the presence of a floating point or integer zero constant,
4930 i.e. #0.0 or #0. */
4931
4932 static bfd_boolean
4933 parse_ifimm_zero (char **in)
4934 {
4935 int error_code;
4936
4937 if (!is_immediate_prefix (**in))
4938 return FALSE;
4939
4940 ++*in;
4941
4942 /* Accept #0x0 as a synonym for #0. */
4943 if (strncmp (*in, "0x", 2) == 0)
4944 {
4945 int val;
4946 if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
4947 return FALSE;
4948 return TRUE;
4949 }
4950
4951 error_code = atof_generic (in, ".", EXP_CHARS,
4952 &generic_floating_point_number);
4953
4954 if (!error_code
4955 && generic_floating_point_number.sign == '+'
4956 && (generic_floating_point_number.low
4957 > generic_floating_point_number.leader))
4958 return TRUE;
4959
4960 return FALSE;
4961 }
4962
4963 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4964 0baBbbbbbc defgh000 00000000 00000000.
4965 The zero and minus-zero cases need special handling, since they can't be
4966 encoded in the "quarter-precision" float format, but can nonetheless be
4967 loaded as integer constants. */
4968
4969 static unsigned
4970 parse_qfloat_immediate (char **ccp, int *immed)
4971 {
4972 char *str = *ccp;
4973 char *fpnum;
4974 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4975 int found_fpchar = 0;
4976
4977 skip_past_char (&str, '#');
4978
4979 /* We must not accidentally parse an integer as a floating-point number. Make
4980 sure that the value we parse is not an integer by checking for special
4981 characters '.' or 'e'.
4982 FIXME: This is a horrible hack, but doing better is tricky because type
4983 information isn't in a very usable state at parse time. */
4984 fpnum = str;
4985 skip_whitespace (fpnum);
4986
4987 if (strncmp (fpnum, "0x", 2) == 0)
4988 return FAIL;
4989 else
4990 {
4991 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4992 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4993 {
4994 found_fpchar = 1;
4995 break;
4996 }
4997
4998 if (!found_fpchar)
4999 return FAIL;
5000 }
5001
5002 if ((str = atof_ieee (str, 's', words)) != NULL)
5003 {
5004 unsigned fpword = 0;
5005 int i;
5006
5007 /* Our FP word must be 32 bits (single-precision FP). */
5008 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
5009 {
5010 fpword <<= LITTLENUM_NUMBER_OF_BITS;
5011 fpword |= words[i];
5012 }
5013
5014 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
5015 *immed = fpword;
5016 else
5017 return FAIL;
5018
5019 *ccp = str;
5020
5021 return SUCCESS;
5022 }
5023
5024 return FAIL;
5025 }
5026
/* Shift operands.  */
/* NOTE: SHIFT_RRX takes no shift amount (see parse_shift).  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

/* Maps a shift mnemonic onto its canonical operation; entries are
   looked up via the arm_shift_hsh hash table in parse_shift.  */
struct asm_shift_name
{
  const char	  *name;	/* Mnemonic as written in the source.  */
  enum shift_kind  kind;	/* Canonical shift operation.  */
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};
5048
5049 /* Parse a <shift> specifier on an ARM data processing instruction.
5050 This has three forms:
5051
5052 (LSL|LSR|ASL|ASR|ROR) Rs
5053 (LSL|LSR|ASL|ASR|ROR) #imm
5054 RRX
5055
5056 Note that ASL is assimilated to LSL in the instruction encoding, and
5057 RRX to ROR #0 (which cannot be written as such). */
5058
5059 static int
5060 parse_shift (char **str, int i, enum parse_shift_mode mode)
5061 {
5062 const struct asm_shift_name *shift_name;
5063 enum shift_kind shift;
5064 char *s = *str;
5065 char *p = s;
5066 int reg;
5067
5068 for (p = *str; ISALPHA (*p); p++)
5069 ;
5070
5071 if (p == *str)
5072 {
5073 inst.error = _("shift expression expected");
5074 return FAIL;
5075 }
5076
5077 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5078 p - *str);
5079
5080 if (shift_name == NULL)
5081 {
5082 inst.error = _("shift expression expected");
5083 return FAIL;
5084 }
5085
5086 shift = shift_name->kind;
5087
5088 switch (mode)
5089 {
5090 case NO_SHIFT_RESTRICT:
5091 case SHIFT_IMMEDIATE: break;
5092
5093 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5094 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5095 {
5096 inst.error = _("'LSL' or 'ASR' required");
5097 return FAIL;
5098 }
5099 break;
5100
5101 case SHIFT_LSL_IMMEDIATE:
5102 if (shift != SHIFT_LSL)
5103 {
5104 inst.error = _("'LSL' required");
5105 return FAIL;
5106 }
5107 break;
5108
5109 case SHIFT_ASR_IMMEDIATE:
5110 if (shift != SHIFT_ASR)
5111 {
5112 inst.error = _("'ASR' required");
5113 return FAIL;
5114 }
5115 break;
5116
5117 default: abort ();
5118 }
5119
5120 if (shift != SHIFT_RRX)
5121 {
5122 /* Whitespace can appear here if the next thing is a bare digit. */
5123 skip_whitespace (p);
5124
5125 if (mode == NO_SHIFT_RESTRICT
5126 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5127 {
5128 inst.operands[i].imm = reg;
5129 inst.operands[i].immisreg = 1;
5130 }
5131 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5132 return FAIL;
5133 }
5134 inst.operands[i].shift_kind = shift;
5135 inst.operands[i].shifted = 1;
5136 *str = p;
5137 return SUCCESS;
5138 }
5139
5140 /* Parse a <shifter_operand> for an ARM data processing instruction:
5141
5142 #<immediate>
5143 #<immediate>, <rotate>
5144 <Rm>
5145 <Rm>, <shift>
5146
5147 where <shift> is defined by parse_shift above, and <rotate> is a
5148 multiple of 2 between 0 and 30. Validation of immediate operands
5149 is deferred to md_apply_fix. */
5150
5151 static int
5152 parse_shifter_operand (char **str, int i)
5153 {
5154 int value;
5155 expressionS exp;
5156
5157 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5158 {
5159 inst.operands[i].reg = value;
5160 inst.operands[i].isreg = 1;
5161
5162 /* parse_shift will override this if appropriate */
5163 inst.reloc.exp.X_op = O_constant;
5164 inst.reloc.exp.X_add_number = 0;
5165
5166 if (skip_past_comma (str) == FAIL)
5167 return SUCCESS;
5168
5169 /* Shift operation on register. */
5170 return parse_shift (str, i, NO_SHIFT_RESTRICT);
5171 }
5172
5173 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
5174 return FAIL;
5175
5176 if (skip_past_comma (str) == SUCCESS)
5177 {
5178 /* #x, y -- ie explicit rotation by Y. */
5179 if (my_get_expression (&exp, str, GE_NO_PREFIX))
5180 return FAIL;
5181
5182 if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
5183 {
5184 inst.error = _("constant expression expected");
5185 return FAIL;
5186 }
5187
5188 value = exp.X_add_number;
5189 if (value < 0 || value > 30 || value % 2 != 0)
5190 {
5191 inst.error = _("invalid rotation");
5192 return FAIL;
5193 }
5194 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
5195 {
5196 inst.error = _("invalid constant");
5197 return FAIL;
5198 }
5199
5200 /* Encode as specified. */
5201 inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
5202 return SUCCESS;
5203 }
5204
5205 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5206 inst.reloc.pc_rel = 0;
5207 return SUCCESS;
5208 }
5209
5210 /* Group relocation information. Each entry in the table contains the
5211 textual name of the relocation as may appear in assembler source
5212 and must end with a colon.
5213 Along with this textual name are the relocation codes to be used if
5214 the corresponding instruction is an ALU instruction (ADD or SUB only),
5215 an LDR, an LDRS, or an LDC. */
5216
struct group_reloc_table_entry
{
  const char *name;	/* Relocation name as written, minus the colon.  */
  int alu_code;		/* BFD reloc for ADD/SUB; 0 means not allowed.  */
  int ldr_code;		/* BFD reloc for LDR; 0 means not allowed.  */
  int ldrs_code;	/* BFD reloc for LDRS; 0 means not allowed.  */
  int ldc_code;		/* BFD reloc for LDC; 0 means not allowed.  */
};

/* Selects which of the non-ALU codes above applies; passed to
   parse_address_main and friends.  */
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;
5234
/* A zero code below means the relocation is not permitted for that
   instruction class (reported by the parsers as "this group relocation
   is not allowed on this instruction").  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5309
5310 /* Given the address of a pointer pointing to the textual name of a group
5311 relocation as may appear in assembler source, attempt to find its details
5312 in group_reloc_table. The pointer will be updated to the character after
5313 the trailing colon. On failure, FAIL will be returned; SUCCESS
5314 otherwise. On success, *entry will be updated to point at the relevant
5315 group_reloc_table entry. */
5316
5317 static int
5318 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5319 {
5320 unsigned int i;
5321 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5322 {
5323 int length = strlen (group_reloc_table[i].name);
5324
5325 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5326 && (*str)[length] == ':')
5327 {
5328 *out = &group_reloc_table[i];
5329 *str += (length + 1);
5330 return SUCCESS;
5331 }
5332 }
5333
5334 return FAIL;
5335 }
5336
5337 /* Parse a <shifter_operand> for an ARM data processing instruction
5338 (as for parse_shifter_operand) where group relocations are allowed:
5339
5340 #<immediate>
5341 #<immediate>, <rotate>
5342 #:<group_reloc>:<expression>
5343 <Rm>
5344 <Rm>, <shift>
5345
5346 where <group_reloc> is one of the strings defined in group_reloc_table.
5347 The hashes are optional.
5348
5349 Everything else is as for parse_shifter_operand. */
5350
5351 static parse_operand_result
5352 parse_shifter_operand_group_reloc (char **str, int i)
5353 {
5354 /* Determine if we have the sequence of characters #: or just :
5355 coming next. If we do, then we check for a group relocation.
5356 If we don't, punt the whole lot to parse_shifter_operand. */
5357
5358 if (((*str)[0] == '#' && (*str)[1] == ':')
5359 || (*str)[0] == ':')
5360 {
5361 struct group_reloc_table_entry *entry;
5362
5363 if ((*str)[0] == '#')
5364 (*str) += 2;
5365 else
5366 (*str)++;
5367
5368 /* Try to parse a group relocation. Anything else is an error. */
5369 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5370 {
5371 inst.error = _("unknown group relocation");
5372 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5373 }
5374
5375 /* We now have the group relocation table entry corresponding to
5376 the name in the assembler source. Next, we parse the expression. */
5377 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5378 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5379
5380 /* Record the relocation type (always the ALU variant here). */
5381 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5382 gas_assert (inst.reloc.type != 0);
5383
5384 return PARSE_OPERAND_SUCCESS;
5385 }
5386 else
5387 return parse_shifter_operand (str, i) == SUCCESS
5388 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5389
5390 /* Never reached. */
5391 }
5392
5393 /* Parse a Neon alignment expression. Information is written to
5394 inst.operands[i]. We assume the initial ':' has been skipped.
5395
5396 align .imm = align << 8, .immisalign=1, .preind=0 */
5397 static parse_operand_result
5398 parse_neon_alignment (char **str, int i)
5399 {
5400 char *p = *str;
5401 expressionS exp;
5402
5403 my_get_expression (&exp, &p, GE_NO_PREFIX);
5404
5405 if (exp.X_op != O_constant)
5406 {
5407 inst.error = _("alignment must be constant");
5408 return PARSE_OPERAND_FAIL;
5409 }
5410
5411 inst.operands[i].imm = exp.X_add_number << 8;
5412 inst.operands[i].immisalign = 1;
5413 /* Alignments are not pre-indexes. */
5414 inst.operands[i].preind = 0;
5415
5416 *str = p;
5417 return PARSE_OPERAND_SUCCESS;
5418 }
5419
5420 /* Parse all forms of an ARM address expression. Information is written
5421 to inst.operands[i] and/or inst.reloc.
5422
5423 Preindexed addressing (.preind=1):
5424
5425 [Rn, #offset] .reg=Rn .reloc.exp=offset
5426 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5427 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5428 .shift_kind=shift .reloc.exp=shift_imm
5429
5430 These three may have a trailing ! which causes .writeback to be set also.
5431
5432 Postindexed addressing (.postind=1, .writeback=1):
5433
5434 [Rn], #offset .reg=Rn .reloc.exp=offset
5435 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5436 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5437 .shift_kind=shift .reloc.exp=shift_imm
5438
5439 Unindexed addressing (.preind=0, .postind=0):
5440
5441 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5442
5443 Other:
5444
5445 [Rn]{!} shorthand for [Rn,#0]{!}
5446 =immediate .isreg=0 .reloc.exp=immediate
5447 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5448
5449 It is the caller's responsibility to check for addressing modes not
5450 supported by the instruction, and to set inst.reloc.type. */
5451
static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  /* No '[': the operand is either '=immediate' or a bare label.  */
  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* Every bracketed form begins with a base register.  */
  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  /* Pre-indexed forms: [Rn, #offset], [Rn, +/-Rm{, shift}], or a
     group relocation.  */
  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  /* Register offset, optionally shifted.  */
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  /* Not a register: back out any consumed '-' so the full
	     (possibly negative) expression is re-parsed below.  */
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero table entry means this class of instruction does
		 not accept the relocation.  */
	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  /* Trailing '!' requests writeback.  */
  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  /* Post-indexed and unindexed forms: [Rn], ...  */
  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5703
5704 static int
5705 parse_address (char **str, int i)
5706 {
5707 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5708 ? SUCCESS : FAIL;
5709 }
5710
5711 static parse_operand_result
5712 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5713 {
5714 return parse_address_main (str, i, 1, type);
5715 }
5716
5717 /* Parse an operand for a MOVW or MOVT instruction. */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  /* An optional :lower16: / :upper16: prefix selects the MOVW or MOVT
     relocation; inst.reloc.type is BFD_RELOC_UNUSED on entry.  */
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      /* Step over the 9-character prefix just matched.  */
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      /* No relocation prefix: the operand must be a constant that fits
	 in 16 bits.  */
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
5756
5757 /* Miscellaneous. */
5758
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when parsing the destination of an MSR (a write); it decides
   whether an implicit PSR_f mask is supplied when none is spelled out.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile special registers (e.g. PRIMASK, IPSR) are looked up in
	 their own hash table.  Scan the whole identifier first.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *PSR names, stop the lookup just after the final 'r' so a
	 following suffix (e.g. "_nzcvq") is not swallowed.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Skip past "CPSR"/"SPSR"/"APSR" and look for a suffix.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Accumulate one bit per letter; a repeated letter sets the 0x20
	     (or 0x2 for 'g') marker, which is rejected as a bad bitmask
	     below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v, q together map to the PSR_f field.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      /* The 'g' bit requires the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits and incomplete nzcvq subsets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR use named field suffixes, looked up in arm_psr_hsh.  */
	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						      p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;  /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
5955
5956 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5957 value suitable for splatting into the AIF field of the instruction. */
5958
5959 static int
5960 parse_cps_flags (char **str)
5961 {
5962 int val = 0;
5963 int saw_a_flag = 0;
5964 char *s = *str;
5965
5966 for (;;)
5967 switch (*s++)
5968 {
5969 case '\0': case ',':
5970 goto done;
5971
5972 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5973 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5974 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5975
5976 default:
5977 inst.error = _("unrecognized CPS flag");
5978 return FAIL;
5979 }
5980
5981 done:
5982 if (saw_a_flag == 0)
5983 {
5984 inst.error = _("missing CPS flags");
5985 return FAIL;
5986 }
5987
5988 *str = s - 1;
5989 return val;
5990 }
5991
5992 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5993 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5994
5995 static int
5996 parse_endian_specifier (char **str)
5997 {
5998 int little_endian;
5999 char *s = *str;
6000
6001 if (strncasecmp (s, "BE", 2))
6002 little_endian = 0;
6003 else if (strncasecmp (s, "LE", 2))
6004 little_endian = 1;
6005 else
6006 {
6007 inst.error = _("valid endian specifiers are be or le");
6008 return FAIL;
6009 }
6010
6011 if (ISALNUM (s[2]) || s[2] == '_')
6012 {
6013 inst.error = _("valid endian specifiers are be or le");
6014 return FAIL;
6015 }
6016
6017 *str = s + 2;
6018 return little_endian;
6019 }
6020
6021 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6022 value suitable for poking into the rotate field of an sxt or sxta
6023 instruction, or FAIL on error. */
6024
6025 static int
6026 parse_ror (char **str)
6027 {
6028 int rot;
6029 char *s = *str;
6030
6031 if (strncasecmp (s, "ROR", 3) == 0)
6032 s += 3;
6033 else
6034 {
6035 inst.error = _("missing rotation field after comma");
6036 return FAIL;
6037 }
6038
6039 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6040 return FAIL;
6041
6042 switch (rot)
6043 {
6044 case 0: *str = s; return 0x0;
6045 case 8: *str = s; return 0x1;
6046 case 16: *str = s; return 0x2;
6047 case 24: *str = s; return 0x3;
6048
6049 default:
6050 inst.error = _("rotation can only be 0, 8, 16, or 24");
6051 return FAIL;
6052 }
6053 }
6054
6055 /* Parse a conditional code (from conds[] below). The value returned is in the
6056 range 0 .. 14, or FAIL. */
6057 static int
6058 parse_cond (char **str)
6059 {
6060 char *q;
6061 const struct asm_cond *c;
6062 int n;
6063 /* Condition codes are always 2 characters, so matching up to
6064 3 characters is sufficient. */
6065 char cond[3];
6066
6067 q = *str;
6068 n = 0;
6069 while (ISALPHA (*q) && n < 3)
6070 {
6071 cond[n] = TOLOWER (*q);
6072 q++;
6073 n++;
6074 }
6075
6076 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6077 if (!c)
6078 {
6079 inst.error = _("condition required");
6080 return FAIL;
6081 }
6082
6083 *str = q;
6084 return c->value;
6085 }
6086
6087 /* If the given feature available in the selected CPU, mark it as used.
6088 Returns TRUE iff feature is available. */
6089 static bfd_boolean
6090 mark_feature_used (const arm_feature_set *feature)
6091 {
6092 /* Ensure the option is valid on the current architecture. */
6093 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6094 return FALSE;
6095
6096 /* Add the appropriate architecture feature for the barrier option used.
6097 */
6098 if (thumb_mode)
6099 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6100 else
6101 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6102
6103 return TRUE;
6104 }
6105
6106 /* Parse an option for a barrier instruction. Returns the encoding for the
6107 option, or FAIL. */
6108 static int
6109 parse_barrier (char **str)
6110 {
6111 char *p, *q;
6112 const struct asm_barrier_opt *o;
6113
6114 p = q = *str;
6115 while (ISALPHA (*q))
6116 q++;
6117
6118 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6119 q - p);
6120 if (!o)
6121 return FAIL;
6122
6123 if (!mark_feature_used (&o->arch))
6124 return FAIL;
6125
6126 *str = q;
6127 return o->value;
6128 }
6129
6130 /* Parse the operands of a table branch instruction. Similar to a memory
6131 operand. */
6132 static int
6133 parse_tb (char **str)
6134 {
6135 char * p = *str;
6136 int reg;
6137
6138 if (skip_past_char (&p, '[') == FAIL)
6139 {
6140 inst.error = _("'[' expected");
6141 return FAIL;
6142 }
6143
6144 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6145 {
6146 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6147 return FAIL;
6148 }
6149 inst.operands[0].reg = reg;
6150
6151 if (skip_past_comma (&p) == FAIL)
6152 {
6153 inst.error = _("',' expected");
6154 return FAIL;
6155 }
6156
6157 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6158 {
6159 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6160 return FAIL;
6161 }
6162 inst.operands[0].imm = reg;
6163
6164 if (skip_past_comma (&p) == SUCCESS)
6165 {
6166 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6167 return FAIL;
6168 if (inst.reloc.exp.X_add_number != 1)
6169 {
6170 inst.error = _("invalid shift");
6171 return FAIL;
6172 }
6173 inst.operands[0].shifted = 1;
6174 }
6175
6176 if (skip_past_char (&p, ']') == FAIL)
6177 {
6178 inst.error = _("']' expected");
6179 return FAIL;
6180 }
6181 *str = p;
6182 return SUCCESS;
6183 }
6184
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  Note that I (the local operand counter) is advanced
   as operands are consumed and written back through WHICH_OPERAND, so the
   caller's operand index may move by more than one.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* The first operand decides which family of VMOV forms we are in:
     a scalar, a vector register, or an ARM core register.  */
  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      /* The second operand disambiguates further: core register,
	 another vector register, or an immediate.  */
      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D register: a second core register must follow (case 5).  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15: two core registers follow.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14: a second S register must follow.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
6407
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM code occupies the low 16 bits
   and the Thumb code the high 16 bits; parse_operands picks the half
   matching the instruction set being assembled.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* All optional operands must be grouped together here: parse_operands
     compares matcher codes against this marker to decide whether an operand
     may be omitted (and hence whether backtracking is allowed).  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
6543
6544 /* Generic instruction operand parser. This does no encoding and no
6545 semantic validation; it merely squirrels values away in the inst
6546 structure. Returns SUCCESS or FAIL depending on whether the
6547 specified grammar matched. */
6548 static int
6549 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6550 {
6551 unsigned const int *upat = pattern;
6552 char *backtrack_pos = 0;
6553 const char *backtrack_error = 0;
6554 int i, val = 0, backtrack_index = 0;
6555 enum arm_reg_type rtype;
6556 parse_operand_result result;
6557 unsigned int op_parse_code;
6558
6559 #define po_char_or_fail(chr) \
6560 do \
6561 { \
6562 if (skip_past_char (&str, chr) == FAIL) \
6563 goto bad_args; \
6564 } \
6565 while (0)
6566
6567 #define po_reg_or_fail(regtype) \
6568 do \
6569 { \
6570 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6571 & inst.operands[i].vectype); \
6572 if (val == FAIL) \
6573 { \
6574 first_error (_(reg_expected_msgs[regtype])); \
6575 goto failure; \
6576 } \
6577 inst.operands[i].reg = val; \
6578 inst.operands[i].isreg = 1; \
6579 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6580 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6581 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6582 || rtype == REG_TYPE_VFD \
6583 || rtype == REG_TYPE_NQ); \
6584 } \
6585 while (0)
6586
6587 #define po_reg_or_goto(regtype, label) \
6588 do \
6589 { \
6590 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6591 & inst.operands[i].vectype); \
6592 if (val == FAIL) \
6593 goto label; \
6594 \
6595 inst.operands[i].reg = val; \
6596 inst.operands[i].isreg = 1; \
6597 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6598 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6599 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6600 || rtype == REG_TYPE_VFD \
6601 || rtype == REG_TYPE_NQ); \
6602 } \
6603 while (0)
6604
6605 #define po_imm_or_fail(min, max, popt) \
6606 do \
6607 { \
6608 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6609 goto failure; \
6610 inst.operands[i].imm = val; \
6611 } \
6612 while (0)
6613
6614 #define po_scalar_or_goto(elsz, label) \
6615 do \
6616 { \
6617 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6618 if (val == FAIL) \
6619 goto label; \
6620 inst.operands[i].reg = val; \
6621 inst.operands[i].isscalar = 1; \
6622 } \
6623 while (0)
6624
6625 #define po_misc_or_fail(expr) \
6626 do \
6627 { \
6628 if (expr) \
6629 goto failure; \
6630 } \
6631 while (0)
6632
6633 #define po_misc_or_fail_no_backtrack(expr) \
6634 do \
6635 { \
6636 result = expr; \
6637 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6638 backtrack_pos = 0; \
6639 if (result != PARSE_OPERAND_SUCCESS) \
6640 goto failure; \
6641 } \
6642 while (0)
6643
6644 #define po_barrier_or_imm(str) \
6645 do \
6646 { \
6647 val = parse_barrier (&str); \
6648 if (val == FAIL && ! ISALPHA (*str)) \
6649 goto immediate; \
6650 if (val == FAIL \
6651 /* ISB can only take SY as an option. */ \
6652 || ((inst.instruction & 0xf0) == 0x60 \
6653 && val != 0xf)) \
6654 { \
6655 inst.error = _("invalid barrier type"); \
6656 backtrack_pos = 0; \
6657 goto failure; \
6658 } \
6659 } \
6660 while (0)
6661
6662 skip_whitespace (str);
6663
6664 for (i = 0; upat[i] != OP_stop; i++)
6665 {
6666 op_parse_code = upat[i];
6667 if (op_parse_code >= 1<<16)
6668 op_parse_code = thumb ? (op_parse_code >> 16)
6669 : (op_parse_code & ((1<<16)-1));
6670
6671 if (op_parse_code >= OP_FIRST_OPTIONAL)
6672 {
6673 /* Remember where we are in case we need to backtrack. */
6674 gas_assert (!backtrack_pos);
6675 backtrack_pos = str;
6676 backtrack_error = inst.error;
6677 backtrack_index = i;
6678 }
6679
6680 if (i > 0 && (i > 1 || inst.operands[0].present))
6681 po_char_or_fail (',');
6682
6683 switch (op_parse_code)
6684 {
6685 /* Registers */
6686 case OP_oRRnpc:
6687 case OP_oRRnpcsp:
6688 case OP_RRnpc:
6689 case OP_RRnpcsp:
6690 case OP_oRR:
6691 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6692 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6693 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6694 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6695 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6696 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6697 case OP_oRND:
6698 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6699 case OP_RVC:
6700 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6701 break;
6702 /* Also accept generic coprocessor regs for unknown registers. */
6703 coproc_reg:
6704 po_reg_or_fail (REG_TYPE_CN);
6705 break;
6706 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6707 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6708 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6709 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6710 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6711 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6712 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6713 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6714 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6715 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6716 case OP_oRNQ:
6717 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6718 case OP_oRNDQ:
6719 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6720 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6721 case OP_oRNSDQ:
6722 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6723
6724 /* Neon scalar. Using an element size of 8 means that some invalid
6725 scalars are accepted here, so deal with those in later code. */
6726 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6727
6728 case OP_RNDQ_I0:
6729 {
6730 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6731 break;
6732 try_imm0:
6733 po_imm_or_fail (0, 0, TRUE);
6734 }
6735 break;
6736
6737 case OP_RVSD_I0:
6738 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6739 break;
6740
6741 case OP_RSVD_FI0:
6742 {
6743 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6744 break;
6745 try_ifimm0:
6746 if (parse_ifimm_zero (&str))
6747 inst.operands[i].imm = 0;
6748 else
6749 {
6750 inst.error
6751 = _("only floating point zero is allowed as immediate value");
6752 goto failure;
6753 }
6754 }
6755 break;
6756
6757 case OP_RR_RNSC:
6758 {
6759 po_scalar_or_goto (8, try_rr);
6760 break;
6761 try_rr:
6762 po_reg_or_fail (REG_TYPE_RN);
6763 }
6764 break;
6765
6766 case OP_RNSDQ_RNSC:
6767 {
6768 po_scalar_or_goto (8, try_nsdq);
6769 break;
6770 try_nsdq:
6771 po_reg_or_fail (REG_TYPE_NSDQ);
6772 }
6773 break;
6774
6775 case OP_RNDQ_RNSC:
6776 {
6777 po_scalar_or_goto (8, try_ndq);
6778 break;
6779 try_ndq:
6780 po_reg_or_fail (REG_TYPE_NDQ);
6781 }
6782 break;
6783
6784 case OP_RND_RNSC:
6785 {
6786 po_scalar_or_goto (8, try_vfd);
6787 break;
6788 try_vfd:
6789 po_reg_or_fail (REG_TYPE_VFD);
6790 }
6791 break;
6792
6793 case OP_VMOV:
6794 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6795 not careful then bad things might happen. */
6796 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6797 break;
6798
6799 case OP_RNDQ_Ibig:
6800 {
6801 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6802 break;
6803 try_immbig:
6804 /* There's a possibility of getting a 64-bit immediate here, so
6805 we need special handling. */
6806 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6807 == FAIL)
6808 {
6809 inst.error = _("immediate value is out of range");
6810 goto failure;
6811 }
6812 }
6813 break;
6814
6815 case OP_RNDQ_I63b:
6816 {
6817 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6818 break;
6819 try_shimm:
6820 po_imm_or_fail (0, 63, TRUE);
6821 }
6822 break;
6823
6824 case OP_RRnpcb:
6825 po_char_or_fail ('[');
6826 po_reg_or_fail (REG_TYPE_RN);
6827 po_char_or_fail (']');
6828 break;
6829
6830 case OP_RRnpctw:
6831 case OP_RRw:
6832 case OP_oRRw:
6833 po_reg_or_fail (REG_TYPE_RN);
6834 if (skip_past_char (&str, '!') == SUCCESS)
6835 inst.operands[i].writeback = 1;
6836 break;
6837
6838 /* Immediates */
6839 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6840 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6841 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6842 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6843 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6844 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6845 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6846 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6847 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6848 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6849 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6850 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6851
6852 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6853 case OP_oI7b:
6854 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6855 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6856 case OP_oI31b:
6857 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6858 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6859 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6860 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6861
6862 /* Immediate variants */
6863 case OP_oI255c:
6864 po_char_or_fail ('{');
6865 po_imm_or_fail (0, 255, TRUE);
6866 po_char_or_fail ('}');
6867 break;
6868
6869 case OP_I31w:
6870 /* The expression parser chokes on a trailing !, so we have
6871 to find it first and zap it. */
6872 {
6873 char *s = str;
6874 while (*s && *s != ',')
6875 s++;
6876 if (s[-1] == '!')
6877 {
6878 s[-1] = '\0';
6879 inst.operands[i].writeback = 1;
6880 }
6881 po_imm_or_fail (0, 31, TRUE);
6882 if (str == s - 1)
6883 str = s;
6884 }
6885 break;
6886
6887 /* Expressions */
6888 case OP_EXPi: EXPi:
6889 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6890 GE_OPT_PREFIX));
6891 break;
6892
6893 case OP_EXP:
6894 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6895 GE_NO_PREFIX));
6896 break;
6897
6898 case OP_EXPr: EXPr:
6899 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6900 GE_NO_PREFIX));
6901 if (inst.reloc.exp.X_op == O_symbol)
6902 {
6903 val = parse_reloc (&str);
6904 if (val == -1)
6905 {
6906 inst.error = _("unrecognized relocation suffix");
6907 goto failure;
6908 }
6909 else if (val != BFD_RELOC_UNUSED)
6910 {
6911 inst.operands[i].imm = val;
6912 inst.operands[i].hasreloc = 1;
6913 }
6914 }
6915 break;
6916
6917 /* Operand for MOVW or MOVT. */
6918 case OP_HALF:
6919 po_misc_or_fail (parse_half (&str));
6920 break;
6921
6922 /* Register or expression. */
6923 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6924 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6925
6926 /* Register or immediate. */
6927 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6928 I0: po_imm_or_fail (0, 0, FALSE); break;
6929
6930 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6931 IF:
6932 if (!is_immediate_prefix (*str))
6933 goto bad_args;
6934 str++;
6935 val = parse_fpa_immediate (&str);
6936 if (val == FAIL)
6937 goto failure;
6938 /* FPA immediates are encoded as registers 8-15.
6939 parse_fpa_immediate has already applied the offset. */
6940 inst.operands[i].reg = val;
6941 inst.operands[i].isreg = 1;
6942 break;
6943
6944 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6945 I32z: po_imm_or_fail (0, 32, FALSE); break;
6946
6947 /* Two kinds of register. */
6948 case OP_RIWR_RIWC:
6949 {
6950 struct reg_entry *rege = arm_reg_parse_multi (&str);
6951 if (!rege
6952 || (rege->type != REG_TYPE_MMXWR
6953 && rege->type != REG_TYPE_MMXWC
6954 && rege->type != REG_TYPE_MMXWCG))
6955 {
6956 inst.error = _("iWMMXt data or control register expected");
6957 goto failure;
6958 }
6959 inst.operands[i].reg = rege->number;
6960 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6961 }
6962 break;
6963
6964 case OP_RIWC_RIWG:
6965 {
6966 struct reg_entry *rege = arm_reg_parse_multi (&str);
6967 if (!rege
6968 || (rege->type != REG_TYPE_MMXWC
6969 && rege->type != REG_TYPE_MMXWCG))
6970 {
6971 inst.error = _("iWMMXt control register expected");
6972 goto failure;
6973 }
6974 inst.operands[i].reg = rege->number;
6975 inst.operands[i].isreg = 1;
6976 }
6977 break;
6978
6979 /* Misc */
6980 case OP_CPSF: val = parse_cps_flags (&str); break;
6981 case OP_ENDI: val = parse_endian_specifier (&str); break;
6982 case OP_oROR: val = parse_ror (&str); break;
6983 case OP_COND: val = parse_cond (&str); break;
6984 case OP_oBARRIER_I15:
6985 po_barrier_or_imm (str); break;
6986 immediate:
6987 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6988 goto failure;
6989 break;
6990
6991 case OP_wPSR:
6992 case OP_rPSR:
6993 po_reg_or_goto (REG_TYPE_RNB, try_psr);
6994 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6995 {
6996 inst.error = _("Banked registers are not available with this "
6997 "architecture.");
6998 goto failure;
6999 }
7000 break;
7001 try_psr:
7002 val = parse_psr (&str, op_parse_code == OP_wPSR);
7003 break;
7004
7005 case OP_APSR_RR:
7006 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7007 break;
7008 try_apsr:
7009 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7010 instruction). */
7011 if (strncasecmp (str, "APSR_", 5) == 0)
7012 {
7013 unsigned found = 0;
7014 str += 5;
7015 while (found < 15)
7016 switch (*str++)
7017 {
7018 case 'c': found = (found & 1) ? 16 : found | 1; break;
7019 case 'n': found = (found & 2) ? 16 : found | 2; break;
7020 case 'z': found = (found & 4) ? 16 : found | 4; break;
7021 case 'v': found = (found & 8) ? 16 : found | 8; break;
7022 default: found = 16;
7023 }
7024 if (found != 15)
7025 goto failure;
7026 inst.operands[i].isvec = 1;
7027 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7028 inst.operands[i].reg = REG_PC;
7029 }
7030 else
7031 goto failure;
7032 break;
7033
7034 case OP_TB:
7035 po_misc_or_fail (parse_tb (&str));
7036 break;
7037
7038 /* Register lists. */
7039 case OP_REGLST:
7040 val = parse_reg_list (&str);
7041 if (*str == '^')
7042 {
7043 inst.operands[i].writeback = 1;
7044 str++;
7045 }
7046 break;
7047
7048 case OP_VRSLST:
7049 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7050 break;
7051
7052 case OP_VRDLST:
7053 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7054 break;
7055
7056 case OP_VRSDLST:
7057 /* Allow Q registers too. */
7058 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7059 REGLIST_NEON_D);
7060 if (val == FAIL)
7061 {
7062 inst.error = NULL;
7063 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7064 REGLIST_VFP_S);
7065 inst.operands[i].issingle = 1;
7066 }
7067 break;
7068
7069 case OP_NRDLST:
7070 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7071 REGLIST_NEON_D);
7072 break;
7073
7074 case OP_NSTRLST:
7075 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7076 &inst.operands[i].vectype);
7077 break;
7078
7079 /* Addressing modes */
7080 case OP_ADDR:
7081 po_misc_or_fail (parse_address (&str, i));
7082 break;
7083
7084 case OP_ADDRGLDR:
7085 po_misc_or_fail_no_backtrack (
7086 parse_address_group_reloc (&str, i, GROUP_LDR));
7087 break;
7088
7089 case OP_ADDRGLDRS:
7090 po_misc_or_fail_no_backtrack (
7091 parse_address_group_reloc (&str, i, GROUP_LDRS));
7092 break;
7093
7094 case OP_ADDRGLDC:
7095 po_misc_or_fail_no_backtrack (
7096 parse_address_group_reloc (&str, i, GROUP_LDC));
7097 break;
7098
7099 case OP_SH:
7100 po_misc_or_fail (parse_shifter_operand (&str, i));
7101 break;
7102
7103 case OP_SHG:
7104 po_misc_or_fail_no_backtrack (
7105 parse_shifter_operand_group_reloc (&str, i));
7106 break;
7107
7108 case OP_oSHll:
7109 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7110 break;
7111
7112 case OP_oSHar:
7113 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7114 break;
7115
7116 case OP_oSHllar:
7117 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7118 break;
7119
7120 default:
7121 as_fatal (_("unhandled operand code %d"), op_parse_code);
7122 }
7123
7124 /* Various value-based sanity checks and shared operations. We
7125 do not signal immediate failures for the register constraints;
7126 this allows a syntax error to take precedence. */
7127 switch (op_parse_code)
7128 {
7129 case OP_oRRnpc:
7130 case OP_RRnpc:
7131 case OP_RRnpcb:
7132 case OP_RRw:
7133 case OP_oRRw:
7134 case OP_RRnpc_I0:
7135 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7136 inst.error = BAD_PC;
7137 break;
7138
7139 case OP_oRRnpcsp:
7140 case OP_RRnpcsp:
7141 if (inst.operands[i].isreg)
7142 {
7143 if (inst.operands[i].reg == REG_PC)
7144 inst.error = BAD_PC;
7145 else if (inst.operands[i].reg == REG_SP)
7146 inst.error = BAD_SP;
7147 }
7148 break;
7149
7150 case OP_RRnpctw:
7151 if (inst.operands[i].isreg
7152 && inst.operands[i].reg == REG_PC
7153 && (inst.operands[i].writeback || thumb))
7154 inst.error = BAD_PC;
7155 break;
7156
7157 case OP_CPSF:
7158 case OP_ENDI:
7159 case OP_oROR:
7160 case OP_wPSR:
7161 case OP_rPSR:
7162 case OP_COND:
7163 case OP_oBARRIER_I15:
7164 case OP_REGLST:
7165 case OP_VRSLST:
7166 case OP_VRDLST:
7167 case OP_VRSDLST:
7168 case OP_NRDLST:
7169 case OP_NSTRLST:
7170 if (val == FAIL)
7171 goto failure;
7172 inst.operands[i].imm = val;
7173 break;
7174
7175 default:
7176 break;
7177 }
7178
7179 /* If we get here, this operand was successfully parsed. */
7180 inst.operands[i].present = 1;
7181 continue;
7182
7183 bad_args:
7184 inst.error = BAD_ARGS;
7185
7186 failure:
7187 if (!backtrack_pos)
7188 {
7189 /* The parse routine should already have set inst.error, but set a
7190 default here just in case. */
7191 if (!inst.error)
7192 inst.error = _("syntax error");
7193 return FAIL;
7194 }
7195
7196 /* Do not backtrack over a trailing optional argument that
7197 absorbed some text. We will only fail again, with the
7198 'garbage following instruction' error message, which is
7199 probably less helpful than the current one. */
7200 if (backtrack_index == i && backtrack_pos != str
7201 && upat[i+1] == OP_stop)
7202 {
7203 if (!inst.error)
7204 inst.error = _("syntax error");
7205 return FAIL;
7206 }
7207
7208 /* Try again, skipping the optional argument at backtrack_pos. */
7209 str = backtrack_pos;
7210 inst.error = backtrack_error;
7211 inst.operands[backtrack_index].present = 0;
7212 i = backtrack_index;
7213 backtrack_pos = 0;
7214 }
7215
7216 /* Check that we have parsed all the arguments. */
7217 if (*str != '\0' && !inst.error)
7218 inst.error = _("garbage following instruction");
7219
7220 return inst.error ? FAIL : SUCCESS;
7221 }
7222
7223 #undef po_char_or_fail
7224 #undef po_reg_or_fail
7225 #undef po_reg_or_goto
7226 #undef po_imm_or_fail
7227 #undef po_scalar_or_fail
7228 #undef po_barrier_or_imm
7229
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return immediately
   from the enclosing (void) encoding function.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
7241
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   Sets inst.error to BAD_SP or BAD_PC and returns from the enclosing
   (void) function when REG is r13 or r15.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_SP || reg == REG_PC)			\
     {							\
       inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
       return;						\
     }							\
  while (0)
7253
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only diagnoses (via as_tsktsk) when the user asked for
   deprecation warnings; never rejects the operand.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
7261
7262 /* Functions for operand encoding. ARM, then Thumb. */
7263
7264 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7265
7266 /* If VAL can be encoded in the immediate field of an ARM instruction,
7267 return the encoded form. Otherwise, return FAIL. */
7268
7269 static unsigned int
7270 encode_arm_immediate (unsigned int val)
7271 {
7272 unsigned int a, i;
7273
7274 if (val <= 0xff)
7275 return val;
7276
7277 for (i = 2; i < 32; i += 2)
7278 if ((a = rotate_left (val, i)) <= 0xff)
7279 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7280
7281 return FAIL;
7282 }
7283
7284 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7285 return the encoded form. Otherwise, return FAIL. */
7286 static unsigned int
7287 encode_thumb32_immediate (unsigned int val)
7288 {
7289 unsigned int a, i;
7290
7291 if (val <= 0xff)
7292 return val;
7293
7294 for (i = 1; i <= 24; i++)
7295 {
7296 a = val >> i;
7297 if ((val & ~(0xff << i)) == 0)
7298 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7299 }
7300
7301 a = val & 0xff;
7302 if (val == ((a << 16) | a))
7303 return 0x100 | a;
7304 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7305 return 0x300 | a;
7306
7307 a = val & 0xff00;
7308 if (val == ((a << 16) | a))
7309 return 0x200 | (a >> 8);
7310
7311 return FAIL;
7312 }
/* Encode a VFP SP or DP register number into inst.instruction.
   POS selects which operand slot (Sd/Sn/Sm or Dd/Dn/Dm) REG fills;
   each slot splits the register number between a 4-bit field and one
   extra bit, as noted on the cases below.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D16-D31 require the VFP D32 extension: record its use in the
     per-mode feature set, or report an error if the selected FPU
     lacks it.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  switch (pos)
    {
    case VFP_REG_Sd:	/* High bits in 12-15, low bit in bit 22.  */
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:	/* High bits in 16-19, low bit in bit 7.  */
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:	/* High bits in 0-3, low bit in bit 5.  */
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:	/* Low bits in 12-15, high bit in bit 22.  */
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:	/* Low bits in 16-19, high bit in bit 7.  */
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:	/* Low bits in 0-3, high bit in bit 5.  */
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
7367
7368 /* Encode a <shift> in an ARM-format instruction. The immediate,
7369 if any, is handled by md_apply_fix. */
7370 static void
7371 encode_arm_shift (int i)
7372 {
7373 if (inst.operands[i].shift_kind == SHIFT_RRX)
7374 inst.instruction |= SHIFT_ROR << 5;
7375 else
7376 {
7377 inst.instruction |= inst.operands[i].shift_kind << 5;
7378 if (inst.operands[i].immisreg)
7379 {
7380 inst.instruction |= SHIFT_BY_REG;
7381 inst.instruction |= inst.operands[i].imm << 8;
7382 }
7383 else
7384 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7385 }
7386 }
7387
7388 static void
7389 encode_arm_shifter_operand (int i)
7390 {
7391 if (inst.operands[i].isreg)
7392 {
7393 inst.instruction |= inst.operands[i].reg;
7394 encode_arm_shift (i);
7395 }
7396 else
7397 {
7398 inst.instruction |= INST_IMMEDIATE;
7399 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7400 inst.instruction |= inst.operands[i].imm;
7401 }
7402 }
7403
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Encodes the base register plus the pre/post-index and write-back
   bits shared by both addressing modes.  IS_T selects the restricted
   forms allowed for the T (ldrt/strt-style) variants, which must be
   post-indexed.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      /* The T variants set the W bit here even though the syntax has
	 no `!'; post-indexing implies write-back.  */
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base (bits 16-19) will be written back and is the
     same register as the transfer register (bits 12-15).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
7446
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form.  PC is not allowed as the offset, nor
	 as a written-back base.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;	/* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    /* RRX is encoded as ROR with no amount.  */
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      /* Only fill in defaults when no fixup has already claimed the
	 offset field.  */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
7506
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no room for a shift on the register offset.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
7550
7551 /* Write immediate bits [7:0] to the following locations:
7552
7553 |28/24|23 19|18 16|15 4|3 0|
7554 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7555
7556 This function is used by VMOV/VMVN/VORR/VBIC. */
7557
7558 static void
7559 neon_write_immbits (unsigned immbits)
7560 {
7561 inst.instruction |= immbits & 0xf;
7562 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7563 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7564 }
7565
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.
   SIZE must be 8, 16, 32 or 64; anything else aborts.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      lo = ~lo & 0xff;
      break;

    case 16:
      lo = ~lo & 0xffff;
      break;

    case 64:
      hi = ~hi & 0xffffffff;
      /* fall through - the low word is inverted as for size 32.  */

    case 32:
      lo = ~lo & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
7602
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D; i.e. every byte of IMM is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned mask = 0xffu << (byte * 8);
      unsigned field = imm & mask;

      if (field != 0 && field != mask)
	return 0;
    }

  return 1;
}
7614
/* For immediate of above form, return 0bABCD: one bit per byte,
   taken from each byte's least significant bit.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned result = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    result |= ((imm >> (byte * 8)) & 1u) << byte;

  return result;
}
7623
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit plus the 7 bits below the most significant exponent bit
   of an IEEE single-precision pattern.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | low7;
}
7631
/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction. *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.

   The checks run from the largest element size downwards; when a
   value is a repetition of a smaller pattern it falls through to the
   next size.  Returns FAIL when no encoding exists.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      /* Quarter-float immediate: cmode 0xf, only for 32-bit elements
	 and never for the MVN form.  */
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* Every byte all-zeros or all-ones: cmode 0xe with op forced to
	 1 packs one selector bit per byte of the 64-bit value.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* A 64-bit value is otherwise only encodable when both halves
	 match; if so, continue below treating it as 32-bit.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      if (immlo == (immlo & 0x000000ff))
	{
	  /* 0x000000XY.  */
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  /* 0x0000XY00.  */
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  /* 0x00XY0000.  */
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  /* 0xXY000000.  */
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  /* 0x0000XYff ("ones" variant).  */
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  /* 0x00XYffff ("ones" variant).  */
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as 16-bit only when the two halfwords match.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      if (immlo == (immlo & 0x000000ff))
	{
	  /* 0x00XY per halfword.  */
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  /* 0xXY00 per halfword.  */
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as 8-bit only when the two bytes match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
7741
7742 #if defined BFD_HOST_64_BIT
/* Returns TRUE if double precision value V may be cast
   to single precision without loss of accuracy.  */

static bfd_boolean
is_double_a_single (bfd_int64_t v)
{
  /* IEEE double: 1 sign bit, 11 exponent bits (bias 1023), 52
     mantissa bits.  */
  int exp = (int)((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  /* Exponent must be zero, all-ones, or re-biasable into the single
     precision range, and the 29 low-order mantissa bits that single
     precision cannot hold must all be clear.  */
  return (exp == 0 || exp == 0x7FF
	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
	  && (mantissa & 0x1FFFFFFFl) == 0;
}
7756
/* Returns a double precision value casted to single precision
   (ignoring the least significant bits in exponent and mantissa).  */

static int
double_to_single (bfd_int64_t v)
{
  int sign = (int) ((v >> 63) & 1l);
  int exp = (int) ((v >> 52) & 0x7FF);
  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);

  if (exp == 0x7FF)
    /* Infinity/NaN: keep the all-ones exponent.  */
    exp = 0xFF;
  else
    {
      /* Re-bias the exponent from double (1023) to single (127).  */
      exp = exp - 1023 + 127;
      if (exp >= 0xFF)
	{
	  /* Infinity.
	     NOTE(review): 0x7F looks suspicious here (0xFF would be
	     single-precision infinity), but this branch is unreachable
	     for inputs accepted by is_double_a_single, which caps the
	     re-biased exponent at 254 -- confirm before relying on it
	     for other callers.  */
	  exp = 0x7F;
	  mantissa = 0;
	}
      else if (exp < 0)
	{
	  /* No denormalized numbers.  */
	  exp = 0;
	  mantissa = 0;
	}
    }
  /* Keep only the 23 high-order mantissa bits.  */
  mantissa >>= 29;
  return (sign << 31) | (exp << 23) | mantissa;
}
7788 #endif /* BFD_HOST_64_BIT */
7789
/* Kind of "=expr" literal load being processed; selects which
   move-immediate encodings move_or_literal_pool may substitute.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb LDR rX, =expr.  */
  CONST_ARM,	/* ARM LDR rX, =expr.  */
  CONST_VEC	/* VFP/Neon VLDR =expr.  */
};
7796
7797 static void do_vfp_nsyn_opcode (const char *);
7798
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p = (t == CONST_ARM);

  /* The pseudo-op is only meaningful for loads: check the L bit
     appropriate to the instruction width.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.reloc.exp.X_op != O_constant
      && inst.reloc.exp.X_op != O_symbol
      && inst.reloc.exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.reloc.exp.X_op == O_constant
      || inst.reloc.exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.reloc.exp.X_op == O_big)
	{
	  /* Bignum: collect up to 64 bits' worth of littlenums.  An
	     X_add_number of -1 marks a converted flonum.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.reloc.exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v =
	    ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
		 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
		<< LITTLENUM_NUMBER_OF_BITS)
	       | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
	      << LITTLENUM_NUMBER_OF_BITS)
	     | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
#else
	  v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	      | (l[0] & LITTLENUM_MASK);
#endif
	}
      else
	v = inst.reloc.exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* This can be encoded only for a low register.  */
	      if ((v & ~0xFF) == 0 && (inst.operands[i].reg < 8))
		{
		  /* This can be done with a mov(1) instruction.  */
		  inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
		  inst.instruction |= v;
		  return TRUE;
		}

	      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm != (unsigned int) FAIL)
		    isNegated = FALSE;
		  else
		    {
		      newimm = encode_thumb32_immediate (~v);
		      if (newimm != (unsigned int) FAIL)
			isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC)
	    {
	      /* Try a VMOV/VMVN modified-immediate encoding.  The high
		 word is either explicit, zero for unsigned expressions,
		 or the sign extension of the low word.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.reloc.exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      /* If the direct form fails, try the inverted constant
		 with the opposite op (VMOV <-> VMVN).  */
	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move encoding applies: fall back to a literal-pool load,
     rewriting operand 1 as a PC-relative pre-indexed address.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
8047
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  Returns SUCCESS or FAIL.  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* "=const": only succeed if it was turned into a move.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  /* Base register in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form carries the option value directly; no fixup.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Choose the offset fixup: an explicit override wins, but an
     existing group relocation (ALU_PC_G0_NC..LDC_SB_G2) or
     LDR_PC_G0 is preserved.  */
  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
8124
8125 /* Functions for instruction encoding, sorted by sub-architecture.
8126 First some generics; their names are taken from the conventional
8127 bit positions for register arguments in ARM format instructions. */
8128
static void
do_noargs (void)
{
  /* Intentionally empty: the bit pattern taken from the insns[] table is
     already complete for instructions with no operands.  */
}
8133
8134 static void
8135 do_rd (void)
8136 {
8137 inst.instruction |= inst.operands[0].reg << 12;
8138 }
8139
8140 static void
8141 do_rd_rm (void)
8142 {
8143 inst.instruction |= inst.operands[0].reg << 12;
8144 inst.instruction |= inst.operands[1].reg;
8145 }
8146
8147 static void
8148 do_rm_rn (void)
8149 {
8150 inst.instruction |= inst.operands[0].reg;
8151 inst.instruction |= inst.operands[1].reg << 16;
8152 }
8153
8154 static void
8155 do_rd_rn (void)
8156 {
8157 inst.instruction |= inst.operands[0].reg << 12;
8158 inst.instruction |= inst.operands[1].reg << 16;
8159 }
8160
8161 static void
8162 do_rn_rd (void)
8163 {
8164 inst.instruction |= inst.operands[0].reg << 16;
8165 inst.instruction |= inst.operands[1].reg << 12;
8166 }
8167
8168 static void
8169 do_tt (void)
8170 {
8171 inst.instruction |= inst.operands[0].reg << 8;
8172 inst.instruction |= inst.operands[1].reg << 16;
8173 }
8174
8175 static bfd_boolean
8176 check_obsolete (const arm_feature_set *feature, const char *msg)
8177 {
8178 if (ARM_CPU_IS_ANY (cpu_variant))
8179 {
8180 as_tsktsk ("%s", msg);
8181 return TRUE;
8182 }
8183 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8184 {
8185 as_bad ("%s", msg);
8186 return TRUE;
8187 }
8188
8189 return FALSE;
8190 }
8191
/* Encode Rd (15:12), Rm (3:0) and Rn (19:16), with extra operand-overlap
   and deprecation checks when the opcode is SWP/SWPB.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
8215
8216 static void
8217 do_rd_rn_rm (void)
8218 {
8219 inst.instruction |= inst.operands[0].reg << 12;
8220 inst.instruction |= inst.operands[1].reg << 16;
8221 inst.instruction |= inst.operands[2].reg;
8222 }
8223
/* Encode Rm (3:0), Rd (15:12) and Rn (19:16).  The address operand must
   be a plain [Rn] with no offset, and Rn may not be the pc.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* O_illegal means no offset expression was parsed at all, which is
     also acceptable; any real expression must be the constant 0.  */
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8236
static void
do_imm0 (void)
{
  /* OR the immediate operand straight into the low bits of the opcode;
     range checking was done by the operand parser.  */
  inst.instruction |= inst.operands[0].imm;
}
8242
static void
do_rd_cpaddr (void)
{
  /* Rd in bits 15:12; operand 1 is a coprocessor address with writeback
     and unindexed forms permitted and the default relocation.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
8249
8250 /* ARM instructions, in alphabetical order by function name (except
8251 that wrapper functions appear immediately after the function they
8252 wrap). */
8253
8254 /* This is a pseudo-op of the form "adr rd, label" to be converted
8255 into a relative address of the form "add rd, pc, #label-.-8". */
8256
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12); /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* Compensate for the pc reading 8 bytes ahead in ARM state.  */
  inst.reloc.exp.X_add_number -= 8;
}
8268
8269 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8270 into a relative address of the form:
8271 add rd, pc, #low(label-.-8)"
8272 add rd, rd, #high(label-.-8)" */
8273
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12); /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  /* ADRL always expands to an add/add (or sub/sub) pair.  */
  inst.size = INSN_SIZE * 2;
  /* Compensate for the pc reading 8 bytes ahead in ARM state.  */
  inst.reloc.exp.X_add_number -= 8;
}
8286
8287 static void
8288 do_arit (void)
8289 {
8290 if (!inst.operands[1].present)
8291 inst.operands[1].reg = inst.operands[0].reg;
8292 inst.instruction |= inst.operands[0].reg << 12;
8293 inst.instruction |= inst.operands[1].reg << 16;
8294 encode_arm_shifter_operand (2);
8295 }
8296
8297 static void
8298 do_barrier (void)
8299 {
8300 if (inst.operands[0].present)
8301 inst.instruction |= inst.operands[0].imm;
8302 else
8303 inst.instruction |= 0xf;
8304 }
8305
/* BFC Rd, #lsb, #width.  */
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8317
/* BFI Rd, Rn, #lsb, #width (or the BFC alias with #0 as operand 1).  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
8337
/* SBFX/UBFX Rd, Rn, #lsb, #width.  The encoding stores widthminus1 in
   bits 20:16, not the width itself.  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
8348
8349 /* ARM V5 breakpoint instruction (argument parse)
8350 BKPT <16 bit unsigned immediate>
8351 Instruction is not conditional.
8352 The bit pattern given in insns[] has the COND_ALWAYS condition,
8353 and it is an error if the caller tried to override that. */
8354
8355 static void
8356 do_bkpt (void)
8357 {
8358 /* Top 12 of 16 bits to bits 19:8. */
8359 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8360
8361 /* Bottom 4 of 16 bits to bits 3:0. */
8362 inst.instruction |= inst.operands[0].imm & 0xf;
8363 }
8364
/* Set up a pc-relative branch relocation.  A '(plt)' or '(tlscall)'
   suffix parsed into operand 0 overrides DEFAULT_RELOC.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}
8381
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4+ objects use the JUMP reloc so the linker can fix up
     interworking; older objects get the plain branch reloc.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8392
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* Only an unconditional BL may be turned into BLX by the linker,
	 so conditional calls get the JUMP reloc instead.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
8408
8409 /* ARM V5 branch-link-exchange instruction (argument parse)
8410 BLX <target_addr> ie BLX(1)
8411 BLX{<condition>} <Rm> ie BLX(2)
8412 Unfortunately, there are two different opcodes for this mnemonic.
8413 So, the insns[].value is not used, and the code here zaps values
8414 into inst.instruction.
8415 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8416
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* BLX(1) immediate form: fixed opcode, H bit carried by the reloc.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
8440
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

  /* Non-ELF objects, and ELF objects before EABI v4, never get the
     V4BX marker reloc.  */
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}
8464
8465
8466 /* ARM v5TEJ. Jump to Jazelle code. */
8467
8468 static void
8469 do_bxj (void)
8470 {
8471 if (inst.operands[0].reg == REG_PC)
8472 as_tsktsk (_("use of r15 in bxj is not really useful"));
8473
8474 inst.instruction |= inst.operands[0].reg;
8475 }
8476
8477 /* Co-processor data operation:
8478 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8479 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8480 static void
8481 do_cdp (void)
8482 {
8483 inst.instruction |= inst.operands[0].reg << 8;
8484 inst.instruction |= inst.operands[1].imm << 20;
8485 inst.instruction |= inst.operands[2].reg << 12;
8486 inst.instruction |= inst.operands[3].reg << 16;
8487 inst.instruction |= inst.operands[4].reg;
8488 inst.instruction |= inst.operands[5].imm << 5;
8489 }
8490
8491 static void
8492 do_cmp (void)
8493 {
8494 inst.instruction |= inst.operands[0].reg << 16;
8495 encode_arm_shifter_operand (1);
8496 }
8497
8498 /* Transfer between coprocessor and ARM registers.
8499 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8500 MRC2
8501 MCR{cond}
8502 MCR2
8503
8504 No special properties. */
8505
/* Description of one coprocessor register whose access via MRC/MCR is
   deprecated or obsoleted for some architectures.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opcode_1 field.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opcode_2 field.  */
  arm_feature_set deprecated;	/* Architectures where access is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where access is obsolete.  */
  const char *dep_msg;		/* Diagnostic for the deprecated case.  */
  const char *obs_msg;		/* Diagnostic for the obsolete case.  */
};
8518
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the deprecated-register table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8546
/* Encode MRC/MCR (and their *2 forms): register validity depends on the
   direction of the transfer, and accesses to certain CP14/CP15 registers
   draw deprecation warnings.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  /* Warn if this access matches an entry in the deprecated-register
     table and we are assembling for an affected architecture.  */
    for (i = 0; i < deprecated_coproc_reg_count; ++i)
      {
	const struct deprecated_coproc_regs_s *r =
	  deprecated_coproc_regs + i;

	if (inst.operands[0].reg == r->cp
	    && inst.operands[1].imm == r->opc1
	    && inst.operands[3].reg == r->crn
	    && inst.operands[4].reg == r->crm
	    && inst.operands[5].imm == r->opc2)
	  {
	    if (! ARM_CPU_IS_ANY (cpu_variant)
		&& warn_on_deprecated
		&& ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	      as_tsktsk ("%s", r->dep_msg);
	  }
      }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
8596
8597 /* Transfer between coprocessor register and pair of ARM registers.
8598 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8599 MCRR2
8600 MRRC{cond}
8601 MRRC2
8602
8603 Two XScale instructions are special cases of these:
8604
8605 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8606 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8607
8608 Result unpredictable if Rd or Rn is R15. */
8609
/* Encode MCRR/MRRC (and *2 forms): coprocessor number in 11:8, opcode in
   7:4, Rd in 15:12, Rn in 19:16, CRm in 3:0.  */
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
8635
8636 static void
8637 do_cpsi (void)
8638 {
8639 inst.instruction |= inst.operands[0].imm << 6;
8640 if (inst.operands[1].present)
8641 {
8642 inst.instruction |= CPSI_MMOD;
8643 inst.instruction |= inst.operands[1].imm;
8644 }
8645 }
8646
static void
do_dbg (void)
{
  /* DBG #option: 4-bit hint option in the low bits.  */
  inst.instruction |= inst.operands[0].imm;
}
8652
8653 static void
8654 do_div (void)
8655 {
8656 unsigned Rd, Rn, Rm;
8657
8658 Rd = inst.operands[0].reg;
8659 Rn = (inst.operands[1].present
8660 ? inst.operands[1].reg : Rd);
8661 Rm = inst.operands[2].reg;
8662
8663 constraint ((Rd == REG_PC), BAD_PC);
8664 constraint ((Rn == REG_PC), BAD_PC);
8665 constraint ((Rm == REG_PC), BAD_PC);
8666
8667 inst.instruction |= Rd << 16;
8668 inst.instruction |= Rn << 0;
8669 inst.instruction |= Rm << 8;
8670 }
8671
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emits nothing: size 0.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      /* Record the condition mask and base condition so the following
	 instructions can be checked against the IT block.  */
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
8688
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* An empty list has no single member.  Returning early here also
     avoids the undefined "1 << -1" shift below when ffs () yields 0.  */
  if (range == 0)
    return -1;

  i = ffs (range) - 1;
  return (i > 15 || range != (1 << i)) ? -1 : i;
}
8697
/* Encode LDM/STM: base register in 19:16, register list in 15:0, with
   UNPREDICTABLE-writeback diagnostics and the single-register A2
   PUSH/POP rewrite.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* A '^' suffix on the register list selects the user-bank/exception
     form (LDM type 2 or 3).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      /* Keep only the condition field; replace the opcode entirely.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
8748
static void
do_ldmstm (void)
{
  /* Plain LDM/STM mnemonic: no PUSH/POP single-register rewriting.  */
  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
}
8754
8755 /* ARMv5TE load-consecutive (argument parse)
8756 Mode is like LDRH.
8757
8758 LDRccD R, mode
8759 STRccD R, mode. */
8760
static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  /* r14 would make the pair r14:r15, and r15 cannot be transferred.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	  && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
8796
/* LDREX Rt, [Rn]: only the plain-base addressing form is valid, and the
   offset (if an expression was parsed) must be zero.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* NOTE(review): the pc check above already rejects this case; the
     dedicated constraint gives no additional coverage.  */
  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* No fixup is needed once the zero offset has been validated.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
8828
/* LDREXD Rt, Rt2, [Rn]: Rt must be even, Rt2 must be Rt+1.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
8844
8845 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8846 which is not a multiple of four is UNPREDICTABLE. */
8847 static void
8848 check_ldr_r15_aligned (void)
8849 {
8850 constraint (!(inst.operands[1].immisreg)
8851 && (inst.operands[0].reg == REG_PC
8852 && inst.operands[1].reg == REG_PC
8853 && (inst.reloc.exp.X_add_number & 0x3)),
8854 _("ldr to register 15 must be 4-byte alligned"));
8855 }
8856
/* Word/byte LDR/STR: Rd in 15:12; a non-register second operand becomes
   a literal-pool load (or mov/mvn when possible).  */
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
8867
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
8886
8887 /* Halfword and signed-byte load/store operations. */
8888
/* Halfword/signed-byte LDR/STR (ARMv4 addressing mode 3): Rd in 15:12;
   a non-register second operand becomes a literal-pool load.  */
static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
8899
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
8918
8919 /* Co-processor register load/store.
8920 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8921 static void
8922 do_lstc (void)
8923 {
8924 inst.instruction |= inst.operands[0].reg << 8;
8925 inst.instruction |= inst.operands[1].reg << 12;
8926 encode_arm_cp_address (2, TRUE, TRUE, 0);
8927 }
8928
/* MLA/MLS: Rd in 19:16, Rm in 3:0, Rs in 11:8, Rn (accumulator) in 15:12.  */
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
8943
static void
do_mov (void)
{
  /* MOV/MVN: Rd in 15:12, operand 1 is the shifter operand; no Rn.  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
8950
8951 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8952 static void
8953 do_mov16 (void)
8954 {
8955 bfd_vma imm;
8956 bfd_boolean top;
8957
8958 top = (inst.instruction & 0x00400000) != 0;
8959 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8960 _(":lower16: not allowed this instruction"));
8961 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8962 _(":upper16: not allowed instruction"));
8963 inst.instruction |= inst.operands[0].reg << 12;
8964 if (inst.reloc.type == BFD_RELOC_UNUSED)
8965 {
8966 imm = inst.reloc.exp.X_add_number;
8967 /* The value is in two pieces: 0:11, 16:19. */
8968 inst.instruction |= (imm & 0x00000fff);
8969 inst.instruction |= (imm & 0x0000f000) << 4;
8970 }
8971 }
8972
/* Handle the VFP (non-Neon-syntax) forms of MRS.  Returns SUCCESS when
   the instruction was handled here, FAIL to let the caller encode it.  */
static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      /* APSR_nzcv destination: only FPSCR (register 1) may be read.  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
8991
8992 static int
8993 do_vfp_nsyn_msr (void)
8994 {
8995 if (inst.operands[0].isvec)
8996 do_vfp_nsyn_opcode ("fmxr");
8997 else
8998 return FAIL;
8999
9000 return SUCCESS;
9001 }
9002
static void
do_vmrs (void)
{
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
9026
static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  /* Thumb rejects both sp and pc as the source; ARM rejects only pc.  */
  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
9045
/* MRS Rd, {C|S|A}PSR or banked register; VFP FMRX/FMSTAT aliases are
   delegated to do_vfp_nsyn_mrs first.  */
static void
do_mrs (void)
{
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: validate the encoded register value.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
9074
9075 /* Two possible forms:
9076 "{C|S}PSR_<field>, Rm",
9077 "{C|S}PSR_f, #expression". */
9078
static void
do_msr (void)
{
  /* VFP FMXR alias takes priority.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: set the I bit and let the fixup encode the
	 rotated 8-bit immediate.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
9095
/* MUL: Rd in 19:16, Rm in 3:0, Rs in 11:8; Rs defaults to Rd in the
   two-operand form.  */
static void
do_mul (void)
{
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is UNPREDICTABLE before ARMv6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
9111
9112 /* Long Multiply Parser
9113 UMULL RdLo, RdHi, Rm, Rs
9114 SMULL RdLo, RdHi, Rm, Rs
9115 UMLAL RdLo, RdHi, Rm, Rs
9116 SMLAL RdLo, RdHi, Rm, Rs. */
9117
static void
do_mull (void)
{
  /* RdLo in 15:12, RdHi in 19:16, Rm in 3:0, Rs in 11:8.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
9136
static void
do_nop (void)
{
  /* With an operand, or on v6k and later, emit the architectural hint
     form; otherwise keep the table's legacy NOP encoding.  */
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
9150
9151 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9152 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9153 Condition defaults to COND_ALWAYS.
9154 Error if Rd, Rn or Rm are R15. */
9155
9156 static void
9157 do_pkhbt (void)
9158 {
9159 inst.instruction |= inst.operands[0].reg << 12;
9160 inst.instruction |= inst.operands[1].reg << 16;
9161 inst.instruction |= inst.operands[2].reg;
9162 if (inst.operands[3].present)
9163 encode_arm_shift (3);
9164 }
9165
9166 /* ARM V6 PKHTB (Argument Parse). */
9167
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn.  */
      inst.instruction &= 0xfff00010;	/* Clear register and shift fields.  */
      inst.instruction |= inst.operands[0].reg << 12;
      /* Note Rn/Rm are swapped relative to the shifted form below.  */
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
9188
9189 /* ARMv5TE: Preload-Cache
9190 MP Extensions: Preload for write
9191
9192 PLD(W) <addr_mode>
9193
9194 Syntactically, like LDR with B=1, W=0, L=1. */
9195
static void
do_pld (void)
{
  /* PLD accepts only a plain pre-indexed address: no post-indexing,
     no writeback, no unindexed form.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
9209
9210 /* ARMv7: PLI <addr_mode> */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD, but the PLI encoding has the
     P bit clear, so strip PRE_INDEX after the common encoding step.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  inst.instruction &= ~PRE_INDEX;
}
9225
/* Encode PUSH/POP by rewriting the operands into the equivalent
   STMDB/LDMIA sp!, {reglist} form and delegating to encode_ldmstm.  */

static void
do_push_pop (void)
{
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesize SP! as the
     base-register operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
9238
9239 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9240 word at the specified address and the following word
9241 respectively.
9242 Unconditionally executed.
9243 Error if Rn is R15. */
9244
static void
do_rfe (void)
{
  /* Rn in bits 16-19, plus optional base writeback ("!").  */
  inst.instruction |= inst.operands[0].reg << 16;
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
}
9252
9253 /* ARM V6 ssat (argument parse). */
9254
static void
do_ssat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  /* SSAT encodes the saturation width as width - 1.  */
  inst.instruction |= (inst.operands[1].imm - 1) << 16;
  inst.instruction |= inst.operands[2].reg;		/* Rn.  */

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9265
9266 /* ARM V6 usat (argument parse). */
9267
static void
do_usat (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Rd.  */
  /* Unlike SSAT, USAT encodes the saturation width directly.  */
  inst.instruction |= inst.operands[1].imm << 16;
  inst.instruction |= inst.operands[2].reg;		/* Rn.  */

  if (inst.operands[3].present)
    encode_arm_shift (3);
}
9278
9279 /* ARM V6 ssat16 (argument parse). */
9280
9281 static void
9282 do_ssat16 (void)
9283 {
9284 inst.instruction |= inst.operands[0].reg << 12;
9285 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9286 inst.instruction |= inst.operands[2].reg;
9287 }
9288
9289 static void
9290 do_usat16 (void)
9291 {
9292 inst.instruction |= inst.operands[0].reg << 12;
9293 inst.instruction |= inst.operands[1].imm << 16;
9294 inst.instruction |= inst.operands[2].reg;
9295 }
9296
9297 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9298 preserving the other bits.
9299
9300 setend <endian_specifier>, where <endian_specifier> is either
9301 BE or LE. */
9302
static void
do_setend (void)
{
  /* SETEND is deprecated from ARMv8 onwards.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* operands[0].imm is nonzero for BE; bit 9 is the E bit.  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
9313
/* Encode the shift pseudo-ops (LSL/LSR/ASR/ROR as MOV-with-shift).
   The two-operand form "op Rd, Rs|#imm" shifts Rd in place.  */

static void
do_shift (void)
{
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount; encoded via the shift-immediate fixup.  */
    inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
}
9334
/* SMC, HVC and SWI/SVC all take a single immediate which is encoded
   via a dedicated relocation/fixup type.  */

static void
do_smc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SMC;
  inst.reloc.pc_rel = 0;
}

static void
do_hvc (void)
{
  inst.reloc.type = BFD_RELOC_ARM_HVC;
  inst.reloc.pc_rel = 0;
}

static void
do_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
  inst.reloc.pc_rel = 0;
}
9355
/* ARMv8.1 SETPAN #<imm1>: ARM encoding puts the PAN bit at bit 9,
   the Thumb encoding at bit 3.  */

static void
do_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}

static void
do_t_setpan (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
9373
9374 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9375 SMLAxy{cond} Rd,Rm,Rs,Rn
9376 SMLAWy{cond} Rd,Rm,Rs,Rn
9377 Error if any register is R15. */
9378
9379 static void
9380 do_smla (void)
9381 {
9382 inst.instruction |= inst.operands[0].reg << 16;
9383 inst.instruction |= inst.operands[1].reg;
9384 inst.instruction |= inst.operands[2].reg << 8;
9385 inst.instruction |= inst.operands[3].reg << 12;
9386 }
9387
9388 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9389 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9390 Error if any register is R15.
9391 Warning if Rdlo == Rdhi. */
9392
static void
do_smlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
  inst.instruction |= inst.operands[2].reg;		/* Rm.  */
  inst.instruction |= inst.operands[3].reg << 8;	/* Rs.  */

  /* The accumulator halves must not alias.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
9404
9405 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9406 SMULxy{cond} Rd,Rm,Rs
9407 Error if any register is R15. */
9408
9409 static void
9410 do_smul (void)
9411 {
9412 inst.instruction |= inst.operands[0].reg << 16;
9413 inst.instruction |= inst.operands[1].reg;
9414 inst.instruction |= inst.operands[2].reg << 8;
9415 }
9416
9417 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9418 the same for both ARM and Thumb-2. */
9419
static void
do_srs (void)
{
  int reg;

  /* The base register is optional and, when given, must be SP.  */
  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;	/* Target mode number.  */
  /* "!" may be attached to either the base register or the mode.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
9438
9439 /* ARM V6 strex (argument parse). */
9440
static void
do_strex (void)
{
  /* The address operand must be a plain [Rn] with no offset register,
     shift, writeback or post-indexing.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register Rd must not alias Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg;		/* Rt.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Rn.  */
  inst.reloc.type = BFD_RELOC_UNUSED;
}
9464
static void
do_t_strexbh (void)
{
  /* Thumb STREXB/STREXH: address must be a plain [Rn].  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* The status register must not alias the data or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9479
static void
do_strexd (void)
{
  /* STREXD stores an even/odd register pair; the second register is
     optional and defaults (by omission) to Rt + 1.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Status register must not alias either half of the pair or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;	/* Rd (status).  */
  inst.instruction |= inst.operands[1].reg;		/* Rt (even).  */
  inst.instruction |= inst.operands[3].reg << 16;	/* Rn.  */
}
9501
/* ARM V8 STLEX.  */
/* ARMv8 store-release exclusive: the status register must not alias
   the data or base registers; field placement differs between the ARM
   and Thumb encodings, hence the two helpers.  */

static void
do_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}

static void
do_t_stlex (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
9520
9521 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9522 extends it to 32-bits, and adds the result to a value in another
9523 register. You can specify a rotation by 0, 8, 16, or 24 bits
9524 before extracting the 16-bit value.
9525 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9526 Condition defaults to COND_ALWAYS.
9527 Error if any register uses R15. */
9528
9529 static void
9530 do_sxtah (void)
9531 {
9532 inst.instruction |= inst.operands[0].reg << 12;
9533 inst.instruction |= inst.operands[1].reg << 16;
9534 inst.instruction |= inst.operands[2].reg;
9535 inst.instruction |= inst.operands[3].imm << 10;
9536 }
9537
9538 /* ARM V6 SXTH.
9539
9540 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9541 Condition defaults to COND_ALWAYS.
9542 Error if any register uses R15. */
9543
9544 static void
9545 do_sxth (void)
9546 {
9547 inst.instruction |= inst.operands[0].reg << 12;
9548 inst.instruction |= inst.operands[1].reg;
9549 inst.instruction |= inst.operands[2].imm << 10;
9550 }
9551 \f
9552 /* VFP instructions. In a logical order: SP variant first, monad
9553 before dyad, arithmetic then move then load/store. */
9554
/* Single-precision register forms: the VFP_REG_* selector tells
   encode_arm_vfp_reg which instruction field (and which split-bit)
   receives the register number.  */

static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

/* Compare-with-zero takes only the Sd operand.  */
static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
9575
/* Precision conversions: destination and source registers live in
   different register banks (D vs. S).  */

static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
9589
/* Transfers between core registers and VFP single-precision registers
   (fmrs/fmsr and the two-register fmrrs/fmsrr forms).  The two-register
   forms require exactly two consecutive S registers, checked via the
   count the parser left in .imm.  */

static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;	/* Core Rd.  */
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;	/* Core Rd.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* Core Rn.  */
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;	/* Core Rd.  */
}

static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;	/* Core Rd.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* Core Rn.  */
}
9623
/* Single VFP register load/store (flds/fsts, fldd/fstd): register in
   the Sd/Dd field, address encoded as a coprocessor address.  */

static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
9637
9638
/* Common encoder for single-precision multiple load/store.  Only the
   IA variant may omit base-register writeback.  */

static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;	/* Base register.  */
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;		/* Register count.  */
}
9651
/* Common encoder for double-precision multiple load/store.  The
   transfer count is in words (2 per D register), plus one extra word
   for the FPA-compatible X forms.  */

static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;	/* Base register.  */
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;	/* Two words per D register.  */
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;				/* X forms use an odd count.  */

  inst.instruction |= count;
}
9672
/* Thin entry points selecting the load/store-multiple variant; the
   'xp' pair are the extended (X) forms — see vfp_dp_ldstm.  */

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
9708
/* Double-precision register-only forms.  Each function name lists the
   encoding fields (Dd/Dn/Dm) in the order the operands fill them.  */

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
9751
9752 /* VFPv3 instructions. */
/* VFPv3 move-immediate: the 8-bit encoded constant is split, high
   nibble into bits 16-19 and low nibble into bits 0-3.  */
static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
9768
/* Encode the fraction-bits operand of VFPv3 fixed-point conversions.
   The field actually stored is SRCSIZE minus the requested number of
   fraction bits, split across bit 5 (low bit) and bits 0-3.  */

static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
9792
/* Fixed-point conversion entry points: encode the destination register
   then the fraction-bits field for the given source width.  */

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
9820 \f
9821 /* FPA instructions. Also in a logical order. */
9822
9823 static void
9824 do_fpa_cmp (void)
9825 {
9826 inst.instruction |= inst.operands[0].reg << 16;
9827 inst.instruction |= inst.operands[1].reg;
9828 }
9829
/* FPA lfm/sfm: encode the register count into the CP_T_X/CP_T_Y bits
   and emulate the ea/fd stacking forms via synthetic offsets, since
   the hardware instruction has no real stacking modes.  */

static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;		 break;
    case 2: inst.instruction |= CP_T_Y;		 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4: 					 break;	/* Count 4 encodes as 0.  */
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each FPA register transfers 12 bytes.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
9868 \f
9869 /* iWMMXt instructions: strictly in alphabetical order. */
9870
/* TANDC/TORC/TEXTRC write only into r15's flags; the destination must
   literally be r15.  */
static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;	/* Lane selector.  */
}
9883
/* TEXTRM: extract element (lane in operands[2].imm) from a wR register
   into a core register; TINSR is the inverse insertion.  */
static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}
9899
9900 static void
9901 do_iwmmxt_tmia (void)
9902 {
9903 inst.instruction |= inst.operands[0].reg << 5;
9904 inst.instruction |= inst.operands[1].reg;
9905 inst.instruction |= inst.operands[2].reg << 12;
9906 }
9907
/* WALIGNI/WMERGE: three wR registers plus an immediate selector; the
   selector field starts at bit 20 (WALIGNI) or bit 21 (WMERGE).  */
static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}
9925
static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  /* Source register duplicated into both source fields.  */
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
9934
/* WLDRB/WLDRH/WSTRB/WSTRH: byte/halfword wR load/store; the offset is
   scaled, so a mode-specific "scaled by 2" relocation is selected.  */
static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}
9946
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register form is unconditional (0xf condition).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9960
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* iWMMXt2 adds a register-offset addressing form; rewrite the
     encoding by hand for that case.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);	/* Unconditional encoding.  */
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;	/* Base register.  */
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;		/* Index register.  */
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
9983
static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The 8-bit shuffle selector is split: high nibble into bits 20-23,
     low nibble into bits 0-3.  */
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
9992
static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;		/* As wRm.  */
  inst.instruction |= inst.operands[0].reg << 12;	/* As wRd.  */
  inst.instruction |= inst.operands[0].reg << 16;	/* As wRn.  */
}
10001
10002 static void
10003 do_iwmmxt_wrwrwr_or_imm5 (void)
10004 {
10005 if (inst.operands[2].isreg)
10006 do_rd_rn_rm ();
10007 else {
10008 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
10009 _("immediate operand requires iWMMXt2"));
10010 do_rd_rn ();
10011 if (inst.operands[2].imm == 0)
10012 {
10013 switch ((inst.instruction >> 20) & 0xf)
10014 {
10015 case 4:
10016 case 5:
10017 case 6:
10018 case 7:
10019 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10020 inst.operands[2].imm = 16;
10021 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
10022 break;
10023 case 8:
10024 case 9:
10025 case 10:
10026 case 11:
10027 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10028 inst.operands[2].imm = 32;
10029 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
10030 break;
10031 case 12:
10032 case 13:
10033 case 14:
10034 case 15:
10035 {
10036 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10037 unsigned long wrn;
10038 wrn = (inst.instruction >> 16) & 0xf;
10039 inst.instruction &= 0xff0fff0f;
10040 inst.instruction |= wrn;
10041 /* Bail out here; the instruction is now assembled. */
10042 return;
10043 }
10044 }
10045 }
10046 /* Map 32 -> 0, etc. */
10047 inst.operands[2].imm &= 0x1f;
10048 inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
10049 }
10050 }
10051 \f
10052 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10053 operations first, then control, shift, and load/store. */
10054
10055 /* Insns like "foo X,Y,Z". */
10056
10057 static void
10058 do_mav_triple (void)
10059 {
10060 inst.instruction |= inst.operands[0].reg << 16;
10061 inst.instruction |= inst.operands[1].reg;
10062 inst.instruction |= inst.operands[2].reg << 12;
10063 }
10064
10065 /* Insns like "foo W,X,Y,Z".
10066 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10067
10068 static void
10069 do_mav_quad (void)
10070 {
10071 inst.instruction |= inst.operands[0].reg << 5;
10072 inst.instruction |= inst.operands[1].reg << 12;
10073 inst.instruction |= inst.operands[2].reg << 16;
10074 inst.instruction |= inst.operands[3].reg;
10075 }
10076
10077 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
static void
do_mav_dspsc (void)
{
  /* Operand 0 is the fixed DSPSC register; only the MVDX source needs
     encoding.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
10083
10084 /* Maverick shift immediate instructions.
10085 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10086 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10087
static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
10103 \f
10104 /* XScale instructions. Also sorted arithmetic before move. */
10105
10106 /* Xscale multiply-accumulate (argument parse)
10107 MIAcc acc0,Rm,Rs
10108 MIAPHcc acc0,Rm,Rs
10109 MIAxycc acc0,Rm,Rs. */
10110
/* MIA*: operand 0 is the fixed acc0, so only Rm and Rs are encoded.
   MAR moves a core register pair into the accumulator.  */

static void
do_xsc_mia (void)
{
  inst.instruction |= inst.operands[1].reg;		/* Rm.  */
  inst.instruction |= inst.operands[2].reg << 12;	/* Rs.  */
}

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[2].reg << 16;	/* RdHi.  */
}
10128
10129 /* Xscale move-register-accumulator (argument parse)
10130
10131 MRAcc RdLo,RdHi,acc0. */
10132
static void
do_xsc_mra (void)
{
  /* The two destination core registers must differ.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;	/* RdLo.  */
  inst.instruction |= inst.operands[1].reg << 16;	/* RdHi.  */
}
10140 \f
10141 /* Encoding functions relevant only to Thumb. */
10142
10143 /* inst.operands[i] is a shifted-register operand; encode
10144 it into inst.instruction in the format used by Thumb32. */
10145
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  /* Thumb32 data-processing has no register-shifted-register form.  */
  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Amount 0 always encodes as LSL #0; LSR/ASR #32 encode with a
	 zero amount field.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      /* Shift amount is split: imm3 at bits 12-14, imm2 at bits 6-7.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
10177
10178
10179 /* inst.operands[i] was set up by parse_address. Encode it into a
10180 Thumb32 format load or store instruction. Reject forms that cannot
10181 be used with such instructions. If is_t is true, reject forms that
10182 cannot be used with a T instruction; if is_d is true, reject forms
10183 that cannot be used with a D instruction. If it is a store insn,
10184 reject PC in Rn. */
10185
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #shift}] — register offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;	/* Index register number.  */
      if (inst.operands[i].shifted)
	{
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
	}
      /* No immediate offset, so no fixup needed.  */
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #offset]{!} — immediate pre-indexed form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      /* D instructions use different P/W bit positions from the
	 single-register forms.  */
      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #offset — post-indexed form; always writes back.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
10257
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.

   Each X() entry is (mnemonic suffix, 16-bit opcode, 32-bit opcode);
   the X macro is redefined below to expand the same table three ways.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: the T_MNEM_* enumerators used as table indices.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcodes, indexed by T_MNEM_* code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the 32-bit opcodes.  Bit 20 of the wide encoding is
   the S (flag-setting) bit, hence THUMB_SETS_FLAGS below.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
10363
10364 /* Thumb instruction encoders, in alphabetical order. */
10365
10366 /* ADDW or SUBW. */
10367
10368 static void
10369 do_t_add_sub_w (void)
10370 {
10371 int Rd, Rn;
10372
10373 Rd = inst.operands[0].reg;
10374 Rn = inst.operands[1].reg;
10375
10376 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10377 is the SP-{plus,minus}-immediate form of the instruction. */
10378 if (Rn == REG_SP)
10379 constraint (Rd == REG_PC, BAD_PC);
10380 else
10381 reject_bad_reg (Rd);
10382
10383 inst.instruction |= (Rn << 16) | (Rd << 8);
10384 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10385 }
10386
10387 /* Parse an add or subtract instruction. We get here with inst.instruction
10388 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
10389
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */

  /* Writing the PC acts as a branch, so this must end any IT block.  */
  if (Rd == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* Flag-setting forms prefer the narrow encoding outside an IT
	 block; non-flag-setting forms prefer it inside one, because
	 that is what each 16-bit encoding does there.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* Keep any pre-set group relocation; otherwise use the
		     generic Thumb ADD relocation for the immediate.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  /* No explicit size request: allow relaxation to 32 bits.  */
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Generate the 32-bit encoding.  */
	      if (Rd == REG_PC)
		{
		  /* The only legal form here is SUBS PC, LR, #const
		     (exception return).  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand, possibly shifted.  */
	  unsigned int value = inst.reloc.exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Canonicalize so the non-destination source ends up
			 in the Rm slot of the ADD (hi-reg) encoding.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: only 16-bit encodings exist.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
10599
10600 static void
10601 do_t_adr (void)
10602 {
10603 unsigned Rd;
10604
10605 Rd = inst.operands[0].reg;
10606 reject_bad_reg (Rd);
10607
10608 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10609 {
10610 /* Defer to section relaxation. */
10611 inst.relax = inst.instruction;
10612 inst.instruction = THUMB_OP16 (inst.instruction);
10613 inst.instruction |= Rd << 4;
10614 }
10615 else if (unified_syntax && inst.size_req != 2)
10616 {
10617 /* Generate a 32-bit opcode. */
10618 inst.instruction = THUMB_OP32 (inst.instruction);
10619 inst.instruction |= Rd << 8;
10620 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10621 inst.reloc.pc_rel = 1;
10622 }
10623 else
10624 {
10625 /* Generate a 16-bit opcode. */
10626 inst.instruction = THUMB_OP16 (inst.instruction);
10627 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10628 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10629 inst.reloc.pc_rel = 1;
10630
10631 inst.instruction |= Rd << 4;
10632 }
10633 }
10634
10635 /* Arithmetic instructions for which there is just one 16-bit
10636 instruction encoding, and it allows only two low registers.
10637 For maximal compatibility with ARM syntax, we allow three register
10638 operands even when Thumb-32 instructions are not available, as long
10639 as the first two are identical. For instance, both "sbc r0,r1" and
10640 "sbc r0,r0,r1" are allowed. */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  The
	     16-bit form always sets flags, so it is usable only when
	     the S-ness matches the IT context.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  /* Non-commutative: the 16-bit form requires Rd == Rs.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
10723
10724 /* Similarly, but for instructions where the arithmetic operation is
10725 commutative, so we can allow either of them to be different from
10726 the destination operand in a 16-bit instruction. For instance, all
10727 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10728 accepted. */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg);	/* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bfd_boolean narrow;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_it_block ();
	  else
	    narrow = in_it_block ();

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = FALSE;
	  if (inst.operands[2].shifted)
	    narrow = FALSE;
	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (narrow)
	    {
	      /* Commutative operation: the narrow form works whenever
		 the destination coincides with either source.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
10824
10825 static void
10826 do_t_bfc (void)
10827 {
10828 unsigned Rd;
10829 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10830 constraint (msb > 32, _("bit-field extends past end of register"));
10831 /* The instruction encoding stores the LSB and MSB,
10832 not the LSB and width. */
10833 Rd = inst.operands[0].reg;
10834 reject_bad_reg (Rd);
10835 inst.instruction |= Rd << 8;
10836 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10837 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10838 inst.instruction |= msb - 1;
10839 }
10840
10841 static void
10842 do_t_bfi (void)
10843 {
10844 int Rd, Rn;
10845 unsigned int msb;
10846
10847 Rd = inst.operands[0].reg;
10848 reject_bad_reg (Rd);
10849
10850 /* #0 in second position is alternative syntax for bfc, which is
10851 the same instruction but with REG_PC in the Rm field. */
10852 if (!inst.operands[1].isreg)
10853 Rn = REG_PC;
10854 else
10855 {
10856 Rn = inst.operands[1].reg;
10857 reject_bad_reg (Rn);
10858 }
10859
10860 msb = inst.operands[2].imm + inst.operands[3].imm;
10861 constraint (msb > 32, _("bit-field extends past end of register"));
10862 /* The instruction encoding stores the LSB and MSB,
10863 not the LSB and width. */
10864 inst.instruction |= Rd << 8;
10865 inst.instruction |= Rn << 16;
10866 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10867 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10868 inst.instruction |= msb - 1;
10869 }
10870
10871 static void
10872 do_t_bfx (void)
10873 {
10874 unsigned Rd, Rn;
10875
10876 Rd = inst.operands[0].reg;
10877 Rn = inst.operands[1].reg;
10878
10879 reject_bad_reg (Rd);
10880 reject_bad_reg (Rn);
10881
10882 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10883 _("bit-field extends past end of register"));
10884 inst.instruction |= Rd << 8;
10885 inst.instruction |= Rn << 16;
10886 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10887 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10888 inst.instruction |= inst.operands[3].imm - 1;
10889 }
10890
10891 /* ARM V5 Thumb BLX (argument parse)
10892 BLX <target_addr> which is BLX(1)
10893 BLX <Rm> which is BLX(2)
10894 Unfortunately, there are two different opcodes for this mnemonic.
10895 So, the insns[].value is not used, and the code here zaps values
10896 into inst.instruction.
10897
10898 ??? How to take advantage of the additional two bits of displacement
10899 available in Thumb32 mode? Need new relocation? */
10900
10901 static void
10902 do_t_blx (void)
10903 {
10904 set_it_insn_type_last ();
10905
10906 if (inst.operands[0].isreg)
10907 {
10908 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10909 /* We have a register, so this is BLX(2). */
10910 inst.instruction |= inst.operands[0].reg << 3;
10911 }
10912 else
10913 {
10914 /* No register. This must be BLX(1). */
10915 inst.instruction = 0xf000e800;
10916 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10917 }
10918 }
10919
static void
do_t_branch (void)
{
  int opcode;
  int cond;
  int reloc;

  cond = inst.cond;
  set_it_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_it_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  /* A conditional branch uses the Bcond pseudo-mnemonic entry.  */
  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  /* Use the 32-bit encoding when explicitly requested, or when the
     target is known well enough (hasreloc/constant) that relaxation
     to 16 bits is not wanted.  */
  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.reloc.exp.X_op == O_constant))))
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.reloc.type = reloc;
  inst.reloc.pc_rel = 1;
}
10981
10982 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10983 between the two is the maximum immediate allowed - which is passed in
10984 RANGE. */
10985 static void
10986 do_t_bkpt_hlt1 (int range)
10987 {
10988 constraint (inst.cond != COND_ALWAYS,
10989 _("instruction is always unconditional"));
10990 if (inst.operands[0].present)
10991 {
10992 constraint (inst.operands[0].imm > range,
10993 _("immediate value out of range"));
10994 inst.instruction |= inst.operands[0].imm;
10995 }
10996
10997 set_it_insn_type (NEUTRAL_IT_INSN);
10998 }
10999
static void
do_t_hlt (void)
{
  /* HLT takes a 6-bit immediate (0..63).  */
  do_t_bkpt_hlt1 (63);
}
11005
static void
do_t_bkpt (void)
{
  /* BKPT takes an 8-bit immediate (0..255).  */
  do_t_bkpt_hlt1 (255);
}
11011
/* Thumb BL/BLX with a 23-bit branch offset (branch-and-link).  */
static void
do_t_branch23 (void)
{
  set_it_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
    inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
#endif
}
11039
static void
do_t_bx (void)
{
  /* BX is a branch, so it must be the last instruction of an IT block.  */
  set_it_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}
11049
11050 static void
11051 do_t_bxj (void)
11052 {
11053 int Rm;
11054
11055 set_it_insn_type_last ();
11056 Rm = inst.operands[0].reg;
11057 reject_bad_reg (Rm);
11058 inst.instruction |= Rm << 16;
11059 }
11060
11061 static void
11062 do_t_clz (void)
11063 {
11064 unsigned Rd;
11065 unsigned Rm;
11066
11067 Rd = inst.operands[0].reg;
11068 Rm = inst.operands[1].reg;
11069
11070 reject_bad_reg (Rd);
11071 reject_bad_reg (Rm);
11072
11073 inst.instruction |= Rd << 8;
11074 inst.instruction |= Rm << 16;
11075 inst.instruction |= Rm;
11076 }
11077
static void
do_t_cps (void)
{
  /* CPS is not permitted inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  inst.instruction |= inst.operands[0].imm;
}
11084
static void
do_t_cpsi (void)
{
  set_it_insn_type (OUTSIDE_IT_INSN);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* 32-bit CPS: lift the interrupt-mask-disable/enable (imod) bits
	 out of the 16-bit opcode and place them in the Thumb-2
	 position, together with the AIF flags and optional mode.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      /* NOTE(review): the arm_ext_v1 feature test gating the 'A' flag
	 looks unusual for a Thumb-only check — confirm against the
	 opcode tables whether this was meant to be a profile test.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
11112
11113 /* THUMB CPY instruction (argument parse). */
11114
11115 static void
11116 do_t_cpy (void)
11117 {
11118 if (inst.size_req == 4)
11119 {
11120 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11121 inst.instruction |= inst.operands[0].reg << 8;
11122 inst.instruction |= inst.operands[1].reg;
11123 }
11124 else
11125 {
11126 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11127 inst.instruction |= (inst.operands[0].reg & 0x7);
11128 inst.instruction |= inst.operands[1].reg << 3;
11129 }
11130 }
11131
11132 static void
11133 do_t_cbz (void)
11134 {
11135 set_it_insn_type (OUTSIDE_IT_INSN);
11136 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11137 inst.instruction |= inst.operands[0].reg;
11138 inst.reloc.pc_rel = 1;
11139 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11140 }
11141
static void
do_t_dbg (void)
{
  /* DBG: the 4-bit option operand goes straight into the low bits.  */
  inst.instruction |= inst.operands[0].imm;
}
11147
11148 static void
11149 do_t_div (void)
11150 {
11151 unsigned Rd, Rn, Rm;
11152
11153 Rd = inst.operands[0].reg;
11154 Rn = (inst.operands[1].present
11155 ? inst.operands[1].reg : Rd);
11156 Rm = inst.operands[2].reg;
11157
11158 reject_bad_reg (Rd);
11159 reject_bad_reg (Rn);
11160 reject_bad_reg (Rm);
11161
11162 inst.instruction |= Rd << 8;
11163 inst.instruction |= Rn << 16;
11164 inst.instruction |= Rm;
11165 }
11166
11167 static void
11168 do_t_hint (void)
11169 {
11170 if (unified_syntax && inst.size_req == 4)
11171 inst.instruction = THUMB_OP32 (inst.instruction);
11172 else
11173 inst.instruction = THUMB_OP16 (inst.instruction);
11174 }
11175
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;
  now_it.warn_deprecated = FALSE;

  /* If the condition is a negative condition, invert the mask.
     The parser built the t/e mask bits assuming an odd (non-inverted)
     base condition; for an even condition each t/e bit above the
     trailing 1 must be flipped.  The position of the trailing 1 also
     gives the IT block length.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_it.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_it.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_it.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_it.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
11218
11219 /* Helper function used for both push/pop and ldm/stm. */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  /* Bit 20 of the Thumb-2 encoding distinguishes loads from stores.  */
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");

  if ((mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading the PC is a branch, so this must close the IT
	       block if any.  */
	    set_it_insn_type_last ();
	}
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* A single-bit mask means a single register transfer.  */
  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Re-purpose MASK to carry the register number into the str/ldr
	 Rt field (bits 12-15).  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
11282
/* Encode a Thumb LDM/STM (load/store multiple) instruction.
   Operand 0 is the base register (its .writeback flag selects the
   writeback form); operand 1 carries the register list as a bitmask
   in .imm.  In unified syntax a 16-bit encoding is chosen when
   possible, falling back to the 32-bit Thumb-2 form via
   encode_thumb2_ldmstm; pre-unified syntax only accepts the classic
   16-bit ldmia/stmia.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit encoding is only usable when the writeback
		 setting matches what the 16-bit form implies: stmia
		 always writes back; ldmia writes back iff the base is
		 not in the register list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. ldmia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr.  The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
			    : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP-based multiples become push/pop, or str/ldr via SP
		 for a single register without writeback.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
			            ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
			            ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  /* No 16-bit form fits; use the 32-bit Thumb-2 encoding.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-unified syntax: only 16-bit ldmia/stmia with lo regs.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  /* ldmia: warn when the requested writeback disagrees with
	     what the architecture actually does.  */
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
11410
11411 static void
11412 do_t_ldrex (void)
11413 {
11414 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11415 || inst.operands[1].postind || inst.operands[1].writeback
11416 || inst.operands[1].immisreg || inst.operands[1].shifted
11417 || inst.operands[1].negative,
11418 BAD_ADDR_MODE);
11419
11420 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11421
11422 inst.instruction |= inst.operands[0].reg << 12;
11423 inst.instruction |= inst.operands[1].reg << 16;
11424 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11425 }
11426
11427 static void
11428 do_t_ldrexd (void)
11429 {
11430 if (!inst.operands[1].present)
11431 {
11432 constraint (inst.operands[0].reg == REG_LR,
11433 _("r14 not allowed as first register "
11434 "when second register is omitted"));
11435 inst.operands[1].reg = inst.operands[0].reg + 1;
11436 }
11437 constraint (inst.operands[0].reg == inst.operands[1].reg,
11438 BAD_OVERLAP);
11439
11440 inst.instruction |= inst.operands[0].reg << 12;
11441 inst.instruction |= inst.operands[1].reg << 8;
11442 inst.instruction |= inst.operands[2].reg << 16;
11443 }
11444
/* Encode a Thumb single load/store (LDR/STR and byte/halfword/signed
   variants).  Operand 0 is the transfer register, operand 1 the
   address (or a bare expression, which may be turned into a literal
   pool reference).  In unified syntax this picks a 16-bit encoding
   when one exists, recording a relax opcode where the final size
   depends on the resolved offset; otherwise it emits the 32-bit
   Thumb-2 form.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    /* A load into PC is a branch: must be last in an IT block.  */
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Bare expression: try to satisfy it as a mov/literal-pool
	     load instead of a memory operand.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- or SP-relative forms use dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.reloc.pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		/* Size not forced: let relaxation decide 16 vs 32 bit.  */
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Pre-unified (Thumb-1) syntax from here on.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC/SP-relative: only word-sized immediate-offset loads/stores.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Map the immediate-offset opcode to its register-offset twin.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
11631
11632 static void
11633 do_t_ldstd (void)
11634 {
11635 if (!inst.operands[1].present)
11636 {
11637 inst.operands[1].reg = inst.operands[0].reg + 1;
11638 constraint (inst.operands[0].reg == REG_LR,
11639 _("r14 not allowed here"));
11640 constraint (inst.operands[0].reg == REG_R12,
11641 _("r12 not allowed here"));
11642 }
11643
11644 if (inst.operands[2].writeback
11645 && (inst.operands[0].reg == inst.operands[2].reg
11646 || inst.operands[1].reg == inst.operands[2].reg))
11647 as_warn (_("base register written back, and overlaps "
11648 "one of transfer registers"));
11649
11650 inst.instruction |= inst.operands[0].reg << 12;
11651 inst.instruction |= inst.operands[1].reg << 8;
11652 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11653 }
11654
/* Encode the Thumb-2 unprivileged load/store variants (LDRT/STRT
   etc.): operand 0 is the transfer register, operand 1 the address,
   encoded with the is_t (translated/unprivileged) form.  */
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
11661
11662 static void
11663 do_t_mla (void)
11664 {
11665 unsigned Rd, Rn, Rm, Ra;
11666
11667 Rd = inst.operands[0].reg;
11668 Rn = inst.operands[1].reg;
11669 Rm = inst.operands[2].reg;
11670 Ra = inst.operands[3].reg;
11671
11672 reject_bad_reg (Rd);
11673 reject_bad_reg (Rn);
11674 reject_bad_reg (Rm);
11675 reject_bad_reg (Ra);
11676
11677 inst.instruction |= Rd << 8;
11678 inst.instruction |= Rn << 16;
11679 inst.instruction |= Rm;
11680 inst.instruction |= Ra << 12;
11681 }
11682
11683 static void
11684 do_t_mlal (void)
11685 {
11686 unsigned RdLo, RdHi, Rn, Rm;
11687
11688 RdLo = inst.operands[0].reg;
11689 RdHi = inst.operands[1].reg;
11690 Rn = inst.operands[2].reg;
11691 Rm = inst.operands[3].reg;
11692
11693 reject_bad_reg (RdLo);
11694 reject_bad_reg (RdHi);
11695 reject_bad_reg (Rn);
11696 reject_bad_reg (Rm);
11697
11698 inst.instruction |= RdLo << 12;
11699 inst.instruction |= RdHi << 8;
11700 inst.instruction |= Rn << 16;
11701 inst.instruction |= Rm;
11702 }
11703
/* Encode Thumb MOV, MOVS and CMP.  Operand 0 is Rd (MOV) or Rn (CMP),
   operand 1 a register, shifted register or immediate.  In unified
   syntax this selects between 16- and 32-bit encodings, turns
   register-shifted moves into the corresponding shift instructions,
   and sets up relaxation for immediates; pre-unified syntax only
   produces the classic 16-bit forms.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (Rn == REG_PC)
    /* Writing PC is a branch: must be last in an IT block.  */
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      /* Destination field position differs between mov (bit 8) and
	 the other encodings (bit 16).  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				   "deprecated when r%u is the destination "
				   "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_it_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		{
		  /* Keep any ALU group reloc the operand parser set;
		     otherwise mark it as a plain Thumb immediate.  */
		  if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		    inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		}
	      else
		inst.relax = opcode;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_it_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_it_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow, unshifted, register operand forms.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results.  Don't allow this.  */
	    if (low_regs)
	      {
		/* NOTE(review): this constraint string is not wrapped
		   in _() like its neighbours — likely an i18n
		   omission; confirm before changing.  */
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Pre-unified syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
11996
11997 static void
11998 do_t_mov16 (void)
11999 {
12000 unsigned Rd;
12001 bfd_vma imm;
12002 bfd_boolean top;
12003
12004 top = (inst.instruction & 0x00800000) != 0;
12005 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12006 {
12007 constraint (top, _(":lower16: not allowed this instruction"));
12008 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12009 }
12010 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12011 {
12012 constraint (!top, _(":upper16: not allowed this instruction"));
12013 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12014 }
12015
12016 Rd = inst.operands[0].reg;
12017 reject_bad_reg (Rd);
12018
12019 inst.instruction |= Rd << 8;
12020 if (inst.reloc.type == BFD_RELOC_UNUSED)
12021 {
12022 imm = inst.reloc.exp.X_add_number;
12023 inst.instruction |= (imm & 0xf000) << 4;
12024 inst.instruction |= (imm & 0x0800) << 15;
12025 inst.instruction |= (imm & 0x0700) << 4;
12026 inst.instruction |= (imm & 0x00ff);
12027 }
12028 }
12029
/* Encode Thumb MVN/MVNS/TST/CMN (and CMP routed here): operand 0 is
   Rd or Rn, operand 1 a register, shifted register or immediate.
   Unified syntax picks a 16-bit encoding where the operands allow
   it.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* mvn/mvns put the destination at bit 8; the compare-style
	 encodings put Rn at bit 16.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: 16-bit low-register form only.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
12109
/* Encode Thumb MRS (read special/banked register into Rd).  Operand 1
   is either a banked register (isreg set, encoded value in .reg) or a
   PSR mask in .imm.  VFP system registers are diverted to the VFP
   handler first.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register: scatter its encoding into the SYSm/R fields.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
12157
/* Encode Thumb MSR (write Rn to a special/banked register).  Operand
   0 is the destination PSR specifier (banked register in .reg or a
   mask in .imm), operand 1 must be a register — the Thumb MSR has no
   immediate form.  VFP system registers are diverted first.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* With the DSP extension the _s and _f masks are allowed;
	 without it only the _f (flags) field may be written.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		    && (bits & ~(PSR_s | PSR_f)) != 0)
		   || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
		       && bits != PSR_f)) && m_profile,
		  _("selected processor does not support requested special "
		    "purpose register"));
    }
  else
    constraint ((flags & 0xff) != 0, _("selected processor does not support "
				       "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
12204
/* Encode Thumb MUL/MULS.  Operand 2 may be omitted, in which case the
   destination doubles as the second source.  The 16-bit form requires
   low registers and the destination to overlap one source; otherwise
   the 32-bit Thumb-2 MUL is used.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	/* The 16-bit form sets flags, so MULS is narrow only outside
	   an IT block (and plain MUL only inside one).  */
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
12267
12268 static void
12269 do_t_mull (void)
12270 {
12271 unsigned RdLo, RdHi, Rn, Rm;
12272
12273 RdLo = inst.operands[0].reg;
12274 RdHi = inst.operands[1].reg;
12275 Rn = inst.operands[2].reg;
12276 Rm = inst.operands[3].reg;
12277
12278 reject_bad_reg (RdLo);
12279 reject_bad_reg (RdHi);
12280 reject_bad_reg (Rn);
12281 reject_bad_reg (Rm);
12282
12283 inst.instruction |= RdLo << 12;
12284 inst.instruction |= RdHi << 8;
12285 inst.instruction |= Rn << 16;
12286 inst.instruction |= Rm;
12287
12288 if (RdLo == RdHi)
12289 as_tsktsk (_("rdhi and rdlo must be different"));
12290 }
12291
12292 static void
12293 do_t_nop (void)
12294 {
12295 set_it_insn_type (NEUTRAL_IT_INSN);
12296
12297 if (unified_syntax)
12298 {
12299 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12300 {
12301 inst.instruction = THUMB_OP32 (inst.instruction);
12302 inst.instruction |= inst.operands[0].imm;
12303 }
12304 else
12305 {
12306 /* PR9722: Check for Thumb2 availability before
12307 generating a thumb2 nop instruction. */
12308 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12309 {
12310 inst.instruction = THUMB_OP16 (inst.instruction);
12311 inst.instruction |= inst.operands[0].imm << 4;
12312 }
12313 else
12314 inst.instruction = 0x46c0;
12315 }
12316 }
12317 else
12318 {
12319 constraint (inst.operands[0].present,
12320 _("Thumb does not support NOP with hints"));
12321 inst.instruction = 0x46c0;
12322 }
12323 }
12324
12325 static void
12326 do_t_neg (void)
12327 {
12328 if (unified_syntax)
12329 {
12330 bfd_boolean narrow;
12331
12332 if (THUMB_SETS_FLAGS (inst.instruction))
12333 narrow = !in_it_block ();
12334 else
12335 narrow = in_it_block ();
12336 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12337 narrow = FALSE;
12338 if (inst.size_req == 4)
12339 narrow = FALSE;
12340
12341 if (!narrow)
12342 {
12343 inst.instruction = THUMB_OP32 (inst.instruction);
12344 inst.instruction |= inst.operands[0].reg << 8;
12345 inst.instruction |= inst.operands[1].reg << 16;
12346 }
12347 else
12348 {
12349 inst.instruction = THUMB_OP16 (inst.instruction);
12350 inst.instruction |= inst.operands[0].reg;
12351 inst.instruction |= inst.operands[1].reg << 3;
12352 }
12353 }
12354 else
12355 {
12356 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12357 BAD_HIREG);
12358 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12359
12360 inst.instruction = THUMB_OP16 (inst.instruction);
12361 inst.instruction |= inst.operands[0].reg;
12362 inst.instruction |= inst.operands[1].reg << 3;
12363 }
12364 }
12365
/* Encode Thumb-2 ORN (OR NOT), immediate or shifted-register form.
   Rn defaults to Rd when only two operands were given.  */
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      /* Switch to the modified-immediate encoding; the value itself is
	 resolved later through the T32 immediate fixup.  */
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      /* A register-specified shift amount cannot be encoded here.  */
      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
12399
/* Encode Thumb-2 PKHBT: Rd, Rn, Rm with an optional constant shift
   amount split between bits 12-14 and 6-7 of the encoding.  */
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      /* imm3 in bits 12-14, imm2 in bits 6-7.  */
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}
12425
/* Encode Thumb-2 PKHTB.  With no shift operand, PKHTB Rd, Rn, Rm is
   the same operation as PKHBT Rd, Rm, Rn with the tb bit (bit 5)
   cleared, so we swap the register operands and share the PKHBT
   encoder.  */
static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      /* Clear the tb bit so the shared encoder emits the BT form.  */
      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}
12442
/* Encode the address operand of a Thumb-2 preload (PLD-family)
   instruction.  A register index must not be SP or PC; the address
   itself is handled by encode_thumb32_addr_mode.  */
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
12451
/* Encode Thumb PUSH/POP.  A 16-bit encoding is used when the register
   list is all low registers, optionally plus LR (for push) or PC (for
   pop), and no .w was given; otherwise, in unified syntax, fall back
   to the 32-bit LDM/STM form based at SP.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      /* Low registers plus exactly LR (push) or PC (pop): still fits
	 the 16-bit encoding via its extra M/P bit.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
12484
12485 static void
12486 do_t_rbit (void)
12487 {
12488 unsigned Rd, Rm;
12489
12490 Rd = inst.operands[0].reg;
12491 Rm = inst.operands[1].reg;
12492
12493 reject_bad_reg (Rd);
12494 reject_bad_reg (Rm);
12495
12496 inst.instruction |= Rd << 8;
12497 inst.instruction |= Rm << 16;
12498 inst.instruction |= Rm;
12499 }
12500
/* Encode Thumb REV-family byte-reversal instructions.  The 16-bit
   form is used for low registers without .w; otherwise the 32-bit
   form, in which Rm is encoded in both the bits 16-19 and bits 0-3
   fields.  Divided syntax only has the 16-bit form.  */
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
12529
12530 static void
12531 do_t_rrx (void)
12532 {
12533 unsigned Rd, Rm;
12534
12535 Rd = inst.operands[0].reg;
12536 Rm = inst.operands[1].reg;
12537
12538 reject_bad_reg (Rd);
12539 reject_bad_reg (Rm);
12540
12541 inst.instruction |= Rd << 8;
12542 inst.instruction |= Rm;
12543 }
12544
/* Encode Thumb RSB/RSBS, immediate or register-shifted form.  Rs
   defaults to Rd when only two operands were given.  An immediate of
   exactly zero with low registers (and no .w, flag behaviour matching
   the IT context) is turned into the 16-bit NEG encoding.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit of the opcode.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* Select the 32-bit modified-immediate encoding; the value is
	     resolved through the T32 immediate fixup.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
12599
/* Encode Thumb SETEND.  Deprecated from ARMv8 onwards; not permitted
   inside an IT block.  Bit 3 selects big-endian.  */
static void
do_t_setend (void)
{
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  set_it_insn_type (OUTSIDE_IT_INSN);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
12611
/* Encode Thumb shift instructions (ASR, LSL, LSR, ROR; immediate or
   register count).  In unified syntax a 16-bit encoding is chosen
   when all registers are low, the flag-setting behaviour matches the
   IT context, no .w suffix was given, and the form exists: there is
   no 16-bit ROR-immediate, and the 16-bit register-count form is
   destructive (Rd must equal Rn).  Wide immediate-count shifts are
   emitted as MOV{S} Rd, Rn, <shift> #imm.  Divided syntax only has
   the 16-bit encodings.  */
static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_it_block ();
      else
	narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* No 16-bit encoding exists for ROR with an immediate count.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* The 16-bit register-count form requires Rd == Rn and a low
	 count register.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Immediate count: encode as MOV{S} with a shifted
		 register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
12759
12760 static void
12761 do_t_simd (void)
12762 {
12763 unsigned Rd, Rn, Rm;
12764
12765 Rd = inst.operands[0].reg;
12766 Rn = inst.operands[1].reg;
12767 Rm = inst.operands[2].reg;
12768
12769 reject_bad_reg (Rd);
12770 reject_bad_reg (Rn);
12771 reject_bad_reg (Rm);
12772
12773 inst.instruction |= Rd << 8;
12774 inst.instruction |= Rn << 16;
12775 inst.instruction |= Rm;
12776 }
12777
12778 static void
12779 do_t_simd2 (void)
12780 {
12781 unsigned Rd, Rn, Rm;
12782
12783 Rd = inst.operands[0].reg;
12784 Rm = inst.operands[1].reg;
12785 Rn = inst.operands[2].reg;
12786
12787 reject_bad_reg (Rd);
12788 reject_bad_reg (Rn);
12789 reject_bad_reg (Rm);
12790
12791 inst.instruction |= Rd << 8;
12792 inst.instruction |= Rn << 16;
12793 inst.instruction |= Rm;
12794 }
12795
/* Encode Thumb SMC (secure monitor call).  Only permitted when the
   architecture has the feature checked via arm_ext_v7a.  The 16-bit
   immediate is split across three fields of the encoding.  */
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_it_insn_type_last ();
}
12811
12812 static void
12813 do_t_hvc (void)
12814 {
12815 unsigned int value = inst.reloc.exp.X_add_number;
12816
12817 inst.reloc.type = BFD_RELOC_UNUSED;
12818 inst.instruction |= (value & 0x0fff);
12819 inst.instruction |= (value & 0xf000) << 4;
12820 }
12821
/* Common encoder for Thumb-2 SSAT/USAT.  BIAS is subtracted from the
   saturation-position immediate: 1 for SSAT (whose position is
   encoded minus one), 0 for USAT.  An optional constant shift amount
   is split between bits 12-14 and 6-7, with bit 21 set for ASR.  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.reloc.exp.X_add_number;

      inst.reloc.type = BFD_RELOC_UNUSED;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* imm3 in bits 12-14, imm2 in bits 6-7.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
12859
/* Encode Thumb-2 SSAT: the saturation position is encoded minus one,
   hence the bias of 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
12865
12866 static void
12867 do_t_ssat16 (void)
12868 {
12869 unsigned Rd, Rn;
12870
12871 Rd = inst.operands[0].reg;
12872 Rn = inst.operands[2].reg;
12873
12874 reject_bad_reg (Rd);
12875 reject_bad_reg (Rn);
12876
12877 inst.instruction |= Rd << 8;
12878 inst.instruction |= inst.operands[1].imm - 1;
12879 inst.instruction |= Rn << 16;
12880 }
12881
/* Encode Thumb-2 STREX.  The memory operand must be a plain base
   register with at most an immediate offset: no post-index, writeback,
   register offset, shift or negative offset, and the base may not be
   PC.  The offset is resolved through a U8 offset fixup.  */
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12898
/* Encode Thumb-2 STREXD.  The second source register defaults to the
   register after the first.  The status register must not overlap any
   of the source registers or the base.  */
static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
12915
12916 static void
12917 do_t_sxtah (void)
12918 {
12919 unsigned Rd, Rn, Rm;
12920
12921 Rd = inst.operands[0].reg;
12922 Rn = inst.operands[1].reg;
12923 Rm = inst.operands[2].reg;
12924
12925 reject_bad_reg (Rd);
12926 reject_bad_reg (Rn);
12927 reject_bad_reg (Rm);
12928
12929 inst.instruction |= Rd << 8;
12930 inst.instruction |= Rn << 16;
12931 inst.instruction |= Rm;
12932 inst.instruction |= inst.operands[3].imm << 4;
12933 }
12934
/* Encode Thumb SXTH-family sign/zero extensions.  The 16-bit encoding
   exists only when the opcode has a 16-bit form (value <= 0xffff),
   both registers are low, no .w was given, and the rotation is zero
   or absent.  Otherwise the 32-bit form carries the rotation
   immediate at bit 4.  Divided syntax has no rotation and no
   high-register form.  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
12970
/* Encode Thumb SVC/SWI.  On v6-M the OS extension is required; the
   immediate is resolved through the SWI fixup.  */
static void
do_t_swi (void)
{
  /* We have to do the following check manually as ARM_EXT_OS only applies
     to ARM_EXT_V6M.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
	  /* This only applies to v6-M, however, not to later
	     architectures.  */
	  && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
	as_bad (_("SVC is not permitted on this architecture"));
      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
12987
/* Encode Thumb-2 TBB/TBH (table branch).  Bit 4 of the opcode
   distinguishes TBH from TBB.  The operand must be a register index
   form; only TBH permits a shifted index.  The instruction must be
   the last one in an IT block; Rn may not be SP, Rm may not be SP or
   PC.  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
13009
/* Encode Thumb UDF (permanently undefined).  The immediate defaults
   to zero.  Immediates up to 255 use the 16-bit form unless .w was
   given; the 32-bit form splits the immediate across two fields.
   Neutral with respect to IT blocks.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      /* .n with an immediate over 255 cannot be encoded.  */
      constraint (inst.size_req == 2,
		  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  set_it_insn_type (NEUTRAL_IT_INSN);
}
13032
13033
/* Encode Thumb-2 USAT: the saturation position is encoded directly,
   hence a bias of 0.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13039
13040 static void
13041 do_t_usat16 (void)
13042 {
13043 unsigned Rd, Rn;
13044
13045 Rd = inst.operands[0].reg;
13046 Rn = inst.operands[2].reg;
13047
13048 reject_bad_reg (Rd);
13049 reject_bad_reg (Rn);
13050
13051 inst.instruction |= Rd << 8;
13052 inst.instruction |= inst.operands[1].imm;
13053 inst.instruction |= Rn << 16;
13054 }
13055
13056 /* Neon instruction encoder helpers. */
13057
13058 /* Encodings for the different types for various Neon opcodes. */
13059
13060 /* An "invalid" code for the following tables. */
13061 #define N_INV -1u
13062
/* One row of the Neon encoding table: the base encodings used for the
   integer, float-or-polynomial, and scalar-or-immediate forms of an
   overloaded mnemonic (N_INV where a form does not exist).  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
13069
13070 /* Map overloaded Neon opcodes to their respective encodings. */
13071 #define NEON_ENC_TAB \
13072 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13073 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13074 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13075 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13076 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13077 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13078 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13079 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13080 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13081 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13082 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13083 /* Register variants of the following two instructions are encoded as
13084 vcge / vcgt with the operands reversed. */ \
13085 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13086 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13087 X(vfma, N_INV, 0x0000c10, N_INV), \
13088 X(vfms, N_INV, 0x0200c10, N_INV), \
13089 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13090 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13091 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13092 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13093 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13094 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13095 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13096 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13097 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13098 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13099 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13100 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13101 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13102 X(vshl, 0x0000400, N_INV, 0x0800510), \
13103 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13104 X(vand, 0x0000110, N_INV, 0x0800030), \
13105 X(vbic, 0x0100110, N_INV, 0x0800030), \
13106 X(veor, 0x1000110, N_INV, N_INV), \
13107 X(vorn, 0x0300110, N_INV, 0x0800010), \
13108 X(vorr, 0x0200110, N_INV, 0x0800010), \
13109 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13110 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13111 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13112 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13113 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13114 X(vst1, 0x0000000, 0x0800000, N_INV), \
13115 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13116 X(vst2, 0x0000100, 0x0800100, N_INV), \
13117 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13118 X(vst3, 0x0000200, 0x0800200, N_INV), \
13119 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13120 X(vst4, 0x0000300, 0x0800300, N_INV), \
13121 X(vmovn, 0x1b20200, N_INV, N_INV), \
13122 X(vtrn, 0x1b20080, N_INV, N_INV), \
13123 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13124 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13125 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13126 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13127 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13128 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13129 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13130 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13131 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13132 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13133 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13134 X(vseleq, 0xe000a00, N_INV, N_INV), \
13135 X(vselvs, 0xe100a00, N_INV, N_INV), \
13136 X(vselge, 0xe200a00, N_INV, N_INV), \
13137 X(vselgt, 0xe300a00, N_INV, N_INV), \
13138 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13139 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13140 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13141 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13142 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13143 X(aes, 0x3b00300, N_INV, N_INV), \
13144 X(sha3op, 0x2000c00, N_INV, N_INV), \
13145 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13146 X(sha2op, 0x3ba0380, N_INV, N_INV)
13147
/* Enumerate the overloaded Neon opcodes: one N_MNEM_<opc> constant
   per NEON_ENC_TAB row.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
  NEON_ENC_TAB
#undef X
};

/* The encoding table itself, indexed by enum neon_opc.  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
  NEON_ENC_TAB
#undef X
};
13161
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each selects one column of neon_enc_tab for the opcode X; the low
   28 bits of X index the table.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* These three variants additionally merge high bits of the original
   value (0xf0000000, or 0xf000000 for FPV8) back into the result.  */
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Replace inst.instruction with the table encoding selected by TYPE
   (one of the NEON_ENC_* accessors above) and mark the instruction as
   Neon.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
13186
/* Diagnose a Neon type suffix (inst.vectype populated) applied to an
   instruction that was never encoded as Neon.  Expands to a statement
   that may return from the enclosing function.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
13197
13198 /* Define shapes for instruction operands. The following mnemonic characters
13199 are used in this table:
13200
13201 F - VFP S<n> register
13202 D - Neon D<n> register
13203 Q - Neon Q<n> register
13204 I - Immediate
13205 S - Scalar
13206 R - ARM register
13207 L - D<n> register list
13208
13209 This table is used to generate various data:
13210 - enumerations of the form NS_DDR to be used as arguments to
13211 neon_select_shape.
13212 - a table classifying shapes into single, double, quad, mixed.
13213 - a table used to drive neon_select_shape. */
13214
13215 #define NEON_SHAPE_DEF \
13216 X(3, (D, D, D), DOUBLE), \
13217 X(3, (Q, Q, Q), QUAD), \
13218 X(3, (D, D, I), DOUBLE), \
13219 X(3, (Q, Q, I), QUAD), \
13220 X(3, (D, D, S), DOUBLE), \
13221 X(3, (Q, Q, S), QUAD), \
13222 X(2, (D, D), DOUBLE), \
13223 X(2, (Q, Q), QUAD), \
13224 X(2, (D, S), DOUBLE), \
13225 X(2, (Q, S), QUAD), \
13226 X(2, (D, R), DOUBLE), \
13227 X(2, (Q, R), QUAD), \
13228 X(2, (D, I), DOUBLE), \
13229 X(2, (Q, I), QUAD), \
13230 X(3, (D, L, D), DOUBLE), \
13231 X(2, (D, Q), MIXED), \
13232 X(2, (Q, D), MIXED), \
13233 X(3, (D, Q, I), MIXED), \
13234 X(3, (Q, D, I), MIXED), \
13235 X(3, (Q, D, D), MIXED), \
13236 X(3, (D, Q, Q), MIXED), \
13237 X(3, (Q, Q, D), MIXED), \
13238 X(3, (Q, D, S), MIXED), \
13239 X(3, (D, Q, S), MIXED), \
13240 X(4, (D, D, D, I), DOUBLE), \
13241 X(4, (Q, Q, Q, I), QUAD), \
13242 X(2, (F, F), SINGLE), \
13243 X(3, (F, F, F), SINGLE), \
13244 X(2, (F, I), SINGLE), \
13245 X(2, (F, D), MIXED), \
13246 X(2, (D, F), MIXED), \
13247 X(3, (F, F, I), MIXED), \
13248 X(4, (R, R, F, F), SINGLE), \
13249 X(4, (F, F, R, R), SINGLE), \
13250 X(3, (D, R, R), DOUBLE), \
13251 X(3, (R, R, D), DOUBLE), \
13252 X(2, (S, R), SINGLE), \
13253 X(2, (R, S), SINGLE), \
13254 X(2, (F, R), SINGLE), \
13255 X(2, (R, F), SINGLE), \
13256 /* Half float shape supported so far. */\
13257 X (2, (H, D), MIXED), \
13258 X (2, (D, H), MIXED), \
13259 X (2, (H, F), MIXED), \
13260 X (2, (F, H), MIXED), \
13261 X (2, (H, H), HALF), \
13262 X (2, (H, R), HALF), \
13263 X (2, (R, H), HALF), \
13264 X (2, (H, I), HALF), \
13265 X (3, (H, H, H), HALF), \
13266 X (3, (H, F, I), MIXED), \
13267 X (3, (F, H, I), MIXED)
13268
/* Build the NS_* shape enumeration from NEON_SHAPE_DEF; for example
   X(3, (D, D, D), DOUBLE) expands to NS_DDD.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
13285
/* Classification of a shape by the register class it involves (the
   third column of NEON_SHAPE_DEF).  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

/* Class of each shape, indexed by enum neon_shape.  */
#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
13303
/* The kind of a single operand within a shape (see the mnemonic
   letters documented above NEON_SHAPE_DEF).  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  16,
  32,
  64,
  128,
  0,	/* SE_I: immediate, no register width.  */
  32,
  32,
  0	/* SE_L: register list, no single width.  */
};
13328
/* Operand count and per-operand kinds for one shape.  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

/* Build the shape table from NEON_SHAPE_DEF; indexed by enum
   neon_shape.  */
#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
13350
13351 /* Bit masks used in type checking given instructions.
13352 'N_EQK' means the type must be the same as (or based on in some way) the key
13353 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13354 set, various other bits can be set as well in order to modify the meaning of
13355 the type constraint. */
13356
13357 enum neon_type_mask
13358 {
13359 N_S8 = 0x0000001,
13360 N_S16 = 0x0000002,
13361 N_S32 = 0x0000004,
13362 N_S64 = 0x0000008,
13363 N_U8 = 0x0000010,
13364 N_U16 = 0x0000020,
13365 N_U32 = 0x0000040,
13366 N_U64 = 0x0000080,
13367 N_I8 = 0x0000100,
13368 N_I16 = 0x0000200,
13369 N_I32 = 0x0000400,
13370 N_I64 = 0x0000800,
13371 N_8 = 0x0001000,
13372 N_16 = 0x0002000,
13373 N_32 = 0x0004000,
13374 N_64 = 0x0008000,
13375 N_P8 = 0x0010000,
13376 N_P16 = 0x0020000,
13377 N_F16 = 0x0040000,
13378 N_F32 = 0x0080000,
13379 N_F64 = 0x0100000,
13380 N_P64 = 0x0200000,
13381 N_KEY = 0x1000000, /* Key element (main type specifier). */
13382 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
13383 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
13384 N_UNT = 0x8000000, /* Must be explicitly untyped. */
13385 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
13386 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
13387 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13388 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13389 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13390 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
13391 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13392 N_UTYP = 0,
13393 N_MAX_NONSPECIAL = N_P64
13394 };
13395
13396 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13397
13398 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13399 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13400 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13401 #define N_SUF_32 (N_SU_32 | N_F32)
13402 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13403 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
13404 #define N_F_ALL (N_F16 | N_F32 | N_F64)
13405
13406 /* Pass this as the first type argument to neon_check_type to ignore types
13407 altogether. */
13408 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13409
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in the order given; the first one whose
     per-element register classes all match the parsed operands wins.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	      /* Single-precision VFP register (with a 32-bit or absent
		 type specifier).  */
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	      /* Vector register that is neither quad nor single: a D reg.  */
	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	      /* ARM core register.  */
	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	      /* Quadword vector register.  */
	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	      /* Immediate: neither a register nor a scalar.  */
	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	      /* Scalar (a vector element reference).  */
	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	      /* SE_L matches anything.  */
	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
13552
13553 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13554 means the Q bit should be set). */
13555
13556 static int
13557 neon_quad (enum neon_shape shape)
13558 {
13559 return neon_shape_class[shape] == SC_QUAD;
13560 }
13561
13562 static void
13563 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13564 unsigned *g_size)
13565 {
13566 /* Allow modification to be made to types which are constrained to be
13567 based on the key element, based on bits set alongside N_EQK. */
13568 if ((typebits & N_EQK) != 0)
13569 {
13570 if ((typebits & N_HLF) != 0)
13571 *g_size /= 2;
13572 else if ((typebits & N_DBL) != 0)
13573 *g_size *= 2;
13574 if ((typebits & N_SGN) != 0)
13575 *g_type = NT_signed;
13576 else if ((typebits & N_UNS) != 0)
13577 *g_type = NT_unsigned;
13578 else if ((typebits & N_INT) != 0)
13579 *g_type = NT_integer;
13580 else if ((typebits & N_FLT) != 0)
13581 *g_type = NT_float;
13582 else if ((typebits & N_SIZ) != 0)
13583 *g_type = NT_untyped;
13584 }
13585 }
13586
13587 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13588 operand type, i.e. the single type specified in a Neon instruction when it
13589 is the only one given. */
13590
13591 static struct neon_type_el
13592 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13593 {
13594 struct neon_type_el dest = *key;
13595
13596 gas_assert ((thisarg & N_EQK) != 0);
13597
13598 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13599
13600 return dest;
13601 }
13602
13603 /* Convert Neon type and size into compact bitmask representation. */
13604
13605 static enum neon_type_mask
13606 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13607 {
13608 switch (type)
13609 {
13610 case NT_untyped:
13611 switch (size)
13612 {
13613 case 8: return N_8;
13614 case 16: return N_16;
13615 case 32: return N_32;
13616 case 64: return N_64;
13617 default: ;
13618 }
13619 break;
13620
13621 case NT_integer:
13622 switch (size)
13623 {
13624 case 8: return N_I8;
13625 case 16: return N_I16;
13626 case 32: return N_I32;
13627 case 64: return N_I64;
13628 default: ;
13629 }
13630 break;
13631
13632 case NT_float:
13633 switch (size)
13634 {
13635 case 16: return N_F16;
13636 case 32: return N_F32;
13637 case 64: return N_F64;
13638 default: ;
13639 }
13640 break;
13641
13642 case NT_poly:
13643 switch (size)
13644 {
13645 case 8: return N_P8;
13646 case 16: return N_P16;
13647 case 64: return N_P64;
13648 default: ;
13649 }
13650 break;
13651
13652 case NT_signed:
13653 switch (size)
13654 {
13655 case 8: return N_S8;
13656 case 16: return N_S16;
13657 case 32: return N_S32;
13658 case 64: return N_S64;
13659 default: ;
13660 }
13661 break;
13662
13663 case NT_unsigned:
13664 switch (size)
13665 {
13666 case 8: return N_U8;
13667 case 16: return N_U16;
13668 case 32: return N_U32;
13669 case 64: return N_U64;
13670 default: ;
13671 }
13672 break;
13673
13674 default: ;
13675 }
13676
13677 return N_UTYP;
13678 }
13679
13680 /* Convert compact Neon bitmask type representation to a type and size. Only
13681 handles the case where a single bit is set in the mask. */
13682
13683 static int
13684 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13685 enum neon_type_mask mask)
13686 {
13687 if ((mask & N_EQK) != 0)
13688 return FAIL;
13689
13690 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13691 *size = 8;
13692 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13693 *size = 16;
13694 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13695 *size = 32;
13696 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13697 *size = 64;
13698 else
13699 return FAIL;
13700
13701 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13702 *type = NT_signed;
13703 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13704 *type = NT_unsigned;
13705 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13706 *type = NT_integer;
13707 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13708 *type = NT_untyped;
13709 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13710 *type = NT_poly;
13711 else if ((mask & (N_F_ALL)) != 0)
13712 *type = NT_float;
13713 else
13714 return FAIL;
13715
13716 return SUCCESS;
13717 }
13718
13719 /* Modify a bitmask of allowed types. This is only needed for type
13720 relaxation. */
13721
13722 static unsigned
13723 modify_types_allowed (unsigned allowed, unsigned mods)
13724 {
13725 unsigned size;
13726 enum neon_el_type type;
13727 unsigned destmask;
13728 int i;
13729
13730 destmask = 0;
13731
13732 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13733 {
13734 if (el_type_of_type_chk (&type, &size,
13735 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13736 {
13737 neon_modify_type_size (mods, &type, &size);
13738 destmask |= type_chk_of_el_type (type, size);
13739 }
13740 }
13741
13742 return destmask;
13743 }
13744
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type given in:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */

static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      /* N_IGNORE_TYPE as the first argument suppresses all checking.  */
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type given on the mnemonic and a type given on an operand are
     mutually exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  /* Derive any untyped operands from the key operand.  */
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Two passes: pass 0 records the key operand's type, size and allowed-type
     mask; pass 1 checks every operand against the (possibly relaxed)
     allowed types and, for N_EQK operands, against the modified key type.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      /* First pass: only remember the key operand.  */
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: must equal the key type after applying
		     this operand's size/type modifier bits.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
13933
13934 /* Neon-style VFP instruction forwarding. */
13935
13936 /* Thumb VFP instructions have 0xE in the condition field. */
13937
13938 static void
13939 do_vfp_cond_or_thumb (void)
13940 {
13941 inst.is_neon = 1;
13942
13943 if (thumb_mode)
13944 inst.instruction |= 0xe0000000;
13945 else
13946 inst.instruction |= inst.cond << 28;
13947 }
13948
13949 /* Look up and encode a simple mnemonic, for use as a helper function for the
13950 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13951 etc. It is assumed that operand parsing has already been done, and that the
13952 operands are in the form expected by the given opcode (this isn't necessarily
13953 the same as the form in which they were parsed, hence some massaging must
13954 take place before this function is called).
13955 Checks current arch version against that in the looked-up opcode. */
13956
13957 static void
13958 do_vfp_nsyn_opcode (const char *opname)
13959 {
13960 const struct asm_opcode *opcode;
13961
13962 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13963
13964 if (!opcode)
13965 abort ();
13966
13967 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13968 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13969 _(BAD_FPU));
13970
13971 inst.is_neon = 1;
13972
13973 if (thumb_mode)
13974 {
13975 inst.instruction = opcode->tvalue;
13976 opcode->tencode ();
13977 }
13978 else
13979 {
13980 inst.instruction = (inst.cond << 28) | opcode->avalue;
13981 opcode->aencode ();
13982 }
13983 }
13984
13985 static void
13986 do_vfp_nsyn_add_sub (enum neon_shape rs)
13987 {
13988 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13989
13990 if (rs == NS_FFF)
13991 {
13992 if (is_add)
13993 do_vfp_nsyn_opcode ("fadds");
13994 else
13995 do_vfp_nsyn_opcode ("fsubs");
13996 }
13997 else
13998 {
13999 if (is_add)
14000 do_vfp_nsyn_opcode ("faddd");
14001 else
14002 do_vfp_nsyn_opcode ("fsubd");
14003 }
14004 }
14005
14006 /* Check operand types to see if this is a VFP instruction, and if so call
14007 PFN (). */
14008
14009 static int
14010 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14011 {
14012 enum neon_shape rs;
14013 struct neon_type_el et;
14014
14015 switch (args)
14016 {
14017 case 2:
14018 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
14019 et = neon_check_type (2, rs,
14020 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
14021 break;
14022
14023 case 3:
14024 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
14025 et = neon_check_type (3, rs,
14026 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
14027 break;
14028
14029 default:
14030 abort ();
14031 }
14032
14033 if (et.type != NT_invtype)
14034 {
14035 pfn (rs);
14036 return SUCCESS;
14037 }
14038
14039 inst.error = NULL;
14040 return FAIL;
14041 }
14042
14043 static void
14044 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14045 {
14046 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14047
14048 if (rs == NS_FFF)
14049 {
14050 if (is_mla)
14051 do_vfp_nsyn_opcode ("fmacs");
14052 else
14053 do_vfp_nsyn_opcode ("fnmacs");
14054 }
14055 else
14056 {
14057 if (is_mla)
14058 do_vfp_nsyn_opcode ("fmacd");
14059 else
14060 do_vfp_nsyn_opcode ("fnmacd");
14061 }
14062 }
14063
14064 static void
14065 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14066 {
14067 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14068
14069 if (rs == NS_FFF)
14070 {
14071 if (is_fma)
14072 do_vfp_nsyn_opcode ("ffmas");
14073 else
14074 do_vfp_nsyn_opcode ("ffnmas");
14075 }
14076 else
14077 {
14078 if (is_fma)
14079 do_vfp_nsyn_opcode ("ffmad");
14080 else
14081 do_vfp_nsyn_opcode ("ffnmad");
14082 }
14083 }
14084
14085 static void
14086 do_vfp_nsyn_mul (enum neon_shape rs)
14087 {
14088 if (rs == NS_FFF)
14089 do_vfp_nsyn_opcode ("fmuls");
14090 else
14091 do_vfp_nsyn_opcode ("fmuld");
14092 }
14093
14094 static void
14095 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14096 {
14097 int is_neg = (inst.instruction & 0x80) != 0;
14098 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
14099
14100 if (rs == NS_FF)
14101 {
14102 if (is_neg)
14103 do_vfp_nsyn_opcode ("fnegs");
14104 else
14105 do_vfp_nsyn_opcode ("fabss");
14106 }
14107 else
14108 {
14109 if (is_neg)
14110 do_vfp_nsyn_opcode ("fnegd");
14111 else
14112 do_vfp_nsyn_opcode ("fabsd");
14113 }
14114 }
14115
14116 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14117 insns belong to Neon, and are handled elsewhere. */
14118
14119 static void
14120 do_vfp_nsyn_ldm_stm (int is_dbmode)
14121 {
14122 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14123 if (is_ldm)
14124 {
14125 if (is_dbmode)
14126 do_vfp_nsyn_opcode ("fldmdbs");
14127 else
14128 do_vfp_nsyn_opcode ("fldmias");
14129 }
14130 else
14131 {
14132 if (is_dbmode)
14133 do_vfp_nsyn_opcode ("fstmdbs");
14134 else
14135 do_vfp_nsyn_opcode ("fstmias");
14136 }
14137 }
14138
14139 static void
14140 do_vfp_nsyn_sqrt (void)
14141 {
14142 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
14143 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
14144
14145 if (rs == NS_FF)
14146 do_vfp_nsyn_opcode ("fsqrts");
14147 else
14148 do_vfp_nsyn_opcode ("fsqrtd");
14149 }
14150
14151 static void
14152 do_vfp_nsyn_div (void)
14153 {
14154 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
14155 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14156 N_F32 | N_F64 | N_KEY | N_VFP);
14157
14158 if (rs == NS_FFF)
14159 do_vfp_nsyn_opcode ("fdivs");
14160 else
14161 do_vfp_nsyn_opcode ("fdivd");
14162 }
14163
14164 static void
14165 do_vfp_nsyn_nmul (void)
14166 {
14167 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
14168 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14169 N_F32 | N_F64 | N_KEY | N_VFP);
14170
14171 if (rs == NS_FFF)
14172 {
14173 NEON_ENCODE (SINGLE, inst);
14174 do_vfp_sp_dyadic ();
14175 }
14176 else
14177 {
14178 NEON_ENCODE (DOUBLE, inst);
14179 do_vfp_dp_rd_rn_rm ();
14180 }
14181 do_vfp_cond_or_thumb ();
14182 }
14183
/* Encode VCMP/VCMPE written in Neon syntax: register-register compare when
   operand 1 is a register, otherwise the compare-with-zero form.  */

static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      /* Rebase inst.instruction onto the "compare with zero" pseudo
	 mnemonic (vcmpz/vcmpez) by adding the enum distance.  */
      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();
}
14233
14234 static void
14235 nsyn_insert_sp (void)
14236 {
14237 inst.operands[1] = inst.operands[0];
14238 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14239 inst.operands[0].reg = REG_SP;
14240 inst.operands[0].isreg = 1;
14241 inst.operands[0].writeback = 1;
14242 inst.operands[0].present = 1;
14243 }
14244
14245 static void
14246 do_vfp_nsyn_push (void)
14247 {
14248 nsyn_insert_sp ();
14249 if (inst.operands[1].issingle)
14250 do_vfp_nsyn_opcode ("fstmdbs");
14251 else
14252 do_vfp_nsyn_opcode ("fstmdbd");
14253 }
14254
14255 static void
14256 do_vfp_nsyn_pop (void)
14257 {
14258 nsyn_insert_sp ();
14259 if (inst.operands[1].issingle)
14260 do_vfp_nsyn_opcode ("fldmias");
14261 else
14262 do_vfp_nsyn_opcode ("fldmiad");
14263 }
14264
14265 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14266 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14267
14268 static void
14269 neon_dp_fixup (struct arm_it* insn)
14270 {
14271 unsigned int i = insn->instruction;
14272 insn->is_neon = 1;
14273
14274 if (thumb_mode)
14275 {
14276 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14277 if (i & (1 << 24))
14278 i |= 1 << 28;
14279
14280 i &= ~(1 << 24);
14281
14282 i |= 0xef000000;
14283 }
14284 else
14285 i |= 0xf2000000;
14286
14287 insn->instruction = i;
14288 }
14289
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  Implemented as "1-based position of the least significant
   set bit, minus 4", matching ffs() semantics (0 for x == 0).  */

static unsigned
neon_logbits (unsigned x)
{
  unsigned pos = 0;

  if (x != 0)
    {
      pos = 1;
      while ((x & 1u) == 0)
	{
	  x >>= 1;
	  pos++;
	}
    }

  return pos - 4;
}
14298
/* Split a Neon register number into the low four bits and the remaining
   high bit, which the encodings below store in separate fields.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14301
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  /* Rd from operand 0, Rn from operand 1, Rm from operand 2; each register
     number is split into a 4-bit field plus a separate high bit.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  /* The size field holds log2(SIZE in bits) - 3.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
14326
/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  /* Rd from operand 0, Rm from operand 1; register numbers are split into
     a 4-bit field plus a separate high bit.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (qbit != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;

  /* The size field holds log2(SIZE in bits) - 3.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
14349
14350 /* Neon instruction encoders, in approximate order of appearance. */
14351
14352 static void
14353 do_neon_dyadic_i_su (void)
14354 {
14355 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14356 struct neon_type_el et = neon_check_type (3, rs,
14357 N_EQK, N_EQK, N_SU_32 | N_KEY);
14358 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14359 }
14360
14361 static void
14362 do_neon_dyadic_i64_su (void)
14363 {
14364 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14365 struct neon_type_el et = neon_check_type (3, rs,
14366 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14367 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14368 }
14369
/* Encode an immediate-shift instruction: Rd from operand 0, Rm from
   operand 1, shift amount IMMBITS starting at bit 16.  ET's element size
   (converted to bytes here) supplies the flag at bit 7 -- set only for
   64-bit elements -- and the three-bit field at bit 19.  The U bit is
   written from UVAL only when WRITE_UBIT is set.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
14388
14389 static void
14390 do_neon_shl_imm (void)
14391 {
14392 if (!inst.operands[2].isreg)
14393 {
14394 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14395 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14396 int imm = inst.operands[2].imm;
14397
14398 constraint (imm < 0 || (unsigned)imm >= et.size,
14399 _("immediate out of range for shift"));
14400 NEON_ENCODE (IMMED, inst);
14401 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14402 }
14403 else
14404 {
14405 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14406 struct neon_type_el et = neon_check_type (3, rs,
14407 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14408 unsigned int tmp;
14409
14410 /* VSHL/VQSHL 3-register variants have syntax such as:
14411 vshl.xx Dd, Dm, Dn
14412 whereas other 3-register operations encoded by neon_three_same have
14413 syntax like:
14414 vadd.xx Dd, Dn, Dm
14415 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14416 here. */
14417 tmp = inst.operands[2].reg;
14418 inst.operands[2].reg = inst.operands[1].reg;
14419 inst.operands[1].reg = tmp;
14420 NEON_ENCODE (INTEGER, inst);
14421 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14422 }
14423 }
14424
14425 static void
14426 do_neon_qshl_imm (void)
14427 {
14428 if (!inst.operands[2].isreg)
14429 {
14430 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14431 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14432 int imm = inst.operands[2].imm;
14433
14434 constraint (imm < 0 || (unsigned)imm >= et.size,
14435 _("immediate out of range for shift"));
14436 NEON_ENCODE (IMMED, inst);
14437 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
14438 }
14439 else
14440 {
14441 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14442 struct neon_type_el et = neon_check_type (3, rs,
14443 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14444 unsigned int tmp;
14445
14446 /* See note in do_neon_shl_imm. */
14447 tmp = inst.operands[2].reg;
14448 inst.operands[2].reg = inst.operands[1].reg;
14449 inst.operands[1].reg = tmp;
14450 NEON_ENCODE (INTEGER, inst);
14451 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14452 }
14453 }
14454
14455 static void
14456 do_neon_rshl (void)
14457 {
14458 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14459 struct neon_type_el et = neon_check_type (3, rs,
14460 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14461 unsigned int tmp;
14462
14463 tmp = inst.operands[2].reg;
14464 inst.operands[2].reg = inst.operands[1].reg;
14465 inst.operands[1].reg = tmp;
14466 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14467 }
14468
14469 static int
14470 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
14471 {
14472 /* Handle .I8 pseudo-instructions. */
14473 if (size == 8)
14474 {
14475 /* Unfortunately, this will make everything apart from zero out-of-range.
14476 FIXME is this the intended semantics? There doesn't seem much point in
14477 accepting .I8 if so. */
14478 immediate |= immediate << 8;
14479 size = 16;
14480 }
14481
14482 if (size >= 32)
14483 {
14484 if (immediate == (immediate & 0x000000ff))
14485 {
14486 *immbits = immediate;
14487 return 0x1;
14488 }
14489 else if (immediate == (immediate & 0x0000ff00))
14490 {
14491 *immbits = immediate >> 8;
14492 return 0x3;
14493 }
14494 else if (immediate == (immediate & 0x00ff0000))
14495 {
14496 *immbits = immediate >> 16;
14497 return 0x5;
14498 }
14499 else if (immediate == (immediate & 0xff000000))
14500 {
14501 *immbits = immediate >> 24;
14502 return 0x7;
14503 }
14504 if ((immediate & 0xffff) != (immediate >> 16))
14505 goto bad_immediate;
14506 immediate &= 0xffff;
14507 }
14508
14509 if (immediate == (immediate & 0x000000ff))
14510 {
14511 *immbits = immediate;
14512 return 0x9;
14513 }
14514 else if (immediate == (immediate & 0x0000ff00))
14515 {
14516 *immbits = immediate >> 8;
14517 return 0xb;
14518 }
14519
14520 bad_immediate:
14521 first_error (_("immediate value out of range"));
14522 return FAIL;
14523 }
14524
/* Encode Neon logic instructions (VAND/VBIC/VORR/VORN/VEOR...): either the
   three-register form, or the two-register-plus-immediate form, where VAND
   and VORN are pseudo-instructions implemented via VBIC/VORR with the
   immediate inverted.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form; the immediate may be operand 1 (two-operand
	 syntax) or operand 2 (three-operand syntax).  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
14610
/* Encode a three-register Neon bit-manipulation operation.  The element
   type is ignored (N_IGNORE_TYPE): the operation works on raw bits, so
   neither the U bit nor the size field is derived from the type.  */

static void
do_neon_bitfield (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_IGNORE_TYPE);
  neon_three_same (neon_quad (rs), 0, -1);
}
14618
/* Encode a dyadic (three-same) operation that has distinct float and
   integer encodings.  TYPES is the mask of element types permitted for
   the key (last) operand; DESTBITS adds extra type-check bits for the
   destination.  For integer types, the U bit is set when the element
   type equals UBIT_MEANING.  */

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      /* Float encoding: size comes from the opcode bitmask, not et.  */
      NEON_ENCODE (FLOAT, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
14637
/* Dyadic op accepting signed/unsigned/float 8-32 bit elements; the U
   bit distinguishes unsigned variants.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14643
/* As do_neon_dyadic_if_su, for mnemonics restricted to D registers.  */

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
14651
/* Dyadic op accepting integer/float 8-32 bit elements (D registers
   only, enforced at parse time).  */

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14659
/* Bitmask of checks that vfp_or_neon_is_neon should perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,	/* Reject/fix up condition codes.  */
  NEON_CHECK_ARCH = 2,	/* Require Neon v1 support.  */
  NEON_CHECK_ARCH8 = 4	/* Require ARMv8 Neon support.  */
};
14666
14667 /* Call this function if an instruction which may have belonged to the VFP or
14668 Neon instruction sets, but turned out to be a Neon instruction (due to the
14669 operand types involved, etc.). We have to check and/or fix-up a couple of
14670 things:
14671
14672 - Make sure the user hasn't attempted to make a Neon instruction
14673 conditional.
14674 - Alter the value in the condition code field if necessary.
14675 - Make sure that the arch supports Neon instructions.
14676
14677 Which of these operations take place depends on bits from enum
14678 vfp_or_neon_is_neon_bits.
14679
14680 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14681 current instruction's condition is COND_ALWAYS, the condition field is
14682 changed to inst.uncond_value. This is necessary because instructions shared
14683 between VFP and Neon may be conditional for the VFP variants only, and the
14684 unconditional Neon version must have, e.g., 0xF in the condition field. */
14685
14686 static int
14687 vfp_or_neon_is_neon (unsigned check)
14688 {
14689 /* Conditions are always legal in Thumb mode (IT blocks). */
14690 if (!thumb_mode && (check & NEON_CHECK_CC))
14691 {
14692 if (inst.cond != COND_ALWAYS)
14693 {
14694 first_error (_(BAD_COND));
14695 return FAIL;
14696 }
14697 if (inst.uncond_value != -1)
14698 inst.instruction |= inst.uncond_value << 28;
14699 }
14700
14701 if ((check & NEON_CHECK_ARCH)
14702 && !mark_feature_used (&fpu_neon_ext_v1))
14703 {
14704 first_error (_(BAD_FPU));
14705 return FAIL;
14706 }
14707
14708 if ((check & NEON_CHECK_ARCH8)
14709 && !mark_feature_used (&fpu_neon_ext_armv8))
14710 {
14711 first_error (_(BAD_FPU));
14712 return FAIL;
14713 }
14714
14715 return SUCCESS;
14716 }
14717
/* Encode VADD/VSUB-style instructions shared between VFP and Neon:
   try the VFP form first, then fall back to the Neon integer/float
   dyadic encoding (64-bit integer elements also allowed).  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen.  Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
14731
14732 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14733 result to be:
14734 V<op> A,B (A is operand 0, B is operand 2)
14735 to mean:
14736 V<op> A,B,A
14737 not:
14738 V<op> A,B,B
14739 so handle that case specially. */
14740
14741 static void
14742 neon_exchange_operands (void)
14743 {
14744 void *scratch = alloca (sizeof (inst.operands[0]));
14745 if (inst.operands[1].present)
14746 {
14747 /* Swap operands[1] and operands[2]. */
14748 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14749 inst.operands[1] = inst.operands[2];
14750 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14751 }
14752 else
14753 {
14754 inst.operands[1] = inst.operands[2];
14755 inst.operands[2] = inst.operands[0];
14756 }
14757 }
14758
/* Encode a Neon compare.  With a register third operand this is a
   three-same op (operands exchanged first when INVERT, turning e.g. a
   less-than into a greater-than); with an immediate it is encoded as a
   two-register compare against zero.  REGTYPES/IMMTYPES give the
   permitted element types for each form.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
14786
/* Non-inverted compare: S/U/F register form, signed/float zero form.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}
14792
/* Inverted compare: operands are exchanged before encoding.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}
14798
/* Equality compare: no signedness distinction, so I/F types only.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
14804
14805 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14806 scalars, which are encoded in 5 bits, M : Rm.
14807 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14808 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14809 index in M. */
14810
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned idx = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && reg <= 7 && idx <= 3)
    return reg | (idx << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && reg <= 15 && idx <= 1)
    return reg | (idx << 4);

  /* Any other size, or a register/index outside the encodable range.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
14836
14837 /* Encode multiply / multiply-accumulate scalar instructions. */
14838
/* Encode a multiply / multiply-accumulate with a scalar third operand.
   ET is the (already validated) element type; UBIT is the value for the
   U bit (non-zero -> 1).  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Pack the scalar's register and element index into M:Vm.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
14861
/* Encode VMLA/VMLS-style instructions: try the VFP form first; for the
   Neon form dispatch on whether the third operand is a scalar.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
14886
/* Encode fused multiply-accumulate (VFMA/VFMS): VFP form first, then
   the Neon dyadic encoding.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
14898
/* Encode VTST: element size matters, signedness does not, so only the
   untyped 8/16/32 widths are accepted.  */

static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
14907
14908 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14909 same types as the MAC equivalents. The polynomial type for this instruction
14910 is encoded the same as the integer type. */
14911
/* Encode VMUL: VFP form first; Neon scalar form shares the MAC-scalar
   path; otherwise a dyadic op that additionally allows P8 (polynomial),
   encoded like the integer types.  */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
14926
/* Encode VQDMULH/VQRDMULH: scalar or three-register form; only signed
   16/32-bit elements are valid.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
14948
/* Encode an absolute float compare (VACGE/VACGT family): F32 only.  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}
14957
/* Inverted absolute float compare: exchange operands, then encode as
   the non-inverted form.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
14964
/* Encode a Newton-Raphson step instruction (VRECPS/VRSQRTS style):
   F32 only; size comes from the opcode bitmask.  */

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
14972
/* Encode VABS/VNEG: try the VFP form first, then the Neon two-register
   form for signed integer or float elements.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
14998
/* Encode VSLI (shift left and insert).  The shift amount must be in
   [0, size-1].  */

static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15010
/* Encode VSRI (shift right and insert).  The shift amount must be in
   [1, size]; right shifts are encoded as size - imm.  */

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
15022
/* Encode VQSHLU (saturating shift left, unsigned result from signed
   operands).  Shift amount must be in [0, size-1].  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
15039
/* Encode VQMOVN (saturating narrowing move).  Operands may be signed or
   unsigned; the destination keeps the same signedness.  */

static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}
15054
/* Encode VQMOVUN (saturating narrowing move with unsigned result);
   operands must be signed.  */

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results.  Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
15064
/* Encode VQSHRN/VQRSHRN (saturating shift right and narrow).  A zero
   shift is assembled as VQMOVN instead.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
15091
/* Encode VQSHRUN/VQRSHRUN (saturating shift right and narrow, unsigned
   result from signed operands).  A zero shift is assembled as VQMOVUN.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing.  If operands are signed, results can be signed
     or unsigned.  If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
15121
/* Encode VMOVN (narrowing move): Q source halved into a D result.  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
15130
/* Encode VSHRN/VRSHRN (shift right and narrow).  A zero shift is
   assembled as VMOVN instead.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
15155
/* Encode VSHLL (shift left long).  A shift equal to the element size
   has its own "maximum shift" encoding; smaller shifts use the regular
   immediate-shift encoding with a stricter type check.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
15185
15186 /* Check the various types for the VCVT instruction, and return which version
15187 the current instruction is. */
15188
/* X-macro table of all VCVT conversion flavours.  Each CVT_VAR entry
   gives: a flavour name, destination type bits, source type bits, extra
   type-check bits (whole_reg / N_VFP / key, bound at each expansion
   site), and the VFP mnemonic strings for the bitshift, plain, and
   round-towards-zero forms (NULL where no such VFP form exists).
   Expanded several times below with different CVT_VAR definitions to
   build the flavour enum and the per-form mnemonic tables.  */
#define CVT_FLAVOUR_VAR \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
  /* Half-precision conversions.  */ \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
  /* VFP instructions.  */ \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
  /* VFP instructions with bitshift.  */ \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)

/* First expansion: each table entry becomes one enumerator.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* Flavours from here on have a VFP (rather than Neon) encoding.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
15226
/* Determine the conversion flavour of the current instruction by trying
   each CVT_VAR type signature in turn against the operands; the first
   that type-checks wins.  Returns neon_cvt_flavour_invalid if none
   match.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register.  Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
15252
/* Rounding behaviour requested by the various VCVT mnemonics.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* Used by VCVTA (see do_neon_cvta).  */
  neon_cvt_mode_n,	/* Used by VCVTN.  */
  neon_cvt_mode_p,	/* Used by VCVTP.  */
  neon_cvt_mode_m,	/* Used by VCVTM.  */
  neon_cvt_mode_z,	/* Round towards zero; used by plain VCVT.  */
  neon_cvt_mode_x,	/* Used by VCVTR (see do_neon_cvtr).  */
  neon_cvt_mode_r	/* Not referenced in this section of the file.  */
};
15263
15264 /* Neon-syntax VFP conversions. */
15265
/* Encode a Neon-syntax conversion using a VFP mnemonic.  Looks up the
   mnemonic string for FLAVOUR in the appropriate CVT_VAR column
   (bitshift form for immediate shapes, plain form otherwise) and
   dispatches to the generic VFP opcode encoder.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The bitshift forms are written Vd, Vd, #imm; fold the shift
	     immediate down into operand 1 for the VFP encoder.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  /* A NULL table entry means there is no VFP form for this flavour.  */
  if (opname)
    do_vfp_nsyn_opcode (opname);
}
15310
/* Encode a round-towards-zero VFP conversion via its "...z" mnemonic,
   when one exists for the detected flavour.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
15327
/* Encode an FPv8 (ARMv8 VFP) conversion with an explicit rounding mode
   (VCVTA/N/P/M family).  FLAVOUR selects the operand size (sz) and
   signedness (op); MODE selects the rm rounding-mode field.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* These encodings are unconditional and must not be in an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  inst.instruction |= 0xf0000000;
  inst.is_neon = TRUE;
}
15385
/* Common worker for all VCVT variants.  Selects the operand shape,
   determines the conversion flavour, and dispatches to the proper
   VFP or Neon encoding for the requested rounding MODE.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      /* Neon fixed-point conversion (with #fbits immediate).  */
      {
	unsigned immbits;
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	/* The encoding stores 32 minus the number of fraction bits.  */
	immbits = 32 - inst.operands[2].imm;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DD:
    case NS_QQ:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* ARMv8 Neon VCVT{A,N,P,M} with explicit rounding mode.  */
	  NEON_ENCODE (FLOAT, inst);
	  set_it_insn_type (OUTSIDE_IT_INSN);

	  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  /* Plain Neon integer <-> float conversion.  */
	  {
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	    NEON_ENCODE (INTEGER, inst);

	    if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	      return;

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      /* Base opcodes for VCVT f16.f32 (narrow) and f32.f16 (widen).  */
      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
15536
/* VCVTR: convert using the current rounding mode.  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
15542
/* VCVT: convert rounding towards zero.  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
15548
/* VCVTA: ARMv8 convert with "a" rounding mode.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
15554
/* VCVTN: ARMv8 convert with "n" rounding mode.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
15560
/* VCVTP: ARMv8 convert with "p" rounding mode.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
15566
/* VCVTM: ARMv8 convert with "m" rounding mode.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
15572
/* Encode VCVTB/VCVTT (half-precision <-> single/double).  T selects the
   top (vs. bottom) half; TO means converting *to* half precision;
   IS_DOUBLE selects the f64 variants (which require ARMv8 VFP).  */

static void
do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* The D register sits on the non-half-precision side.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
15588
/* Work out which VCVTB/VCVTT variant the operand types describe and
   encode it.  Each neon_check_type probe that fails leaves an error in
   inst.error, which is cleared before trying the next variant.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* f32 -> f16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* f16 -> f32.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
	 don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else
    return;
}
15630
/* VCVTB: convert using the bottom half of the F16 register.  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
15636
15637
/* VCVTT: convert using the top half of the F16 register.  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
15643
/* Encode the immediate forms of VMOV/VMVN.  Tries to find a "cmode"
   encoding for the 64-bit immediate held in IMMLO/IMMHI; if none
   exists, inverts the immediate and flips VMOV <-> VMVN (bit 5) and
   tries again, since some immediates are only representable in one of
   the two forms.  Reports "immediate out of range" if neither works.  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV.  Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Write back the possibly-flipped MOV/MVN bit.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
15695
15696 static void
15697 do_neon_mvn (void)
15698 {
15699 if (inst.operands[1].isreg)
15700 {
15701 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15702
15703 NEON_ENCODE (INTEGER, inst);
15704 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15705 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15706 inst.instruction |= LOW4 (inst.operands[1].reg);
15707 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15708 inst.instruction |= neon_quad (rs) << 6;
15709 }
15710 else
15711 {
15712 NEON_ENCODE (IMMED, inst);
15713 neon_move_immediate ();
15714 }
15715
15716 neon_dp_fixup (&inst);
15717 }
15718
15719 /* Encode instructions of form:
15720
15721 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15722 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
15723
/* Shared encoder for the long/wide/narrow three-register forms whose
   bit layout is given in the comment above.  SIZE (a power of two) is
   encoded log2 in bits [21:20]; the U bit (24) is set for unsigned
   element types.  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
15738
15739 static void
15740 do_neon_dyadic_long (void)
15741 {
15742 /* FIXME: Type checking for lengthening op. */
15743 struct neon_type_el et = neon_check_type (3, NS_QDD,
15744 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
15745 neon_mixed_length (et, et.size);
15746 }
15747
15748 static void
15749 do_neon_abal (void)
15750 {
15751 struct neon_type_el et = neon_check_type (3, NS_QDD,
15752 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
15753 neon_mixed_length (et, et.size);
15754 }
15755
/* Encode a long multiply(-accumulate) whose final operand may be
   either a scalar (NS_QDS shape) or a plain register (NS_QDD shape).
   NOTE(review): the parameter names read as swapped relative to their
   use — REGTYPES constrains the scalar variant and SCALARTYPES the
   register variant; both callers below pass arguments matching this
   actual usage, so behavior is correct.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
15774
/* Long multiply(-accumulate) where operand 2 may be a scalar.  The
   first argument (applied to the scalar form) excludes 8-bit types;
   the register form allows any S/U 8/16/32 element.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
15780
15781 static void
15782 do_neon_dyadic_wide (void)
15783 {
15784 struct neon_type_el et = neon_check_type (3, NS_QQD,
15785 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
15786 neon_mixed_length (et, et.size);
15787 }
15788
15789 static void
15790 do_neon_dyadic_narrow (void)
15791 {
15792 struct neon_type_el et = neon_check_type (3, NS_QDD,
15793 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
15794 /* Operand sign is unimportant, and the U bit is part of the opcode,
15795 so force the operand type to integer. */
15796 et.type = NT_integer;
15797 neon_mixed_length (et, et.size / 2);
15798 }
15799
/* Saturating long multiply(-accumulate) where operand 2 may be a
   scalar; only signed 16/32-bit element types in either form.  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
15805
/* VMULL: long multiply.  Scalar forms go via the shared
   scalar-or-register encoder; otherwise integer and polynomial
   (P8, and P64 with the ARMv8 crypto extension) forms are handled
   here.  */

static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Remap so that neon_logbits yields the 0b10 size encoding.  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
15837
/* VEXT: extract a contiguous sequence of bytes spanning two source
   registers.  The element-count immediate is converted to a byte
   offset, which must fit within the register length (8 bytes for a
   D register, 16 for a Q register).  */

static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Byte offset = element index * bytes per element.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
15859
/* VREV16/VREV32/VREV64: reverse elements within regions of a vector.
   The region width is implied by the opcode bits already set in the
   bitmask; the element type must be strictly narrower than it.  */

static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
15876
/* VDUP: duplicate either a scalar (one lane of a D register) or an
   ARM core register across all lanes of the destination vector.  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: Neon-only encoding; the lane index and element
	 size share the imm4 field (bits [19:16]).  */
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* Element size is spread across the b (bit 22) and e (bit 5)
	 fields.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
15927
15928 /* VMOV has particularly many variations. It can be one of:
15929 0. VMOV<c><q> <Qd>, <Qm>
15930 1. VMOV<c><q> <Dd>, <Dm>
15931 (Register operations, which are VORR with Rm = Rn.)
15932 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15933 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15934 (Immediate loads.)
15935 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15936 (ARM register to scalar.)
15937 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15938 (Two ARM registers to vector.)
15939 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15940 (Scalar to ARM register.)
15941 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15942 (Vector to two ARM registers.)
15943 8. VMOV.F32 <Sd>, <Sm>
15944 9. VMOV.F64 <Dd>, <Dm>
15945 (VFP register moves.)
15946 10. VMOV.F32 <Sd>, #imm
15947 11. VMOV.F64 <Dd>, #imm
15948 (VFP float immediate load.)
15949 12. VMOV <Rd>, <Sm>
15950 (VFP single to ARM reg.)
15951 13. VMOV <Sd>, <Rm>
15952 (ARM reg to VFP single.)
15953 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15954 (Two ARM regs to two VFP singles.)
15955 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15956 (Two VFP singles to two ARM regs.)
15957
15958 These cases can be disambiguated using neon_select_shape, except cases 1/9
15959 and 3/11 which depend on the operand type too.
15960
15961 All the encoded bits are hardcoded by this function.
15962
15963 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15964 Cases 5, 7 may be used with VFPv2 and above.
15965
15966 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15967 can specify a type where it doesn't make sense to, and is ignored). */
15968
/* Worker for every VMOV variant enumerated in the comment above.  The
   shape returned by neon_select_shape selects the variant; cases 1/9
   and 3/11 additionally need the element type to distinguish the Neon
   from the VFP form.  */

static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	/* VORR takes the same register for both sources (Rm = Rn).  */
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	neon_dp_fixup (&inst);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      neon_dp_fixup (&inst);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	int logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	/* .<size> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: ;
	  }

	/* Merge the lane index into the opcode/size bits.  */
	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	unsigned logsize;
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	/* .<dt> is optional here, defaulting to .32.  */
	if (inst.vectype.elems == 0
	    && inst.operands[0].vectype.type == NT_invtype
	    && inst.operands[1].vectype.type == NT_invtype)
	  {
	    inst.vectype.el[0].type = NT_untyped;
	    inst.vectype.el[0].size = 32;
	    inst.vectype.elems = 1;
	  }

	et = neon_check_type (2, NS_NULL,
			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	logsize = neon_logbits (et.size);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	/* Sign/zero extension of sub-word scalars is selected here.  */
	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: ;
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    case NS_NULL:
      /* neon_select_shape has determined that the instruction
	 shape is wrong and has already set the error message.  */
      break;

    default:
      abort ();
    }
}
16203
/* Right shift by immediate (V{R}SHR family).  A shift count of zero
   has no encoding of its own, so it is assembled as a plain VMOV; the
   valid range is otherwise 1..element size, encoded as
   (size - count) per the Neon shift-immediate scheme.  */

static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
16224
16225 static void
16226 do_neon_movl (void)
16227 {
16228 struct neon_type_el et = neon_check_type (2, NS_QD,
16229 N_EQK | N_DBL, N_SU_32 | N_KEY);
16230 unsigned sizebits = et.size >> 3;
16231 inst.instruction |= sizebits << 19;
16232 neon_two_same (0, et.type == NT_unsigned, -1);
16233 }
16234
16235 static void
16236 do_neon_trn (void)
16237 {
16238 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16239 struct neon_type_el et = neon_check_type (2, rs,
16240 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16241 NEON_ENCODE (INTEGER, inst);
16242 neon_two_same (neon_quad (rs), 1, et.size);
16243 }
16244
/* VZIP/VUZP: (un)zip vectors.  The 32-bit D-register form is encoded
   as VTRN.32, as noted below.  */

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}
16260
16261 static void
16262 do_neon_sat_abs_neg (void)
16263 {
16264 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16265 struct neon_type_el et = neon_check_type (2, rs,
16266 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16267 neon_two_same (neon_quad (rs), 1, et.size);
16268 }
16269
16270 static void
16271 do_neon_pair_long (void)
16272 {
16273 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16274 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16275 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16276 inst.instruction |= (et.type == NT_unsigned) << 7;
16277 neon_two_same (neon_quad (rs), 1, et.size);
16278 }
16279
16280 static void
16281 do_neon_recip_est (void)
16282 {
16283 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16284 struct neon_type_el et = neon_check_type (2, rs,
16285 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
16286 inst.instruction |= (et.type == NT_float) << 8;
16287 neon_two_same (neon_quad (rs), 1, et.size);
16288 }
16289
16290 static void
16291 do_neon_cls (void)
16292 {
16293 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16294 struct neon_type_el et = neon_check_type (2, rs,
16295 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16296 neon_two_same (neon_quad (rs), 1, et.size);
16297 }
16298
16299 static void
16300 do_neon_clz (void)
16301 {
16302 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16303 struct neon_type_el et = neon_check_type (2, rs,
16304 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16305 neon_two_same (neon_quad (rs), 1, et.size);
16306 }
16307
16308 static void
16309 do_neon_cnt (void)
16310 {
16311 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16312 struct neon_type_el et = neon_check_type (2, rs,
16313 N_EQK | N_INT, N_8 | N_KEY);
16314 neon_two_same (neon_quad (rs), 1, et.size);
16315 }
16316
16317 static void
16318 do_neon_swp (void)
16319 {
16320 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16321 neon_two_same (neon_quad (rs), 1, -1);
16322 }
16323
/* VTBL/VTBX: table lookup using a list of 1-4 D registers.  The list
   length minus one goes in bits [9:8].  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
16347
/* VLDM/VSTM: load/store multiple VFP registers.  Single-precision
   lists are delegated to the VFP encoder; the offset field counts
   words, hence two per D register.  */

static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
16377
16378 static void
16379 do_neon_ldr_str (void)
16380 {
16381 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16382
16383 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16384 And is UNPREDICTABLE in thumb mode. */
16385 if (!is_ldr
16386 && inst.operands[1].reg == REG_PC
16387 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16388 {
16389 if (thumb_mode)
16390 inst.error = _("Use of PC here is UNPREDICTABLE");
16391 else if (warn_on_deprecated)
16392 as_tsktsk (_("Use of PC here is deprecated"));
16393 }
16394
16395 if (inst.operands[0].issingle)
16396 {
16397 if (is_ldr)
16398 do_vfp_nsyn_opcode ("flds");
16399 else
16400 do_vfp_nsyn_opcode ("fsts");
16401 }
16402 else
16403 {
16404 if (is_ldr)
16405 do_vfp_nsyn_opcode ("fldd");
16406 else
16407 do_vfp_nsyn_opcode ("fstd");
16408 }
16409 }
16410
16411 /* "interleave" version also handles non-interleaving register VLD1/VST1
16412 instructions. */
16413
/* Encode the interleaving VLD<n>/VST<n> forms (and non-interleaving
   VLD1/VST1): validate alignment against the list length, then look up
   the "type" field from the register-list layout and the <n> already
   present in the opcode bits.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Alignment (in bits) was parsed into the top bits of the imm;
     128/256-bit alignment is only valid for long enough lists.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6].  We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask.  Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      _("bad element type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
16479
16480 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16481 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16482 otherwise. The variable arguments are a list of pairs of legal (size, align)
16483 values, terminated with -1. */
16484
16485 static int
16486 neon_alignment_bit (int size, int align, int *do_align, ...)
16487 {
16488 va_list ap;
16489 int result = FAIL, thissize, thisalign;
16490
16491 if (!inst.operands[1].immisalign)
16492 {
16493 *do_align = 0;
16494 return SUCCESS;
16495 }
16496
16497 va_start (ap, do_align);
16498
16499 do
16500 {
16501 thissize = va_arg (ap, int);
16502 if (thissize == -1)
16503 break;
16504 thisalign = va_arg (ap, int);
16505
16506 if (size == thissize && align == thisalign)
16507 result = SUCCESS;
16508 }
16509 while (result != SUCCESS);
16510
16511 va_end (ap);
16512
16513 if (result == SUCCESS)
16514 *do_align = 1;
16515 else
16516 first_error (_("unsupported alignment for instruction"));
16517
16518 return result;
16519 }
16520
/* Encode single n-element structure to/from one lane VLD<n>/VST<n>
   instructions.  <n> comes from bits [9:8] of the initial bitmask;
   list length, lane index, stride, and alignment are validated, then
   the alignment and lane bits are merged into the opcode.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
16605
16606 /* Encode single n-element structure to all lanes VLD<n> instructions. */
16607
static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  /* Nothing to encode if the element type was invalid.  */
  if (et.type == NT_invtype)
    return;

  /* Bits [9:8] of the base opcode select which of VLD1..VLD4 this is.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      /* Only one- or two-register lists, and no stride of 2.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Bit 5 encodes a register stride of 2.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      /* No alignment hint is permitted for the three-register form.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use the special size
	   encoding 0x3.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* Bit 4 flags that an (accepted) alignment was specified.  */
  inst.instruction |= do_align << 4;
}
16679
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */
16682
16683 static void
16684 do_neon_ldx_stx (void)
16685 {
16686 if (inst.operands[1].isreg)
16687 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16688
16689 switch (NEON_LANE (inst.operands[0].imm))
16690 {
16691 case NEON_INTERLEAVE_LANES:
16692 NEON_ENCODE (INTERLV, inst);
16693 do_neon_ld_st_interleave ();
16694 break;
16695
16696 case NEON_ALL_LANES:
16697 NEON_ENCODE (DUP, inst);
16698 if (inst.instruction == N_INV)
16699 {
16700 first_error ("only loads support such operands");
16701 break;
16702 }
16703 do_neon_ld_dup ();
16704 break;
16705
16706 default:
16707 NEON_ENCODE (LANE, inst);
16708 do_neon_ld_st_lane ();
16709 }
16710
16711 /* L bit comes from bit mask. */
16712 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16713 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16714 inst.instruction |= inst.operands[1].reg << 16;
16715
16716 if (inst.operands[1].postind)
16717 {
16718 int postreg = inst.operands[1].imm & 0xf;
16719 constraint (!inst.operands[1].immisreg,
16720 _("post-index must be a register"));
16721 constraint (postreg == 0xd || postreg == 0xf,
16722 _("bad register for post-index"));
16723 inst.instruction |= postreg;
16724 }
16725 else
16726 {
16727 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16728 constraint (inst.reloc.exp.X_op != O_constant
16729 || inst.reloc.exp.X_add_number != 0,
16730 BAD_ADDR_MODE);
16731
16732 if (inst.operands[1].writeback)
16733 {
16734 inst.instruction |= 0xd;
16735 }
16736 else
16737 inst.instruction |= 0xf;
16738 }
16739
16740 if (thumb_mode)
16741 inst.instruction |= 0xf9000000;
16742 else
16743 inst.instruction |= 0xf4000000;
16744 }
16745
16746 /* FP v8. */
/* Encode a three-operand FP v8 (ARMv8-A VFP) instruction, dispatching on
   shape RS: NS_FFF selects the single-precision form, anything else the
   double-precision D-register form.  */
static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF)
    do_vfp_sp_dyadic ();
  else
    do_vfp_dp_rd_rn_rm ();

  /* DDD shapes additionally set bit 8.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  inst.instruction |= 0xf0000000;
}
16768
16769 static void
16770 do_vsel (void)
16771 {
16772 set_it_insn_type (OUTSIDE_IT_INSN);
16773
16774 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16775 first_error (_("invalid instruction shape"));
16776 }
16777
16778 static void
16779 do_vmaxnm (void)
16780 {
16781 set_it_insn_type (OUTSIDE_IT_INSN);
16782
16783 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16784 return;
16785
16786 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16787 return;
16788
16789 neon_dyadic_misc (NT_untyped, N_F32, 0);
16790 }
16791
/* Common encoder for the VRINT family.  MODE selects the rounding-mode
   variant (r, z, x, a, n, p, m).  A VFP single/double encoding is tried
   first; if the type check fails, the Neon F32 vector encoding is
   attempted instead.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* The a/n/p/m variants may not be conditionalized.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_it_insn_type (OUTSIDE_IT_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      /* Discard the diagnostic from the failed VFP type check and retry
	 with the Neon type requirements.  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      set_it_insn_type (OUTSIDE_IT_INSN);
      NEON_ENCODE (FLOAT, inst);

      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
	return;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Rounding mode goes in bits [9:7]; the Neon encoding has no 'r'
	 (FPSCR-controlled) variant.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
16874
/* VRINTX: rounding-mode variant 'x'; see do_vrint_1.  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}
16880
/* VRINTZ: rounding-mode variant 'z'; see do_vrint_1.  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}
16886
/* VRINTR: rounding-mode variant 'r'; see do_vrint_1.  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}
16892
/* VRINTA: rounding-mode variant 'a'; see do_vrint_1.  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}
16898
/* VRINTN: rounding-mode variant 'n'; see do_vrint_1.  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}
16904
/* VRINTP: rounding-mode variant 'p'; see do_vrint_1.  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}
16910
/* VRINTM: rounding-mode variant 'm'; see do_vrint_1.  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
16916
16917 /* Crypto v1 instructions. */
/* Common encoder for the two-register crypto instructions.  ELTTYPE is
   the required element type (N_8 or N_32); OP, unless -1, is placed in
   bits [7:6].  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  /* Crypto instructions may not appear inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  /* Discard any stale diagnostic before encoding.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
16942
/* Common encoder for the three-register crypto instructions.  U and OP
   select the variant passed through to neon_three_same.  */
static void
do_crypto_3op_1 (int u, int op)
{
  /* Crypto instructions may not appear inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  /* Discard any stale diagnostic before encoding.  */
  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
16957
/* AESE: two-operand crypto op 0, 8-bit elements.  */
static void
do_aese (void)
{
  do_crypto_2op_1 (N_8, 0);
}
16963
/* AESD: two-operand crypto op 1, 8-bit elements.  */
static void
do_aesd (void)
{
  do_crypto_2op_1 (N_8, 1);
}
16969
/* AESMC: two-operand crypto op 2, 8-bit elements.  */
static void
do_aesmc (void)
{
  do_crypto_2op_1 (N_8, 2);
}
16975
/* AESIMC: two-operand crypto op 3, 8-bit elements.  */
static void
do_aesimc (void)
{
  do_crypto_2op_1 (N_8, 3);
}
16981
/* SHA1C: three-operand crypto, u=0, op=0.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}
16987
/* SHA1P: three-operand crypto, u=0, op=1.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}
16993
/* SHA1M: three-operand crypto, u=0, op=2.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}
16999
/* SHA1SU0: three-operand crypto, u=0, op=3.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}
17005
/* SHA256H: three-operand crypto, u=1, op=0.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}
17011
/* SHA256H2: three-operand crypto, u=1, op=1.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}
17017
/* SHA256SU1: three-operand crypto, u=1, op=2.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
17023
/* SHA1H: two-operand crypto, 32-bit elements, no op field.  */
static void
do_sha1h (void)
{
  do_crypto_2op_1 (N_32, -1);
}
17029
/* SHA1SU1: two-operand crypto op 0, 32-bit elements.  */
static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}
17035
/* SHA256SU0: two-operand crypto op 1, 32-bit elements.  */
static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
17041
/* Common encoder for the CRC32 family.  POLY selects the Castagnoli
   ('c') variants; SZ is the size field (0 = byte, 1 = halfword,
   2 = word).  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  /* CRC32 may not appear inside an IT block.  */
  set_it_insn_type (OUTSIDE_IT_INSN);
  /* Field positions differ between the ARM and Thumb encodings.  */
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* PC (any mode) or SP (Thumb) operands are unpredictable — warn only.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
  if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
    as_warn (UNPRED_REG ("r13"));
}
17061
/* CRC32B: standard polynomial, byte.  */
static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}
17067
/* CRC32H: standard polynomial, halfword.  */
static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}
17073
/* CRC32W: standard polynomial, word.  */
static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}
17079
/* CRC32CB: Castagnoli polynomial, byte.  */
static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}
17085
/* CRC32CH: Castagnoli polynomial, halfword.  */
static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}
17091
/* CRC32CW: Castagnoli polynomial, word.  */
static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
17097
17098 \f
17099 /* Overall per-instruction processing. */
17100
/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.

   FRAG and WHERE locate the instruction being fixed, SIZE is the fixup
   width in bytes, EXP the expression, PC_REL non-zero for pc-relative
   fixes, and RELOC the BFD reloc code to apply.  */

static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex cannot be represented directly in a reloc:
	 wrap the whole expression in an expression symbol instead.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
17161
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the relocation expression into the symbol/offset pair frag_var
     expects; anything more complex is folded into an expression symbol.  */
  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
17193
17194 /* Write a 32-bit thumb instruction to buf. */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  /* A 32-bit Thumb instruction is stored as two halfwords, the most
     significant halfword first.  */
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
17201
/* Write the assembled instruction in INST to the output frag, or report
   the queued error.  STR is the original source line, used only in
   diagnostics.  Also emits the fixup and DWARF line info for the
   instruction.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  /* Instructions needing relaxation get a variant frag instead.  */
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
17248
/* Emit (or rewrite) a Thumb IT instruction with condition COND and mask
   MASK.  If TO is NULL, fresh bytes are obtained from the current frag;
   otherwise the instruction previously emitted at TO is overwritten in
   place.  Returns the location written, so callers can later patch the
   mask as the IT block grows.  */
static char *
output_it_inst (int cond, int mask, char * to)
{
  unsigned long instruction = 0xbf00;	/* Base IT opcode.  */

  mask &= 0xf;
  instruction |= mask;
  instruction |= cond << 4;

  if (to == NULL)
    {
      to = frag_more (2);
#ifdef OBJ_ELF
      dwarf2_emit_insn (2);
#endif
    }

  md_number_to_chars (to, instruction, 2);

  return to;
}
17270
/* Tag values used in struct asm_opcode's tag field.  They tell the
   mnemonic lookup code (opcode_lookup) where, if anywhere, a conditional
   affix may appear within a given mnemonic.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
17304
17305 /* Subroutine of md_assemble, responsible for looking up the primary
17306 opcode from the mnemonic the user wrote. STR points to the
17307 beginning of the mnemonic.
17308
17309 This is not simply a hash table lookup, because of conditional
17310 variants. Most instructions have conditional variants, which are
17311 expressed with a _conditional affix_ to the mnemonic. If we were
17312 to encode each conditional variant as a literal string in the opcode
17313 table, it would have approximately 20,000 entries.
17314
17315 Most mnemonics take this affix as a suffix, and in unified syntax,
17316 'most' is upgraded to 'all'. However, in the divided syntax, some
17317 instructions take the affix as an infix, notably the s-variants of
17318 the arithmetic instructions. Of those instructions, all but six
17319 have the infix appear after the third character of the mnemonic.
17320
17321 Accordingly, the algorithm for looking up primary opcodes given
17322 an identifier is:
17323
17324 1. Look up the identifier in the opcode table.
17325 If we find a match, go to step U.
17326
17327 2. Look up the last two characters of the identifier in the
17328 conditions table. If we find a match, look up the first N-2
17329 characters of the identifier in the opcode table. If we
17330 find a match, go to step CE.
17331
17332 3. Look up the fourth and fifth characters of the identifier in
17333 the conditions table. If we find a match, extract those
17334 characters from the identifier, and look up the remaining
17335 characters in the opcode table. If we find a match, go
17336 to step CM.
17337
17338 4. Fail.
17339
17340 U. Examine the tag field of the opcode structure, in case this is
17341 one of the six instructions with its conditional infix in an
17342 unusual place. If it is, the tag tells us where to find the
17343 infix; look it up in the conditions table and set inst.cond
17344 accordingly. Otherwise, this is an unconditional instruction.
17345 Again set inst.cond accordingly. Return the opcode structure.
17346
17347 CE. Examine the tag field to make sure this is an instruction that
17348 should receive a conditional suffix. If it is not, fail.
17349 Otherwise, set inst.cond from the suffix we already looked up,
17350 and return the opcode structure.
17351
17352 CM. Examine the tag field to make sure this is an instruction that
17353 should receive a conditional infix after the third character.
17354 If it is not, fail. Otherwise, undo the edits to the current
17355 line of input and proceed as for case CE. */
17356
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      /* An odd-position infix: the tag encodes where the condition
	 characters sit within the mnemonic.  */
      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than three
     characters (two of them are the suffix itself).  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  /* In divided syntax these mnemonics take an infix, not a
	     suffix, so reject the suffixed form.  */
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily remove the two infix characters from the input line,
     look up the remaining mnemonic, then restore the line intact.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
17513
17514 /* This function generates an initial IT instruction, leaving its block
17515 virtually open for the new instructions. Eventually,
17516 the mask will be updated by now_it_add_mask () each time
17517 a new instruction needs to be included in the IT block.
17518 Finally, the block is closed with close_automatic_it_block ().
17519 The block closure can be requested either from md_assemble (),
17520 a tencode (), or due to a label hook. */
17521
static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  /* Seed mask for a one-instruction block; now_it_add_mask () will
     refine it as further instructions are appended.  */
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Remember where the IT insn was written so its mask can be patched
     in place later.  */
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
  now_it.warn_deprecated = FALSE;
  now_it.insn_cond = TRUE;
}
17534
17535 /* Close an automatic IT block.
17536 See comments in new_automatic_it_block (). */
17537
static void
close_automatic_it_block (void)
{
  /* Reset to the one-past-the-end mask value and empty block length,
     leaving the FSM ready for a fresh block.  */
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
17544
17545 /* Update the mask of the current automatically-generated IT
17546 instruction. See comments in new_automatic_it_block (). */
17547
static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
					  | ((bitvalue) << (nbit)))
  /* The low bit of the condition distinguishes this slot from the
     block's base condition (then vs. else — NOTE(review): per the IT
     mask encoding; confirm against the ARM ARM).  */
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  /* Place the then/else bit for the new slot, then move the trailing
     marker bit one position lower.  */
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			      (5 - now_it.block_length));
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1) );
  /* Rewrite the already-emitted IT instruction with the updated mask.  */
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
17568
/* The IT blocks handling machinery is accessed through these functions:
17570 it_fsm_pre_encode () from md_assemble ()
17571 set_it_insn_type () optional, from the tencode functions
17572 set_it_insn_type_last () ditto
17573 in_it_block () ditto
17574 it_fsm_post_encode () from md_assemble ()
     force_automatic_it_block_close () from label handling functions
17576
17577 Rationale:
17578 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17579 initializing the IT insn type with a generic initial value depending
17580 on the inst.condition.
17581 2) During the tencode function, two things may happen:
17582 a) The tencode function overrides the IT insn type by
17583 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17584 b) The tencode function queries the IT block state by
17585 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17586
17587 Both set_it_insn_type and in_it_block run the internal FSM state
17588 handling function (handle_it_state), because: a) setting the IT insn
   type may result in an invalid state (exiting the function),
17590 and b) querying the state requires the FSM to be updated.
17591 Specifically we want to avoid creating an IT block for conditional
17592 branches, so it_fsm_pre_encode is actually a guess and we can't
17593 determine whether an IT block is required until the tencode () routine
   has decided what type of instruction this actually is.
17595 Because of this, if set_it_insn_type and in_it_block have to be used,
17596 set_it_insn_type has to be called first.
17597
17598 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17599 determines the insn IT type depending on the inst.cond code.
17600 When a tencode () routine encodes an instruction that can be
17601 either outside an IT block, or, in the case of being inside, has to be
17602 the last one, set_it_insn_type_last () will determine the proper
17603 IT instruction type based on the inst.cond code. Otherwise,
17604 set_it_insn_type can be called for overriding that logic or
17605 for covering other cases.
17606
17607 Calling handle_it_state () may not transition the IT block state to
   OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17609 still queried. Instead, if the FSM determines that the state should
17610 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17611 after the tencode () function: that's what it_fsm_post_encode () does.
17612
17613 Since in_it_block () calls the state handling function to get an
17614 updated state, an error may occur (due to invalid insns combination).
17615 In that case, inst.error is set.
17616 Therefore, inst.error has to be checked after the execution of
17617 the tencode () routine.
17618
17619 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17620 any pending state change (if any) that didn't take place in
17621 handle_it_state () as explained above. */
17622
17623 static void
17624 it_fsm_pre_encode (void)
17625 {
17626 if (inst.cond != COND_ALWAYS)
17627 inst.it_insn_type = INSIDE_IT_INSN;
17628 else
17629 inst.it_insn_type = OUTSIDE_IT_INSN;
17630
17631 now_it.state_handled = 0;
17632 }
17633
/* IT state FSM handling function.  Advances the now_it state machine for
   the insn currently being assembled, using inst.it_insn_type (set by
   it_fsm_pre_encode or overridden by the tencode routine) and inst.cond.
   Returns SUCCESS, or FAIL with inst.error set on an invalid insn/IT
   combination.  */

static int
handle_it_state (void)
{
  /* Record that the FSM ran for this insn so it_fsm_post_encode () and
     in_it_block () do not run it a second time.  */
  now_it.state_handled = 1;
  now_it.insn_cond = FALSE;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      /* Assembling for ARM mode: a conditional insn is fine here,
		 but warn if it would need an IT block when re-assembled
		 for Thumb under unified syntax.  */
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  /* Conditional insn outside an IT block and no implicit
		     IT generation available: hard error.  */
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  /* An explicit IT insn opens a manually-managed block.  */
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  /* An IT block holds at most 4 insns; an incompatible condition
	     also forces a new block.  */
	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_it.insn_cond = TRUE;
	      now_it_add_mask (inst.cond);
	    }

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  /* Insn valid either way (e.g. unconditional-in-IT); keep the
	     block open unless it is already full.  */
	  now_it.block_length++;
	  now_it.insn_cond = TRUE;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  /* An explicit IT terminates the automatic block and switches to
	     manual management.  */
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  The condition expected for this
	   slot is derived from the block's base condition and the next
	   then/else bit of the mask.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;
	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);
	now_it.insn_cond = TRUE;

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    /* Unconditional insn in a manual IT block: error.  */
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		/* Suffix does not match the IT mask for this slot.  */
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		/* Branch-like insns must be the final slot of the block.  */
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    /* IT inside an IT block is invalid.  */
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
17797
/* One entry describing a class of 16-bit encodings deprecated inside an
   IT block: insns matching (insn & mask) == pattern belong to the class
   named by DESCRIPTION.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Bits that identify the class.  */
  unsigned long mask;		/* Which bits of the encoding to test.  */
  const char* description;	/* Human-readable class name (translated).  */
};
17804
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Scanned linearly by it_fsm_post_encode (); terminated by the
   all-zero sentinel entry (mask == 0).  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
17819
/* Commit any pending IT state change after the tencode () routine has
   run, and emit ARMv8 deprecation diagnostics for conditional insns
   inside an IT block.  Called from md_assemble () after encoding.  */

static void
it_fsm_post_encode (void)
{
  int is_last;

  /* If the tencode () routine never queried the state, run the FSM now.  */
  if (!now_it.state_handled)
    handle_it_state ();

  /* At most one deprecation warning per IT block (warn_deprecated is
     sticky for the block).  */
  if (now_it.insn_cond
      && !now_it.warn_deprecated
      && warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    {
      /* A value above 0xffff means a 32-bit Thumb encoding.  */
      if (inst.instruction >= 0x10000)
	{
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		       "deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
      else
	{
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb instructions "
			       "of the following class are deprecated in ARMv8: "
			       "%s"), p->description);
		  now_it.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      if (now_it.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		       "instruction are deprecated in ARMv8"));
	  now_it.warn_deprecated = TRUE;
	}
    }

  /* A mask of exactly 0x10 means the final slot was consumed; leave the
     IT block.  */
  is_last = (now_it.mask == 0x10);
  if (is_last)
    {
      now_it.state = OUTSIDE_IT_BLOCK;
      now_it.mask = 0;
    }
}
17873
17874 static void
17875 force_automatic_it_block_close (void)
17876 {
17877 if (now_it.state == AUTOMATIC_IT_BLOCK)
17878 {
17879 close_automatic_it_block ();
17880 now_it.state = OUTSIDE_IT_BLOCK;
17881 now_it.mask = 0;
17882 }
17883 }
17884
17885 static int
17886 in_it_block (void)
17887 {
17888 if (!now_it.state_handled)
17889 handle_it_state ();
17890
17891 return now_it.state != OUTSIDE_IT_BLOCK;
17892 }
17893
17894 /* Whether OPCODE only has T32 encoding. Since this function is only used by
17895 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
17896 here, hence the "known" in the function name. */
17897
17898 static bfd_boolean
17899 known_t32_only_insn (const struct asm_opcode *opcode)
17900 {
17901 /* Original Thumb-1 wide instruction. */
17902 if (opcode->tencode == do_t_blx
17903 || opcode->tencode == do_t_branch23
17904 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
17905 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
17906 return TRUE;
17907
17908 /* Wide-only instruction added to ARMv8-M. */
17909 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m)
17910 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
17911 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
17912 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
17913 return TRUE;
17914
17915 return FALSE;
17916 }
17917
17918 /* Whether wide instruction variant can be used if available for a valid OPCODE
17919 in ARCH. */
17920
17921 static bfd_boolean
17922 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
17923 {
17924 if (known_t32_only_insn (opcode))
17925 return TRUE;
17926
17927 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
17928 of variant T3 of B.W is checked in do_t_branch. */
17929 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
17930 && opcode->tencode == do_t_branch)
17931 return TRUE;
17932
17933 /* Wide instruction variants of all instructions with narrow *and* wide
17934 variants become available with ARMv6t2. Other opcodes are either
17935 narrow-only or wide-only and are thus available if OPCODE is valid. */
17936 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
17937 return TRUE;
17938
17939 /* OPCODE with narrow only instruction variant or wide variant not
17940 available. */
17941 return FALSE;
17942 }
17943
/* Main entry point for assembling one source line STR.  Looks up the
   mnemonic, validates it against the selected CPU/mode, parses the
   operands, runs the appropriate (Thumb or ARM) encoder bracketed by
   the IT-block FSM, and emits the insn.  Errors are reported via
   as_bad () / inst.error.  */

void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Start from a clean per-insn state.  */
  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 0xe800..0xffff is the first halfword of a 32-bit encoding;
	     a 16-bit insn must not fall in that range.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      /* ARM insns are always 4 bytes; fold the condition (or 0xF for
	 unconditional encodings) into the top nibble.  */
      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
18133
/* Diagnose any manual IT block left open at the end of assembly: for
   ELF, check the saved per-section IT state of every section; otherwise
   only the single global state is available.  */

static void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      {
	as_warn (_("section '%s' finished with an open IT block."),
		 sect->name);
      }
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
18152
18153 /* Various frobbings of labels and their addresses. */
18154
/* Called at the start of every input line: forget the label seen on the
   previous line so md_assemble () does not re-align a stale one.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
18160
/* Hook run when label SYM is defined: remember it for md_assemble (),
   tag it with the current Thumb/interworking state, close any open
   automatic IT block, and mark non-local code labels as Thumb function
   entry points when requested via .thumb_func.  */

void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label must not land in the middle of an implicit IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
18219
18220 bfd_boolean
18221 arm_data_in_code (void)
18222 {
18223 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
18224 {
18225 *input_line_pointer = '/';
18226 input_line_pointer += 5;
18227 *input_line_pointer = 0;
18228 return TRUE;
18229 }
18230
18231 return FALSE;
18232 }
18233
/* Canonicalize symbol NAME: in Thumb mode, strip a trailing "/data"
   suffix (the counterpart of the marker handled by arm_data_in_code)
   by truncating NAME in place.  Returns NAME.  */

char *
arm_canonicalize_symbol_name (char * name)
{
  int len;

  /* Only names longer than the 5-char suffix can carry it.  */
  if (thumb_mode && (len = strlen (name)) > 5
      && streq (name + len - 5, "/data"))
    *(name + len - 5) = 0;

  return name;
}
18245 \f
18246 /* Table of all register names defined by default. The user can
18247 define additional names with .req. Note that all register names
18248 should appear in both upper and lowercase variants. Some registers
18249 also have mixed-case names. */
18250
/* REGDEF emits one reg_entry: name string, internal number, and type.
   REGNUM builds the name from prefix##number; REGNUM2 doubles the
   number (used for Q registers, each spanning two D registers).
   REGSET/REGSETH expand to registers 0-15 / 16-31; REGSET2 to 16
   doubled-number entries.  SPLRBANK emits the lr/sp/spsr triplet for
   one banked mode in both lower and upper case.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
18276
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  NOTE(review): the 512|/768| tags and the
     register index in bits 16+ appear to be the encodings consumed by
     the banked MRS/MSR handling -- confirm against the reg_entry
     users before relying on them.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
18394
18395 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18396 within psr_required_here. */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  Every permutation of up to four distinct
     letters is listed explicitly, so any suffix ordering the user
     writes resolves to the same mask.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
18473
/* Table of V7M psr names.  NOTE(review): the values appear to be the
   M-profile MRS/MSR special-register encodings -- confirm against the
   ARMv7-M reference before relying on specific numbers.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",	  0 }, {"APSR",		0 },
  {"iapsr",	  1 }, {"IAPSR",	1 },
  {"eapsr",	  2 }, {"EAPSR",	2 },
  {"psr",	  3 }, {"PSR",		3 },
  {"xpsr",	  3 }, {"XPSR",		3 }, {"xPSR",	  3 },
  {"ipsr",	  5 }, {"IPSR",		5 },
  {"epsr",	  6 }, {"EPSR",		6 },
  {"iepsr",	  7 }, {"IEPSR",	7 },
  {"msp",	  8 }, {"MSP",		8 },
  {"psp",	  9 }, {"PSP",		9 },
  {"primask",	  16}, {"PRIMASK",	16},
  {"basepri",	  17}, {"BASEPRI",	17},
  {"basepri_max", 18}, {"BASEPRI_MAX",	18},
  {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility.  */
  {"faultmask",	  19}, {"FAULTMASK",	19},
  {"control",	  20}, {"CONTROL",	20}
};
18494
/* Table of all shift-in-operand names.  "asl" is accepted as a synonym
   for "lsl"; both lower- and upper-case spellings are listed.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX }
};
18505
/* Table of all explicit relocation names (ELF only).  Each relocation
   is listed in both lower and upper case.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},	 { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},	 { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},	 { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
};
#endif
18530
/* Table of all conditional affixes.  0xF is not defined as a condition code.
   Synonym spellings (hs/cs, lo/ul/cc) map to the same encoding.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
18550
/* UL_BARRIER emits a barrier option in both lower (L) and upper (U)
   case spellings, with its 4-bit option CODE gated on architecture
   feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
18576
/* Table of ARM-format instructions.	    */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.
   Each argument is an operand-parser code written WITHOUT its OP_
   prefix; OPSn builds the operand array for an n-operand entry.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18601
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.

   Common argument conventions for the entry macros below:
     mnem  - the mnemonic, as a quoted string (unless noted otherwise);
     op    - the ARM opcode bits, written as bare hex digits and pasted
	     onto 0x by the macro;
     top   - the Thumb opcode bits (numeric), or a T_MNEM_xyz enumerator
	     for the lower-case t* variants;
     nops  - the operand count, selecting an OPSn macro above;
     ops   - the parenthesized operand-code list handed to OPSn;
     ae/te - suffixes naming the ARM/Thumb encoding functions do_<ae>
	     and do_<te>.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  The TxC3w/TC3w/tC3w forms use
   OT_cinfix3_deprecated instead of OT_cinfix3.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18633
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  Both opcode arguments
   are bare hex digits pasted onto 0x.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
18653
/* ARM-only variants of all the above.  These leave the Thumb opcode
   slot 0 and the Thumb encoder NULL.  Note the mnemonic conventions:
   CE/CL take an already-quoted string, whereas C3 takes a bare
   identifier and stringizes it itself (#mnem).  */
#define CE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is the ARM opcode with the 0xe condition nibble pasted
   in front (0xe##op); both modes share one encoder.  */
#define cCE(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18683
/* Build one entry for the "odd infix" mnemonics, whose spelling is
   m1<m2>m3 with the condition m2 embedded in the middle.  When m2 is
   empty, sizeof (#m2) == 1 (just the NUL), so the tag becomes
   OT_odd_infix_unc; otherwise the tag encodes the infix position as
   OT_odd_infix_0 plus the length of the leading part m1.  m1 and m3
   are quoted strings supplied by the caller; m2 is stringized here.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Expand to one xCM_ entry per condition affix (matching the conds[]
   table above), plus a first entry with no affix at all.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only unconditional entries; unlike CE/CL these take a bare
   identifier and stringize it (#mnem).  UF additionally marks the
   entry as bearing 0xF in the condition field (OT_unconditionalF).  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
18715
/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  The mnemonic is a bare
   identifier, stringized here.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  Here `op' is an
   N_MNEM_xyz enumerator suffix rather than hex opcode digits.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  `tag' selects the OT_ suffix style.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)

/* Lets an entry pass `0' as an encoder-function suffix: do_##ae then
   expands to do_0, which this defines as 0 (i.e. no encoder).  */
#define do_0 0
18753
18754 static const struct asm_opcode insns[] =
18755 {
18756 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18757 #define THUMB_VARIANT & arm_ext_v4t
18758 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
18759 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
18760 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
18761 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
18762 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
18763 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
18764 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
18765 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
18766 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
18767 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
18768 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
18769 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
18770 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
18771 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
18772 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
18773 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
18774
18775 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18776 for setting PSR flag bits. They are obsolete in V6 and do not
18777 have Thumb equivalents. */
18778 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18779 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18780 CL("tstp", 110f000, 2, (RR, SH), cmp),
18781 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18782 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18783 CL("cmpp", 150f000, 2, (RR, SH), cmp),
18784 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18785 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18786 CL("cmnp", 170f000, 2, (RR, SH), cmp),
18787
18788 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
18789 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
18790 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18791 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18792
18793 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18794 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18795 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18796 OP_RRnpc),
18797 OP_ADDRGLDR),ldst, t_ldst),
18798 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18799
18800 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18801 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18802 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18803 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18804 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18805 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18806
18807 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18808 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18809 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18810 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18811
18812 /* Pseudo ops. */
18813 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18814 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18815 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18816 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
18817
18818 /* Thumb-compatibility pseudo ops. */
18819 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18820 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18821 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18822 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18823 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18824 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18825 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18826 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18827 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18828 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18829 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18830 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18831
18832 /* These may simplify to neg. */
18833 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18834 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18835
18836 #undef THUMB_VARIANT
18837 #define THUMB_VARIANT & arm_ext_v6
18838
18839 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
18840
18841 /* V1 instructions with no Thumb analogue prior to V6T2. */
18842 #undef THUMB_VARIANT
18843 #define THUMB_VARIANT & arm_ext_v6t2
18844
18845 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18846 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18847 CL("teqp", 130f000, 2, (RR, SH), cmp),
18848
18849 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18850 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18851 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
18852 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18853
18854 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18855 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18856
18857 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18858 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18859
18860 /* V1 instructions with no Thumb analogue at all. */
18861 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18862 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18863
18864 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18865 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18866 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18867 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18868 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18869 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18870 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18871 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18872
18873 #undef ARM_VARIANT
18874 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18875 #undef THUMB_VARIANT
18876 #define THUMB_VARIANT & arm_ext_v4t
18877
18878 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18879 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18880
18881 #undef THUMB_VARIANT
18882 #define THUMB_VARIANT & arm_ext_v6t2
18883
18884 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18885 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18886
18887 /* Generic coprocessor instructions. */
18888 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18889 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18890 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18891 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18892 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18893 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18894 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18895
18896 #undef ARM_VARIANT
18897 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18898
18899 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18900 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18901
18902 #undef ARM_VARIANT
18903 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18904 #undef THUMB_VARIANT
18905 #define THUMB_VARIANT & arm_ext_msr
18906
18907 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18908 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18909
18910 #undef ARM_VARIANT
18911 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18912 #undef THUMB_VARIANT
18913 #define THUMB_VARIANT & arm_ext_v6t2
18914
18915 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18916 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18917 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18918 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18919 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18920 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18921 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18922 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18923
18924 #undef ARM_VARIANT
18925 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18926 #undef THUMB_VARIANT
18927 #define THUMB_VARIANT & arm_ext_v4t
18928
18929 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18930 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18931 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18932 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18933 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18934 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18935
18936 #undef ARM_VARIANT
18937 #define ARM_VARIANT & arm_ext_v4t_5
18938
18939 /* ARM Architecture 4T. */
18940 /* Note: bx (and blx) are required on V5, even if the processor does
18941 not support Thumb. */
18942 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18943
18944 #undef ARM_VARIANT
18945 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18946 #undef THUMB_VARIANT
18947 #define THUMB_VARIANT & arm_ext_v5t
18948
18949 /* Note: blx has 2 variants; the .value coded here is for
18950 BLX(2). Only this variant has conditional execution. */
18951 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18952 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18953
18954 #undef THUMB_VARIANT
18955 #define THUMB_VARIANT & arm_ext_v6t2
18956
18957 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18958 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18959 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18960 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18961 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18962 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18963 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18964 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18965
18966 #undef ARM_VARIANT
18967 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18968 #undef THUMB_VARIANT
18969 #define THUMB_VARIANT & arm_ext_v5exp
18970
18971 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18972 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18973 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18974 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18975
18976 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18977 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18978
18979 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18980 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18981 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18982 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18983
18984 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18985 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18986 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18987 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18988
18989 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18990 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18991
18992 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18993 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18994 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18995 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18996
18997 #undef ARM_VARIANT
18998 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18999 #undef THUMB_VARIANT
19000 #define THUMB_VARIANT & arm_ext_v6t2
19001
19002 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19003 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19004 ldrd, t_ldstd),
19005 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19006 ADDRGLDRS), ldrd, t_ldstd),
19007
19008 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19009 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19010
19011 #undef ARM_VARIANT
19012 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19013
19014 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19015
19016 #undef ARM_VARIANT
19017 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19018 #undef THUMB_VARIANT
19019 #define THUMB_VARIANT & arm_ext_v6
19020
19021 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19022 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19023 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19024 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19025 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19026 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19027 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19028 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19029 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19030 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19031
19032 #undef THUMB_VARIANT
19033 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19034
19035 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19036 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19037 strex, t_strex),
19038 #undef THUMB_VARIANT
19039 #define THUMB_VARIANT & arm_ext_v6t2
19040
19041 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19042 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19043
19044 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19045 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19046
19047 /* ARM V6 not included in V7M. */
19048 #undef THUMB_VARIANT
19049 #define THUMB_VARIANT & arm_ext_v6_notm
19050 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19051 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19052 UF(rfeib, 9900a00, 1, (RRw), rfe),
19053 UF(rfeda, 8100a00, 1, (RRw), rfe),
19054 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19055 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19056 UF(rfefa, 8100a00, 1, (RRw), rfe),
19057 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19058 UF(rfeed, 9900a00, 1, (RRw), rfe),
19059 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19060 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19061 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19062 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19063 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19064 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19065 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19066 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19067 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19068 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19069
19070 /* ARM V6 not included in V7M (eg. integer SIMD). */
19071 #undef THUMB_VARIANT
19072 #define THUMB_VARIANT & arm_ext_v6_dsp
19073 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19074 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19075 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19076 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19077 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19078 /* Old name for QASX. */
19079 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19080 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19081 /* Old name for QSAX. */
19082 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19083 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19084 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19085 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19086 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19087 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19088 /* Old name for SASX. */
19089 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19090 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19091 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19092 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19093 /* Old name for SHASX. */
19094 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19095 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19096 /* Old name for SHSAX. */
19097 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19098 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19099 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19100 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19101 /* Old name for SSAX. */
19102 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19103 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19104 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19105 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19106 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19107 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19108 /* Old name for UASX. */
19109 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19110 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19111 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19112 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19113 /* Old name for UHASX. */
19114 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19115 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19116 /* Old name for UHSAX. */
19117 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19118 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19119 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19120 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19121 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19122 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19123 /* Old name for UQASX. */
19124 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19125 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19126 /* Old name for UQSAX. */
19127 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19128 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19129 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19130 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19131 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19132 /* Old name for USAX. */
19133 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19134 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19135 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19136 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19137 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19138 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19139 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19140 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19141 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19142 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19143 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19144 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19145 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19146 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19147 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19148 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19149 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19150 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19151 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19152 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19153 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19154 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19155 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19156 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19157 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19158 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19159 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19160 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19161 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19162 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19163 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19164 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19165 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19166 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19167
19168 #undef ARM_VARIANT
19169 #define ARM_VARIANT & arm_ext_v6k
19170 #undef THUMB_VARIANT
19171 #define THUMB_VARIANT & arm_ext_v6k
19172
19173 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19174 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19175 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
19176 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
19177
19178 #undef THUMB_VARIANT
19179 #define THUMB_VARIANT & arm_ext_v6_notm
19180 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
19181 ldrexd, t_ldrexd),
19182 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
19183 RRnpcb), strexd, t_strexd),
19184
19185 #undef THUMB_VARIANT
19186 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19187 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
19188 rd_rn, rd_rn),
19189 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
19190 rd_rn, rd_rn),
19191 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19192 strex, t_strexbh),
19193 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19194 strex, t_strexbh),
19195 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
19196
19197 #undef ARM_VARIANT
19198 #define ARM_VARIANT & arm_ext_sec
19199 #undef THUMB_VARIANT
19200 #define THUMB_VARIANT & arm_ext_sec
19201
19202 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
19203
19204 #undef ARM_VARIANT
19205 #define ARM_VARIANT & arm_ext_virt
19206 #undef THUMB_VARIANT
19207 #define THUMB_VARIANT & arm_ext_virt
19208
19209 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
19210 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
19211
19212 #undef ARM_VARIANT
19213 #define ARM_VARIANT & arm_ext_pan
19214 #undef THUMB_VARIANT
19215 #define THUMB_VARIANT & arm_ext_pan
19216
19217 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
19218
19219 #undef ARM_VARIANT
19220 #define ARM_VARIANT & arm_ext_v6t2
19221 #undef THUMB_VARIANT
19222 #define THUMB_VARIANT & arm_ext_v6t2
19223
19224 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
19225 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
19226 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19227 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
19228
19229 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19230 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
19231
19232 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19233 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19234 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19235 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
19236
19237 #undef THUMB_VARIANT
19238 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19239 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
19240 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
19241
19242 /* Thumb-only instructions. */
19243 #undef ARM_VARIANT
19244 #define ARM_VARIANT NULL
19245 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
19246 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
19247
19248 /* ARM does not really have an IT instruction, so always allow it.
19249 The opcode is copied from Thumb in order to allow warnings in
19250 -mimplicit-it=[never | arm] modes. */
19251 #undef ARM_VARIANT
19252 #define ARM_VARIANT & arm_ext_v1
19253 #undef THUMB_VARIANT
19254 #define THUMB_VARIANT & arm_ext_v6t2
19255
19256 TUE("it", bf08, bf08, 1, (COND), it, t_it),
19257 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
19258 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
19259 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
19260 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
19261 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
19262 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
19263 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
19264 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
19265 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
19266 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
19267 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
19268 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
19269 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
19270 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
19271 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
19272 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
19273 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
19274
19275 /* Thumb2 only instructions. */
19276 #undef ARM_VARIANT
19277 #define ARM_VARIANT NULL
19278
19279 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19280 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
19281 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
19282 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
19283 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
19284 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
19285
19286 /* Hardware division instructions. */
19287 #undef ARM_VARIANT
19288 #define ARM_VARIANT & arm_ext_adiv
19289 #undef THUMB_VARIANT
19290 #define THUMB_VARIANT & arm_ext_div
19291
19292 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
19293 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
19294
19295 /* ARM V6M/V7 instructions. */
19296 #undef ARM_VARIANT
19297 #define ARM_VARIANT & arm_ext_barrier
19298 #undef THUMB_VARIANT
19299 #define THUMB_VARIANT & arm_ext_barrier
19300
19301 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
19302 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
19303 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
19304
19305 /* ARM V7 instructions. */
19306 #undef ARM_VARIANT
19307 #define ARM_VARIANT & arm_ext_v7
19308 #undef THUMB_VARIANT
19309 #define THUMB_VARIANT & arm_ext_v7
19310
19311 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
19312 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
19313
19314 #undef ARM_VARIANT
19315 #define ARM_VARIANT & arm_ext_mp
19316 #undef THUMB_VARIANT
19317 #define THUMB_VARIANT & arm_ext_mp
19318
19319 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
19320
19321 /* AArchv8 instructions. */
19322 #undef ARM_VARIANT
19323 #define ARM_VARIANT & arm_ext_v8
19324
19325 /* Instructions shared between armv8-a and armv8-m. */
19326 #undef THUMB_VARIANT
19327 #define THUMB_VARIANT & arm_ext_atomics
19328
19329 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19330 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19331 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19332 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19333 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19334 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
19335 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19336 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
19337 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
19338 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
19339 stlex, t_stlex),
19340 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
19341 stlex, t_stlex),
19342 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
19343 stlex, t_stlex),
19344 #undef THUMB_VARIANT
19345 #define THUMB_VARIANT & arm_ext_v8
19346
19347 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
19348 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
19349 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
19350 ldrexd, t_ldrexd),
19351 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
19352 strexd, t_strexd),
19353 /* ARMv8 T32 only. */
19354 #undef ARM_VARIANT
19355 #define ARM_VARIANT NULL
19356 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
19357 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
19358 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
19359
19360 /* FP for ARMv8. */
19361 #undef ARM_VARIANT
19362 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
19363 #undef THUMB_VARIANT
19364 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
19365
19366 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
19367 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
19368 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
19369 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
19370 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19371 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
19372 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
19373 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
19374 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
19375 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
19376 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
19377 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
19378 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
19379 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
19380 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
19381 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
19382 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
19383
19384 /* Crypto v1 extensions. */
19385 #undef ARM_VARIANT
19386 #define ARM_VARIANT & fpu_crypto_ext_armv8
19387 #undef THUMB_VARIANT
19388 #define THUMB_VARIANT & fpu_crypto_ext_armv8
19389
19390 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
19391 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
19392 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
19393 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
19394 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
19395 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
19396 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
19397 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
19398 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
19399 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
19400 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
19401 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
19402 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
19403 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19404
19405 #undef ARM_VARIANT
19406 #define ARM_VARIANT & crc_ext_armv8
19407 #undef THUMB_VARIANT
19408 #define THUMB_VARIANT & crc_ext_armv8
19409 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19410 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19411 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19412 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19413 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19414 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19415
19416 /* ARMv8.2 RAS extension. */
19417 #undef ARM_VARIANT
19418 #define ARM_VARIANT & arm_ext_v8_2
19419 #undef THUMB_VARIANT
19420 #define THUMB_VARIANT & arm_ext_v8_2
19421 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
19422
19423 #undef ARM_VARIANT
19424 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19425 #undef THUMB_VARIANT
19426 #define THUMB_VARIANT NULL
19427
19428 cCE("wfs", e200110, 1, (RR), rd),
19429 cCE("rfs", e300110, 1, (RR), rd),
19430 cCE("wfc", e400110, 1, (RR), rd),
19431 cCE("rfc", e500110, 1, (RR), rd),
19432
19433 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19434 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19435 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19436 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19437
19438 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19439 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19440 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19441 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19442
19443 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19444 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19445 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19446 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19447 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19448 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19449 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19450 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19451 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19452 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19453 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19454 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19455
19456 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19457 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19458 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19459 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19460 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19461 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19462 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19463 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19464 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19465 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19466 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19467 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19468
19469 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19470 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19471 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19472 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19473 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19474 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19475 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19476 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19477 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19478 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19479 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19480 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19481
19482 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19483 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19484 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19485 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19486 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19487 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19488 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19489 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19490 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19491 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19492 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19493 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19494
19495 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19496 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19497 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19498 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19499 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19500 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19501 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19502 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19503 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19504 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19505 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19506 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19507
19508 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19509 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19510 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19511 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19512 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19513 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19514 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19515 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19516 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19517 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19518 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19519 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19520
19521 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19522 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19523 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19524 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19525 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19526 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19527 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19528 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19529 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19530 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19531 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19532 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
19533
19534 	cCL("exps",	e708100, 2, (RF, RF_IF),      rd_rm),
19535 	cCL("expsp",	e708120, 2, (RF, RF_IF),      rd_rm),
19536 	cCL("expsm",	e708140, 2, (RF, RF_IF),      rd_rm),
19537 	cCL("expsz",	e708160, 2, (RF, RF_IF),      rd_rm),
19538 	cCL("expd",	e708180, 2, (RF, RF_IF),      rd_rm),
19539 	cCL("expdp",	e7081a0, 2, (RF, RF_IF),      rd_rm),
19540 	cCL("expdm",	e7081c0, 2, (RF, RF_IF),      rd_rm),
19541 	cCL("expdz",	e7081e0, 2, (RF, RF_IF),      rd_rm),
19542 	cCL("expe",	e788100, 2, (RF, RF_IF),      rd_rm),
19543 	cCL("expep",	e788120, 2, (RF, RF_IF),      rd_rm),
19544 	cCL("expem",	e788140, 2, (RF, RF_IF),      rd_rm),
19545 	cCL("expez",	e788160, 2, (RF, RF_IF),      rd_rm), /* Was "expdz": duplicated the entry above; the e788160 encoding is the extended-precision, round-towards-zero variant, so the mnemonic must be "expez" (cf. every sibling group: logez, sinez, cosez, ...).  */
19546
19547 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19548 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19549 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19550 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19551 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19552 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19553 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19554 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19555 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19556 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19557 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19558 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19559
19560 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19561 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19562 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19563 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19564 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19565 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19566 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19567 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19568 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19569 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19570 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19571 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19572
19573 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19574 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19575 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19576 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19577 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19578 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19579 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19580 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19581 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19582 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19583 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19584 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19585
19586 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19587 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19588 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19589 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19590 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19591 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19592 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19593 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19594 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19595 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19596 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19597 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19598
19599 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19600 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19601 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19602 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19603 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19604 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19605 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19606 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19607 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19608 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19609 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19610 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19611
19612 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19613 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19614 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19615 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19616 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19617 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19618 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19619 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19620 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19621 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19622 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19623 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19624
19625 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19626 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19627 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19628 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19629 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19630 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19631 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19632 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19633 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19634 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19635 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19636 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19637
19638 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19639 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19640 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19641 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19642 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19643 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19644 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19645 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19646 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19647 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19648 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19649 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19650
19651 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19652 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19653 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19654 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19655 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19656 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19657 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19658 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19659 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19660 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19661 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19662 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19663
19664 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19665 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19666 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19667 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19668 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19669 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19670 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19671 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19672 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19673 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19674 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19675 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19676
19677 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19678 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19679 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19680 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19681 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19682 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19683 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19684 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19685 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19686 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19687 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19688 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19689
19690 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19691 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19692 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19693 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19694 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19695 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19696 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19697 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19698 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19699 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19700 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19701 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19702
19703 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19704 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19705 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19706 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19707 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19708 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19709 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19710 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19711 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19712 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19713 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19714 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19715
19716 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19717 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19718 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19719 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19720 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19721 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19722 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19723 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19724 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19725 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19726 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19727 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19728
19729 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19730 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19731 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19732 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19733 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19734 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19735 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19736 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19737 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19738 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19739 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19740 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19741
19742 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19743 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19744 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19745 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19746 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19747 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19748 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19749 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19750 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19751 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19752 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19753 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19754
19755 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19756 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19757 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19758 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19759 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19760 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19761 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19762 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19763 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19764 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19765 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19766 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19767
19768 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19769 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19770 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19771 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19772 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19773 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19774 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19775 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19776 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19777 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19778 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19779 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19780
19781 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19782 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19783 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19784 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19785 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19786 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19787 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19788 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19789 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19790 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19791 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19792 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19793
19794 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19795 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19796 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19797 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19798 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19799 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19800 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19801 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19802 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19803 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19804 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19805 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19806
19807 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19808 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19809 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19810 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19811 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19812 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19813 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19814 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19815 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19816 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19817 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19818 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19819
19820 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19821 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19822 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19823 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19824
19825 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19826 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19827 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19828 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19829 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19830 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19831 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19832 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19833 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19834 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19835 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
19836 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
19837
19838 /* The implementation of the FIX instruction is broken on some
19839 assemblers, in that it accepts a precision specifier as well as a
19840 rounding specifier, despite the fact that this is meaningless.
19841 To be more compatible, we accept it as well, though of course it
19842 does not set any bits. */
19843 cCE("fix", e100110, 2, (RR, RF), rd_rm),
19844 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
19845 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
19846 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
19847 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
19848 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
19849 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
19850 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
19851 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
19852 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
19853 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
19854 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
19855 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
19856
19857 /* Instructions that were new with the real FPA, call them V2. */
19858 #undef ARM_VARIANT
19859 #define ARM_VARIANT & fpu_fpa_ext_v2
19860
19861 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19862 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19863 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19864 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19865 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19866 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19867
19868 #undef ARM_VARIANT
19869 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19870
19871 /* Moves and type conversions. */
19872 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
19873 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
19874 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
19875 cCE("fmstat", ef1fa10, 0, (), noargs),
19876 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
19877 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
19878 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
19879 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
19880 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
19881 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19882 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
19883 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19884 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
19885 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
19886
19887 /* Memory operations. */
19888 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19889 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19890 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19891 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19892 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19893 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19894 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19895 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19896 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19897 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19898 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19899 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19900 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19901 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19902 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19903 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19904 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19905 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19906
19907 /* Monadic operations. */
19908 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19909 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19910 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19911
19912 /* Dyadic operations. */
19913 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19914 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19915 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19916 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19917 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19918 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19919 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19920 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19921 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19922
19923 /* Comparisons. */
19924 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19925 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19926 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19927 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19928
19929 /* Double precision load/store are still present on single precision
19930 implementations. */
19931 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19932 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19933 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19934 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19935 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19936 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19937 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19938 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19939 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19940 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19941
19942 #undef ARM_VARIANT
19943 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19944
19945 /* Moves and type conversions. */
19946 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19947 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19948 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19949 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19950 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19951 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19952 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19953 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19954 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19955 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19956 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19957 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19958 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19959
19960 /* Monadic operations. */
19961 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19962 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19963 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19964
19965 /* Dyadic operations. */
19966 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19967 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19968 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19969 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19970 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19971 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19972 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19973 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19974 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19975
19976 /* Comparisons. */
19977 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19978 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19979 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19980 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19981
19982 #undef ARM_VARIANT
19983 #define ARM_VARIANT & fpu_vfp_ext_v2
19984
19985 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19986 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19987 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19988 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19989
19990 /* Instructions which may belong to either the Neon or VFP instruction sets.
19991 Individual encoder functions perform additional architecture checks. */
19992 #undef ARM_VARIANT
19993 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19994 #undef THUMB_VARIANT
19995 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19996
19997 /* These mnemonics are unique to VFP. */
19998 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19999 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20000 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20001 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20002 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20003 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20004 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20005 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20006 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20007 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20008
20009 /* Mnemonics shared by Neon and VFP. */
20010 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20011 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20012 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20013
20014 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20015 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20016
20017 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20018 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20019
20020 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20021 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20022 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20023 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20024 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20025 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20026 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20027 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20028
20029 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20030 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20031 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20032 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20033
20034
20035 /* NOTE: All VMOV encoding is special-cased! */
20036 NCE(vmov, 0, 1, (VMOV), neon_mov),
20037 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20038
20039 #undef THUMB_VARIANT
20040 #define THUMB_VARIANT & fpu_neon_ext_v1
20041 #undef ARM_VARIANT
20042 #define ARM_VARIANT & fpu_neon_ext_v1
20043
20044 /* Data processing with three registers of the same length. */
20045 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20046 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20047 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20048 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20049 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20050 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20051 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20052 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20053 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20054 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20055 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20056 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20057 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20058 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20059 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20060 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20061 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20062 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20063 /* If not immediate, fall back to neon_dyadic_i64_su.
20064 shl_imm should accept I8 I16 I32 I64,
20065 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20066 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20067 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20068 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20069 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20070 /* Logic ops, types optional & ignored. */
20071 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20072 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20073 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20074 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20075 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20076 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20077 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20078 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20079 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20080 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20081 /* Bitfield ops, untyped. */
20082 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20083 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20084 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20085 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20086 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20087 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20088 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
20089 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20090 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20091 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20092 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20093 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20094 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20095 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20096 back to neon_dyadic_if_su. */
20097 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20098 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20099 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20100 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20101 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20102 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20103 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20104 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20105 /* Comparison. Type I8 I16 I32 F32. */
20106 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20107 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20108 /* As above, D registers only. */
20109 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20110 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20111 /* Int and float variants, signedness unimportant. */
20112 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20113 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20114 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20115 /* Add/sub take types I8 I16 I32 I64 F32. */
20116 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20117 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20118 /* vtst takes sizes 8, 16, 32. */
20119 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20120 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20121 /* VMUL takes I8 I16 I32 F32 P8. */
20122 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20123 /* VQD{R}MULH takes S16 S32. */
20124 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20125 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20126 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20127 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20128 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20129 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20130 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20131 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20132 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20133 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20134 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20135 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20136 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20137 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20138 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
20139 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
20140 /* ARM v8.1 extension. */
20141 nUF(vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20142 nUF(vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20143 nUF(vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20144 nUF(vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20145
20146 /* Two address, int/float. Types S8 S16 S32 F32. */
20147 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
20148 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
20149
20150 /* Data processing with two registers and a shift amount. */
20151 /* Right shifts, and variants with rounding.
20152 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
20153 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20154 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20155 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
20156 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
20157 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20158 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20159 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
20160 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
20161 /* Shift and insert. Sizes accepted 8 16 32 64. */
20162 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
20163 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
20164 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
20165 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
20166 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
20167 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
20168 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
20169 /* Right shift immediate, saturating & narrowing, with rounding variants.
20170 Types accepted S16 S32 S64 U16 U32 U64. */
20171 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20172 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
20173 /* As above, unsigned. Types accepted S16 S32 S64. */
20174 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20175 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
20176 /* Right shift narrowing. Types accepted I16 I32 I64. */
20177 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20178 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
20179 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
20180 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
20181 /* CVT with optional immediate for fixed-point variant. */
20182 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
20183
20184 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
20185 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
20186
20187 /* Data processing, three registers of different lengths. */
20188 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
20189 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
20190 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
20191 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
20192 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
20193 /* If not scalar, fall back to neon_dyadic_long.
20194 Vector types as above, scalar types S16 S32 U16 U32. */
20195 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20196 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
20197 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
20198 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20199 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
20200 /* Dyadic, narrowing insns. Types I16 I32 I64. */
20201 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20202 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20203 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20204 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
20205 /* Saturating doubling multiplies. Types S16 S32. */
20206 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20207 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20208 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
20209 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
20210 S16 S32 U16 U32. */
20211 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
20212
20213 /* Extract. Size 8. */
20214 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
20215 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
20216
20217 /* Two registers, miscellaneous. */
20218 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
20219 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
20220 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
20221 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
20222 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
20223 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
20224 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
20225 /* Vector replicate. Sizes 8 16 32. */
20226 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
20227 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
20228 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
20229 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
20230 /* VMOVN. Types I16 I32 I64. */
20231 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
20232 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
20233 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
20234 /* VQMOVUN. Types S16 S32 S64. */
20235 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
20236 /* VZIP / VUZP. Sizes 8 16 32. */
20237 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
20238 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
20239 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
20240 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
20241 /* VQABS / VQNEG. Types S8 S16 S32. */
20242 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20243 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
20244 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
20245 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
20246 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
20247 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
20248 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
20249 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
20250 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
20251 /* Reciprocal estimates. Types U32 F32. */
20252 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
20253 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
20254 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
20255 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
20256 /* VCLS. Types S8 S16 S32. */
20257 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
20258 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
20259 /* VCLZ. Types I8 I16 I32. */
20260 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
20261 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
20262 /* VCNT. Size 8. */
20263 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
20264 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
20265 /* Two address, untyped. */
20266 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
20267 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
20268 /* VTRN. Sizes 8 16 32. */
20269 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
20270 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
20271
20272 /* Table lookup. Size 8. */
20273 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20274 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
20275
20276 #undef THUMB_VARIANT
20277 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
20278 #undef ARM_VARIANT
20279 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
20280
20281 /* Neon element/structure load/store. */
20282 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20283 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
20284 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20285 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
20286 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20287 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
20288 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20289 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
20290
20291 #undef THUMB_VARIANT
20292 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
20293 #undef ARM_VARIANT
20294 #define ARM_VARIANT & fpu_vfp_ext_v3xd
20295 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
20296 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20297 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20298 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20299 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20300 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20301 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20302 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
20303 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
20304
20305 #undef THUMB_VARIANT
20306 #define THUMB_VARIANT & fpu_vfp_ext_v3
20307 #undef ARM_VARIANT
20308 #define ARM_VARIANT & fpu_vfp_ext_v3
20309
20310 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
20311 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20312 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20313 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20314 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20315 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20316 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20317 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
20318 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
20319
20320 #undef ARM_VARIANT
20321 #define ARM_VARIANT & fpu_vfp_ext_fma
20322 #undef THUMB_VARIANT
20323 #define THUMB_VARIANT & fpu_vfp_ext_fma
20324 /* Mnemonics shared by Neon and VFP. These are included in the
20325 VFP FMA variant; NEON and VFP FMA always includes the NEON
20326 FMA instructions. */
20327 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20328 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
20329 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
20330 the v form should always be used. */
20331 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20332 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20333 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20334 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20335 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20336 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20337
20338 #undef THUMB_VARIANT
20339 #undef ARM_VARIANT
20340 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
20341
20342 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20343 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20344 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20345 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20346 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20347 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
20348 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
20349 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
20350
20351 #undef ARM_VARIANT
20352 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
20353
20354 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
20355 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
20356 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
20357 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
20358 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
20359 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
20360 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
20361 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
20362 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
20363 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20364 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20365 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
20366 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20367 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20368 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
20369 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20370 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20371 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
20372 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
20373 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
20374 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20375 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20376 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20377 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20378 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20379 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
20380 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
20381 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
20382 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
20383 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
20384 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
20385 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
20386 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
20387 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
20388 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
20389 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
20390 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
20391 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20392 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20393 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20394 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20395 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20396 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20397 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20398 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20399 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20400 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
20401 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20402 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20403 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20404 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20405 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20406 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20407 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20408 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20409 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20410 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20411 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20412 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20413 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20414 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20415 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20416 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20417 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20418 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20419 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20420 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20421 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20422 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20423 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20424 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20425 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20426 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20427 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20428 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20429 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20430 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20431 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20432 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20433 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20434 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20435 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20436 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20437 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20438 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20439 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20440 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20441 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20442 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20443 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20444 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20445 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20446 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20447 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20448 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20449 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20450 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20451 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20452 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20453 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20454 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20455 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20456 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20457 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20458 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20459 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20460 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20461 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20462 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20463 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20464 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20465 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20466 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20467 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20468 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20469 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20470 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20471 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20472 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20473 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20474 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20475 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20476 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20477 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20478 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20479 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20480 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20481 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20482 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20483 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20484 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20485 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20486 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20487 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20488 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20489 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20490 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20491 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20492 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20493 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20494 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20495 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20496 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20497 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20498 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20499 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20500 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20501 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20502 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20503 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20504 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20505 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20506 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20507 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20508 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20509 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20510 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20511 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20512 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20513 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20514 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20515 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20516
20517 #undef ARM_VARIANT
20518 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20519
20520 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20521 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20522 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20523 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20524 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20525 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20526 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20527 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20528 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20529 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20530 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20531 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20532 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20533 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20534 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20535 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20536 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20537 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20538 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20539 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20540 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20541 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20542 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20543 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20544 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20545 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20546 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20547 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20548 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20549 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20550 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20551 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20552 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20553 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20554 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20555 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20556 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20557 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20558 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20559 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20560 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20561 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20562 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20563 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20564 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20565 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20566 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20567 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20568 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20569 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20570 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20571 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20572 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20573 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20574 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20575 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20576 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20577
20578 #undef ARM_VARIANT
20579 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20580
20581 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20582 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20583 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20584 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20585 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20586 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20587 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20588 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20589 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20590 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20591 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20592 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20593 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20594 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20595 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20596 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20597 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20598 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20599 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20600 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20601 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20602 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20603 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20604 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20605 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20606 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20607 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20608 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20609 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20610 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20611 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20612 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20613 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20614 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20615 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20616 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20617 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20618 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20619 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20620 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20621 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20622 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20623 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20624 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20625 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20626 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20627 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20628 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20629 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20630 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20631 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20632 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20633 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20634 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20635 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20636 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20637 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20638 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20639 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20640 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20641 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20642 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20643 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20644 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20645 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20646 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20647 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20648 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20649 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20650 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20651 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20652 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20653 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20654 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20655 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20656 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20657
20658 #undef ARM_VARIANT
20659 #define ARM_VARIANT NULL
20660 #undef THUMB_VARIANT
20661 #define THUMB_VARIANT & arm_ext_v8m
20662 TUE("tt", 0, e840f000, 2, (RRnpc, RRnpc), 0, tt),
20663 TUE("ttt", 0, e840f040, 2, (RRnpc, RRnpc), 0, tt),
20664 };
20665 #undef ARM_VARIANT
20666 #undef THUMB_VARIANT
20667 #undef TCE
20668 #undef TUE
20669 #undef TUF
20670 #undef TCC
20671 #undef cCE
20672 #undef cCL
20673 #undef C3E
20674 #undef CE
20675 #undef CM
20676 #undef UE
20677 #undef UF
20678 #undef UT
20679 #undef NUF
20680 #undef nUF
20681 #undef NCE
20682 #undef nCE
20683 #undef OPS0
20684 #undef OPS1
20685 #undef OPS2
20686 #undef OPS3
20687 #undef OPS4
20688 #undef OPS5
20689 #undef OPS6
20690 #undef do_0
20691 \f
20692 /* MD interface: bits in the object file. */
20693
20694 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20695 for use in the a.out file, and stores them in the array pointed to by buf.
20696 This knows about the endian-ness of the target machine and does
20697 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
2 (short) and 4 (long).  Floating numbers are put out as a series of
20699 LITTLENUMS (shorts, here at least). */
20700
20701 void
20702 md_number_to_chars (char * buf, valueT val, int n)
20703 {
20704 if (target_big_endian)
20705 number_to_chars_bigendian (buf, val, n);
20706 else
20707 number_to_chars_littleendian (buf, val, n);
20708 }
20709
20710 static valueT
20711 md_chars_to_number (char * buf, int n)
20712 {
20713 valueT result = 0;
20714 unsigned char * where = (unsigned char *) buf;
20715
20716 if (target_big_endian)
20717 {
20718 while (n--)
20719 {
20720 result <<= 8;
20721 result |= (*where++ & 255);
20722 }
20723 }
20724 else
20725 {
20726 while (n--)
20727 {
20728 result <<= 8;
20729 result |= (where[n] & 255);
20730 }
20731 }
20732
20733 return result;
20734 }
20735
20736 /* MD interface: Sections. */
20737
20738 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20739 that an rs_machine_dependent frag may reach. */
20740
20741 unsigned int
20742 arm_frag_max_var (fragS *fragp)
20743 {
20744 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20745 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20746
20747 Note that we generate relaxable instructions even for cases that don't
20748 really need it, like an immediate that's a trivial constant. So we're
20749 overestimating the instruction size for some of those cases. Rather
20750 than putting more intelligence here, it would probably be better to
20751 avoid generating a relaxation frag in the first place when it can be
20752 determined up front that a short instruction will suffice. */
20753
20754 gas_assert (fragp->fr_type == rs_machine_dependent);
20755 return INSN_SIZE;
20756 }
20757
20758 /* Estimate the size of a frag before relaxing. Assume everything fits in
20759 2 bytes. */
20760
20761 int
20762 md_estimate_size_before_relax (fragS * fragp,
20763 segT segtype ATTRIBUTE_UNUSED)
20764 {
20765 fragp->fr_var = 2;
20766 return 2;
20767 }
20768
20769 /* Convert a machine dependent frag. */
20770
/* Finalize a relaxed Thumb instruction frag: rewrite the opcode bytes
   for the chosen (2- or 4-byte) encoding, and emit the fixup that will
   fill in the immediate/offset field.  FR_VAR holds the final size
   decided by relaxation; FR_SUBTYPE holds the T_MNEM_* opcode.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The instruction starts at the end of the fixed part of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* The 16-bit (narrow) form was written when the frag was created;
     its register fields are extracted below when widening.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Narrow SP/PC-relative forms (top nibble 4 or 9) keep Rt in
	     bits 8-10; the register-offset forms keep Rt in bits 0-2 and
	     Rn in bits 3-5.  Move them into the wide encoding's fields.
	     NOTE(review): field placements inferred from the shifts here —
	     confirm against the Thumb-2 load/store encodings.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal load is PC-relative as a fixup.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Carry Rd (bits 4-7 of the narrow form) into the wide form.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the PC reading 4 bytes ahead in the narrow
	     encoding.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs place Rd in bits 8-10 of both encodings (shift 0);
	     cmp/cmn need the register moved up into Rn (shift 8).  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition code (bits 8-11) into the wide encoding.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  /* add Rd, pc, #imm uses the 12-bit "addw"-style immediate;
	     the SP forms use the modified-immediate add.  */
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Rd from bits 4-7, Rn from bits 0-3 of the narrow form.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 distinguishes the flag-setting variant, which may be
	     converted between add/sub by the fixup machinery.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Emit the fixup covering the variable part, attributed to the
     source location that created the frag.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  /* The variable part is now fixed.  */
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
20942
20943 /* Return the size of a relaxable immediate operand instruction.
20944 SHIFT and SIZE specify the form of the allowable immediate. */
20945 static int
20946 relax_immediate (fragS *fragp, int size, int shift)
20947 {
20948 offsetT offset;
20949 offsetT mask;
20950 offsetT low;
20951
20952 /* ??? Should be able to do better than this. */
20953 if (fragp->fr_symbol)
20954 return 4;
20955
20956 low = (1 << shift) - 1;
20957 mask = (1 << (shift + size)) - (1 << shift);
20958 offset = fragp->fr_offset;
20959 /* Force misaligned offsets to 32-bit variant. */
20960 if (offset & low)
20961 return 4;
20962 if (offset & ~mask)
20963 return 4;
20964 return 2;
20965 }
20966
20967 /* Get the address of a symbol during relaxation. */
/* Return the address of FRAGP's symbol (plus fr_offset) during
   relaxation, compensating by STRETCH for frags not yet reached on
   this relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* An intervening alignment frag absorbs part of the
		 stretch: round the stretch toward zero to a multiple of
		 the alignment (fr_offset is the log2 alignment here).  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Fully absorbed — no movement remains to apply.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* Only apply the (remaining) stretch if the symbol's frag was
	 actually found ahead of us; otherwise the symbol is behind us
	 and has already moved.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
21016
21017 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21018 load. */
21019 static int
21020 relax_adr (fragS *fragp, asection *sec, long stretch)
21021 {
21022 addressT addr;
21023 offsetT val;
21024
21025 /* Assume worst case for symbols not known to be in the same section. */
21026 if (fragp->fr_symbol == NULL
21027 || !S_IS_DEFINED (fragp->fr_symbol)
21028 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21029 || S_IS_WEAK (fragp->fr_symbol))
21030 return 4;
21031
21032 val = relaxed_symbol_addr (fragp, stretch);
21033 addr = fragp->fr_address + fragp->fr_fix;
21034 addr = (addr + 4) & ~3;
21035 /* Force misaligned targets to 32-bit variant. */
21036 if (val & 3)
21037 return 4;
21038 val -= addr;
21039 if (val < 0 || val > 1020)
21040 return 4;
21041 return 2;
21042 }
21043
21044 /* Return the size of a relaxable add/sub immediate instruction. */
21045 static int
21046 relax_addsub (fragS *fragp, asection *sec)
21047 {
21048 char *buf;
21049 int op;
21050
21051 buf = fragp->fr_literal + fragp->fr_fix;
21052 op = bfd_get_16(sec->owner, buf);
21053 if ((op & 0xf) == ((op >> 4) & 0xf))
21054 return relax_immediate (fragp, 8, 0);
21055 else
21056 return relax_immediate (fragp, 3, 0);
21057 }
21058
21059 /* Return TRUE iff the definition of symbol S could be pre-empted
21060 (overridden) at link or load time. */
21061 static bfd_boolean
21062 symbol_preemptible (symbolS *s)
21063 {
21064 /* Weak symbols can always be pre-empted. */
21065 if (S_IS_WEAK (s))
21066 return TRUE;
21067
21068 /* Non-global symbols cannot be pre-empted. */
21069 if (! S_IS_EXTERNAL (s))
21070 return FALSE;
21071
21072 #ifdef OBJ_ELF
21073 /* In ELF, a global symbol can be marked protected, or private. In that
21074 case it can't be pre-empted (other definitions in the same link unit
21075 would violate the ODR). */
21076 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21077 return FALSE;
21078 #endif
21079
21080 /* Other global symbols might be pre-empted. */
21081 return TRUE;
21082 }
21083
21084 /* Return the size of a relaxable branch instruction. BITS is the
21085 size of the offset field in the narrow instruction. */
21086
21087 static int
21088 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21089 {
21090 addressT addr;
21091 offsetT val;
21092 offsetT limit;
21093
21094 /* Assume worst case for symbols not known to be in the same section. */
21095 if (!S_IS_DEFINED (fragp->fr_symbol)
21096 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21097 || S_IS_WEAK (fragp->fr_symbol))
21098 return 4;
21099
21100 #ifdef OBJ_ELF
21101 /* A branch to a function in ARM state will require interworking. */
21102 if (S_IS_DEFINED (fragp->fr_symbol)
21103 && ARM_IS_FUNC (fragp->fr_symbol))
21104 return 4;
21105 #endif
21106
21107 if (symbol_preemptible (fragp->fr_symbol))
21108 return 4;
21109
21110 val = relaxed_symbol_addr (fragp, stretch);
21111 addr = fragp->fr_address + fragp->fr_fix + 4;
21112 val -= addr;
21113
21114 /* Offset is a signed value *2 */
21115 limit = 1 << bits;
21116 if (val >= limit || val < -limit)
21117 return 4;
21118 return 2;
21119 }
21120
21121
21122 /* Relax a machine dependent frag. This returns the amount by which
21123 the current size of the frag should change. */
21124
21125 int
21126 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
21127 {
21128 int oldsize;
21129 int newsize;
21130
21131 oldsize = fragp->fr_var;
21132 switch (fragp->fr_subtype)
21133 {
21134 case T_MNEM_ldr_pc2:
21135 newsize = relax_adr (fragp, sec, stretch);
21136 break;
21137 case T_MNEM_ldr_pc:
21138 case T_MNEM_ldr_sp:
21139 case T_MNEM_str_sp:
21140 newsize = relax_immediate (fragp, 8, 2);
21141 break;
21142 case T_MNEM_ldr:
21143 case T_MNEM_str:
21144 newsize = relax_immediate (fragp, 5, 2);
21145 break;
21146 case T_MNEM_ldrh:
21147 case T_MNEM_strh:
21148 newsize = relax_immediate (fragp, 5, 1);
21149 break;
21150 case T_MNEM_ldrb:
21151 case T_MNEM_strb:
21152 newsize = relax_immediate (fragp, 5, 0);
21153 break;
21154 case T_MNEM_adr:
21155 newsize = relax_adr (fragp, sec, stretch);
21156 break;
21157 case T_MNEM_mov:
21158 case T_MNEM_movs:
21159 case T_MNEM_cmp:
21160 case T_MNEM_cmn:
21161 newsize = relax_immediate (fragp, 8, 0);
21162 break;
21163 case T_MNEM_b:
21164 newsize = relax_branch (fragp, sec, 11, stretch);
21165 break;
21166 case T_MNEM_bcond:
21167 newsize = relax_branch (fragp, sec, 8, stretch);
21168 break;
21169 case T_MNEM_add_sp:
21170 case T_MNEM_add_pc:
21171 newsize = relax_immediate (fragp, 8, 2);
21172 break;
21173 case T_MNEM_inc_sp:
21174 case T_MNEM_dec_sp:
21175 newsize = relax_immediate (fragp, 7, 2);
21176 break;
21177 case T_MNEM_addi:
21178 case T_MNEM_addis:
21179 case T_MNEM_subi:
21180 case T_MNEM_subis:
21181 newsize = relax_addsub (fragp, sec);
21182 break;
21183 default:
21184 abort ();
21185 }
21186
21187 fragp->fr_var = newsize;
21188 /* Freeze wide instructions that are at or before the same location as
21189 in the previous pass. This avoids infinite loops.
21190 Don't freeze them unconditionally because targets may be artificially
21191 misaligned by the expansion of preceding frags. */
21192 if (stretch <= 0 && newsize > 2)
21193 {
21194 md_convert_frag (sec->owner, sec, fragp);
21195 frag_wane (fragp);
21196 }
21197
21198 return newsize - oldsize;
21199 }
21200
21201 /* Round up a section size to the appropriate boundary. */
21202
21203 valueT
21204 md_section_align (segT segment ATTRIBUTE_UNUSED,
21205 valueT size)
21206 {
21207 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
21208 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
21209 {
21210 /* For a.out, force the section size to be aligned. If we don't do
21211 this, BFD will align it for us, but it will not write out the
21212 final bytes of the section. This may be a bug in BFD, but it is
21213 easier to fix it here since that is how the other a.out targets
21214 work. */
21215 int align;
21216
21217 align = bfd_get_section_alignment (stdoutput, segment);
21218 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
21219 }
21220 #endif
21221
21222 return size;
21223 }
21224
21225 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
21226 of an rs_align_code fragment. */
21227
/* Fill the padding of an rs_align_code fragment with no-op instructions
   appropriate to the current mode (ARM or Thumb) and architecture,
   padding any leading misaligned bytes with zeroes.  */
void
arm_handle_align (fragS * fragP)
{
  /* ARM nops, indexed by [has-v6k][big-endian]: a mov r0,r0 for pre-v6k,
     the architected NOP hint for v6k and later.  */
  static char const arm_noop[2][2][4] =
    {
      { /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      { /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  /* Narrow Thumb nops, indexed by [thumb-2][big-endian].  */
  static char const thumb_noop[2][2][2] =
    {
      { /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      { /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  /* 32-bit Thumb-2 nop, indexed by [big-endian].  */
  static char const wide_thumb_noop[2][4] =
    { /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes to emit: the gap up to the next frag,
     beyond what this frag has already fixed.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE(review): this masks rather than clamps, which relies on
     MAX_MEM_FOR_RS_ALIGN_CODE being of the form 2^n - 1.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb padding.  With Thumb-2 available, prefer wide nops and
	 keep a narrow nop for odd halfword alignment.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM padding: the hint NOP where v6k is available, else
	 mov r0, r0.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Zero-pad any bytes that would misalign the first nop, marking them
     as data for the mapping symbols.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      /* If an odd number of halfwords remains, emit one narrow nop so
	 the rest divides evenly into wide nops.  */
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
21344
21345 /* Called from md_do_align. Used to create an alignment
21346 frag in a code section. */
21347
21348 void
21349 arm_frag_align_code (int n, int max)
21350 {
21351 char * p;
21352
21353 /* We assume that there will never be a requirement
21354 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
21355 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
21356 {
21357 char err_msg[128];
21358
21359 sprintf (err_msg,
21360 _("alignments greater than %d bytes not supported in .text sections."),
21361 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
21362 as_fatal ("%s", err_msg);
21363 }
21364
21365 p = frag_var (rs_align_code,
21366 MAX_MEM_FOR_RS_ALIGN_CODE,
21367 1,
21368 (relax_substateT) max,
21369 (symbolS *) NULL,
21370 (offsetT) n,
21371 (char *) NULL);
21372 *p = 0;
21373 }
21374
21375 /* Perform target specific initialisation of a frag.
21376 Note - despite the name this initialisation is not done when the frag
21377 is created, but only when its type is assigned. A frag can be created
21378 and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */
21380
21381 #ifndef OBJ_ELF
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  The
     MODE_RECORDED bit marks the thumb_mode field as valid; the
     gas_assert in arm_handle_align relies on it having been set.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
21388
21389 #else /* OBJ_ELF is defined. */
21390 void
21391 arm_init_frag (fragS * fragP, int max_chars)
21392 {
21393 int frag_thumb_mode;
21394
21395 /* If the current ARM vs THUMB mode has not already
21396 been recorded into this frag then do so now. */
21397 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
21398 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
21399
21400 frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
21401
21402 /* Record a mapping symbol for alignment frags. We will delete this
21403 later if the alignment ends up empty. */
21404 switch (fragP->fr_type)
21405 {
21406 case rs_align:
21407 case rs_align_test:
21408 case rs_fill:
21409 mapping_state_2 (MAP_DATA, max_chars);
21410 break;
21411 case rs_align_code:
21412 mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
21413 break;
21414 default:
21415 break;
21416 }
21417 }
21418
21419 /* When we change sections we need to issue a new mapping symbol. */
21420
21421 void
21422 arm_elf_change_section (void)
21423 {
21424 /* Link an unlinked unwind index table section to the .text section. */
21425 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
21426 && elf_linked_to_section (now_seg) == NULL)
21427 elf_linked_to_section (now_seg) = text_section;
21428 }
21429
21430 int
21431 arm_elf_section_type (const char * str, size_t len)
21432 {
21433 if (len == 5 && strncmp (str, "exidx", 5) == 0)
21434 return SHT_ARM_EXIDX;
21435
21436 return -1;
21437 }
21438 \f
21439 /* Code to deal with unwinding tables. */
21440
21441 static void add_unwind_adjustsp (offsetT);
21442
21443 /* Generate any deferred unwind frame offset. */
21444
21445 static void
21446 flush_pending_unwind (void)
21447 {
21448 offsetT offset;
21449
21450 offset = unwind.pending_offset;
21451 unwind.pending_offset = 0;
21452 if (offset != 0)
21453 add_unwind_adjustsp (offset);
21454 }
21455
21456 /* Add an opcode to this list for this function. Two-byte opcodes should
21457 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21458 order. */
21459
21460 static void
21461 add_unwind_opcode (valueT op, int length)
21462 {
21463 /* Add any deferred stack adjustment. */
21464 if (unwind.pending_offset)
21465 flush_pending_unwind ();
21466
21467 unwind.sp_restored = 0;
21468
21469 if (unwind.opcode_count + length > unwind.opcode_alloc)
21470 {
21471 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21472 if (unwind.opcodes)
21473 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
21474 unwind.opcode_alloc);
21475 else
21476 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
21477 }
21478 while (length > 0)
21479 {
21480 length--;
21481 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21482 op >>= 8;
21483 unwind.opcode_count++;
21484 }
21485 }
21486
21487 /* Add unwind opcodes to adjust the stack pointer. */
21488
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      /* The long form adds 0x204 + (uleb128 << 2) to sp, hence the
	 biasing and scaling of OFFSET here.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	/* A zero operand still needs one uleb128 byte emitted.  */
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    /* Continuation bit on every byte except the last.  */
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.  Bytes go out most-significant first because the
	 opcode list is built in reverse.  */
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  The first (0x3f) adds the maximum 0x100;
	 the second covers the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode: 0x00-0x3f adds (op << 2) + 4 to sp.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit as many maximal 0x7f opcodes as
	 needed, then one 0x40-0x7f opcode for the remainder.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
21548
21549 /* Finish the list of unwind opcodes for this function. */
21550 static void
21551 finish_unwind_opcodes (void)
21552 {
21553 valueT op;
21554
21555 if (unwind.fp_used)
21556 {
21557 /* Adjust sp as necessary. */
21558 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21559 flush_pending_unwind ();
21560
21561 /* After restoring sp from the frame pointer. */
21562 op = 0x90 | unwind.fp_reg;
21563 add_unwind_opcode (op, 1);
21564 }
21565 else
21566 flush_pending_unwind ();
21567 }
21568
21569
21570 /* Start an exception table entry. If idx is nonzero this is an index table
21571 entry. */
21572
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  /* Choose the section-name prefix and ELF section type: the unwind
     index table (SHT_ARM_EXIDX) or the unwind descriptor table
     (ordinary SHT_PROGBITS).  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* The unwind section name is the prefix with the text section name
     appended; plain ".text" contributes nothing.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  /* Old-style linkonce text sections get the "once" prefix with the
     linkonce prefix stripped from the appended name.  */
  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  /* Build the section name.  Note: SEC_NAME is not freed here; the
     section machinery keeps a reference to it.  */
  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = (char *) xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  The unwind section joins the same group as
     its text section so both are kept or discarded together.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
21645
21646
21647 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21648 personality routine data. Returns zero, or the index table value for
21649 an inline entry. */
21650
static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  /* Emit any pending opcodes (frame-pointer restore, deferred sp
     adjustments).  */
  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  /* Switch to the unwind descriptor section for this text section.  */
  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  /* .cantunwind: no descriptor at all, just the special index
	     table value.  */
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  /* Personality 0 packs all opcodes into one word, so at most
	     three are allowed.  */
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      /* The opcode list is stored in reverse, so walking it
		 backwards emits opcodes in the correct order.  */
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.  */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;
    }

  /* SIZE becomes the number of additional 32-bit words needed.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Word-align the entry and record its address for the index table.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
21815
21816
21817 /* Initialize the DWARF-2 unwind information for this procedure. */
21818
void
tc_arm_frame_initial_instructions (void)
{
  /* On function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
21824 #endif /* OBJ_ELF */
21825
21826 /* Convert REGNAME to a DWARF-2 register number. */
21827
21828 int
21829 tc_arm_regname_to_dw2regnum (char *regname)
21830 {
21831 int reg = arm_reg_parse (&regname, REG_TYPE_RN);
21832 if (reg != FAIL)
21833 return reg;
21834
21835 /* PR 16694: Allow VFP registers as well. */
21836 reg = arm_reg_parse (&regname, REG_TYPE_VFS);
21837 if (reg != FAIL)
21838 return 64 + reg;
21839
21840 reg = arm_reg_parse (&regname, REG_TYPE_VFD);
21841 if (reg != FAIL)
21842 return reg + 256;
21843
21844 return -1;
21845 }
21846
21847 #ifdef TE_PE
21848 void
21849 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
21850 {
21851 expressionS exp;
21852
21853 exp.X_op = O_secrel;
21854 exp.X_add_symbol = symbol;
21855 exp.X_add_number = 0;
21856 emit_expr (&exp, size);
21857 }
21858 #endif
21859
21860 /* MD interface: Symbol and relocation handling. */
21861
21862 /* Return the address within the segment that a PC-relative fixup is
21863 relative to. For ARM, PC-relative fixups applied to instructions
21864 are generally relative to the location of the fixup plus 8 bytes.
21865 Thumb branches are offset by 4, and Thumb loads relative to PC
21866 require special handling. */
21867
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* NOTE(review): for a local function symbol on v5t the `base = 0'
	 bias suppression above is undone and the real address restored -
	 presumably because the linker may rewrite this BL as a BLX for
	 interworking; confirm against the bfd ELF32 ARM back-end.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
21989
21990 static bfd_boolean flag_warn_syms = TRUE;
21991
21992 bfd_boolean
21993 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
21994 {
21995 /* PR 18347 - Warn if the user attempts to create a symbol with the same
21996 name as an ARM instruction. Whilst strictly speaking it is allowed, it
21997 does mean that the resulting code might be very confusing to the reader.
21998 Also this warning can be triggered if the user omits an operand before
21999 an immediate address, eg:
22000
22001 LDR =foo
22002
22003 GAS treats this as an assignment of the value of the symbol foo to a
22004 symbol LDR, and so (without this code) it will not issue any kind of
22005 warning or error message.
22006
22007 Note - ARM instructions are case-insensitive but the strings in the hash
22008 table are all stored in lower case, so we must first ensure that name is
22009 lower case too. */
22010 if (flag_warn_syms && arm_ops_hsh)
22011 {
22012 char * nbuf = strdup (name);
22013 char * p;
22014
22015 for (p = nbuf; *p; p++)
22016 *p = TOLOWER (*p);
22017 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22018 {
22019 static struct hash_control * already_warned = NULL;
22020
22021 if (already_warned == NULL)
22022 already_warned = hash_new ();
22023 /* Only warn about the symbol once. To keep the code
22024 simple we let hash_insert do the lookup for us. */
22025 if (hash_insert (already_warned, name, NULL) == NULL)
22026 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22027 }
22028 else
22029 free (nbuf);
22030 }
22031
22032 return FALSE;
22033 }
22034
22035 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22036 Otherwise we have no need to default values of symbols. */
22037
22038 symbolS *
22039 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22040 {
22041 #ifdef OBJ_ELF
22042 if (name[0] == '_' && name[1] == 'G'
22043 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22044 {
22045 if (!GOT_symbol)
22046 {
22047 if (symbol_find (name))
22048 as_bad (_("GOT already in the symbol table"));
22049
22050 GOT_symbol = symbol_new (name, undefined_section,
22051 (valueT) 0, & zero_address_frag);
22052 }
22053
22054 return GOT_symbol;
22055 }
22056 #endif
22057
22058 return NULL;
22059 }
22060
22061 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22062 computed as two separate immediate values, added together. We
22063 already know that this value cannot be computed by just one ARM
22064 instruction. */
22065
22066 static unsigned int
22067 validate_immediate_twopart (unsigned int val,
22068 unsigned int * highpart)
22069 {
22070 unsigned int a;
22071 unsigned int i;
22072
22073 for (i = 0; i < 32; i += 2)
22074 if (((a = rotate_left (val, i)) & 0xff) != 0)
22075 {
22076 if (a & 0xff00)
22077 {
22078 if (a & ~ 0xffff)
22079 continue;
22080 * highpart = (a >> 8) | ((i + 24) << 7);
22081 }
22082 else if (a & 0xff0000)
22083 {
22084 if (a & 0xff000000)
22085 continue;
22086 * highpart = (a >> 16) | ((i + 16) << 7);
22087 }
22088 else
22089 {
22090 gas_assert (a & 0xff000000);
22091 * highpart = (a >> 24) | ((i + 8) << 7);
22092 }
22093
22094 return (a & 0xff) | (i << 7);
22095 }
22096
22097 return FAIL;
22098 }
22099
22100 static int
22101 validate_offset_imm (unsigned int val, int hwse)
22102 {
22103 if ((hwse && val > 255) || val > 4095)
22104 return FAIL;
22105 return val;
22106 }
22107
22108 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22109 negative immediate constant by altering the instruction. A bit of
22110 a hack really.
22111 MOV <-> MVN
22112 AND <-> BIC
22113 ADC <-> SBC
22114 by inverting the second operand, and
22115 ADD <-> SUB
22116 CMP <-> CMN
22117 by negating the second operand. */
22118
22119 static int
22120 negate_data_op (unsigned long * instruction,
22121 unsigned long value)
22122 {
22123 int op, new_inst;
22124 unsigned long negated, inverted;
22125
22126 negated = encode_arm_immediate (-value);
22127 inverted = encode_arm_immediate (~value);
22128
22129 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22130 switch (op)
22131 {
22132 /* First negates. */
22133 case OPCODE_SUB: /* ADD <-> SUB */
22134 new_inst = OPCODE_ADD;
22135 value = negated;
22136 break;
22137
22138 case OPCODE_ADD:
22139 new_inst = OPCODE_SUB;
22140 value = negated;
22141 break;
22142
22143 case OPCODE_CMP: /* CMP <-> CMN */
22144 new_inst = OPCODE_CMN;
22145 value = negated;
22146 break;
22147
22148 case OPCODE_CMN:
22149 new_inst = OPCODE_CMP;
22150 value = negated;
22151 break;
22152
22153 /* Now Inverted ops. */
22154 case OPCODE_MOV: /* MOV <-> MVN */
22155 new_inst = OPCODE_MVN;
22156 value = inverted;
22157 break;
22158
22159 case OPCODE_MVN:
22160 new_inst = OPCODE_MOV;
22161 value = inverted;
22162 break;
22163
22164 case OPCODE_AND: /* AND <-> BIC */
22165 new_inst = OPCODE_BIC;
22166 value = inverted;
22167 break;
22168
22169 case OPCODE_BIC:
22170 new_inst = OPCODE_AND;
22171 value = inverted;
22172 break;
22173
22174 case OPCODE_ADC: /* ADC <-> SBC */
22175 new_inst = OPCODE_SBC;
22176 value = inverted;
22177 break;
22178
22179 case OPCODE_SBC:
22180 new_inst = OPCODE_ADC;
22181 value = inverted;
22182 break;
22183
22184 /* We cannot do anything. */
22185 default:
22186 return FAIL;
22187 }
22188
22189 if (value == (unsigned) FAIL)
22190 return FAIL;
22191
22192 *instruction &= OPCODE_MASK;
22193 *instruction |= new_inst << DATA_OP_SHIFT;
22194 return value;
22195 }
22196
22197 /* Like negate_data_op, but for Thumb-2. */
22198
static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  /* Pre-compute both candidate encodings; FAIL marks the ones that
     cannot be represented as a Thumb-2 modified immediate.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
    /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

    /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

    /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 here means this AND is really TST, which has no
	 inverted counterpart, so reject the transformation.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

    /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

    /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Patch the opcode field in place and return the new immediate.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
22272
22273 /* Read a 32-bit thumb instruction from buf. */
22274 static unsigned long
22275 get_thumb32_insn (char * buf)
22276 {
22277 unsigned long insn;
22278 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
22279 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22280
22281 return insn;
22282 }
22283
22284
22285 /* We usually want to set the low bit on the address of thumb function
22286 symbols. In particular .word foo - . should have the low bit set.
22287 Generic code tries to fold the difference of two symbols to
   a constant.  Prevent this and force a relocation when the first symbol
22289 is a thumb function. */
22290
22291 bfd_boolean
22292 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
22293 {
22294 if (op == O_subtract
22295 && l->X_op == O_symbol
22296 && r->X_op == O_symbol
22297 && THUMB_IS_FUNC (l->X_add_symbol))
22298 {
22299 l->X_op = O_subtract;
22300 l->X_op_symbol = r->X_add_symbol;
22301 l->X_add_number -= r->X_add_number;
22302 return TRUE;
22303 }
22304
22305 /* Process as normal. */
22306 return FALSE;
22307 }
22308
22309 /* Encode Thumb2 unconditional branches and calls. The encoding
22310 for the 2 are identical for the immediate values. */
22311
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
  /* Mask of the J1 and J2 bit positions in the second halfword.  */
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split VALUE into the encoding fields: sign bit S, the two
     high-order bits I1/I2, and the high and low immediate chunks.
     Bit 0 of VALUE is discarded (halfword alignment).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  /* The instruction stores J1 = NOT(I1 EOR S) and J2 = NOT(I2 EOR S);
     the final EOR against the mask performs the NOT on both bits.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
22333
22334 void
22335 md_apply_fix (fixS * fixP,
22336 valueT * valP,
22337 segT seg)
22338 {
22339 offsetT value = * valP;
22340 offsetT newval;
22341 unsigned int newimm;
22342 unsigned long temp;
22343 int sign;
22344 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
22345
22346 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
22347
22348 /* Note whether this will delete the relocation. */
22349
22350 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
22351 fixP->fx_done = 1;
22352
22353 /* On a 64-bit host, silently truncate 'value' to 32 bits for
22354 consistency with the behaviour on 32-bit hosts. Remember value
22355 for emit_reloc. */
22356 value &= 0xffffffff;
22357 value ^= 0x80000000;
22358 value -= 0x80000000;
22359
22360 *valP = value;
22361 fixP->fx_addnumber = value;
22362
22363 /* Same treatment for fixP->fx_offset. */
22364 fixP->fx_offset &= 0xffffffff;
22365 fixP->fx_offset ^= 0x80000000;
22366 fixP->fx_offset -= 0x80000000;
22367
22368 switch (fixP->fx_r_type)
22369 {
22370 case BFD_RELOC_NONE:
22371 /* This will need to go in the object file. */
22372 fixP->fx_done = 0;
22373 break;
22374
22375 case BFD_RELOC_ARM_IMMEDIATE:
22376 /* We claim that this fixup has been processed here,
22377 even if in fact we generate an error because we do
22378 not have a reloc for it, so tc_gen_reloc will reject it. */
22379 fixP->fx_done = 1;
22380
22381 if (fixP->fx_addsy)
22382 {
22383 const char *msg = 0;
22384
22385 if (! S_IS_DEFINED (fixP->fx_addsy))
22386 msg = _("undefined symbol %s used as an immediate value");
22387 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22388 msg = _("symbol %s is in a different section");
22389 else if (S_IS_WEAK (fixP->fx_addsy))
22390 msg = _("symbol %s is weak and may be overridden later");
22391
22392 if (msg)
22393 {
22394 as_bad_where (fixP->fx_file, fixP->fx_line,
22395 msg, S_GET_NAME (fixP->fx_addsy));
22396 break;
22397 }
22398 }
22399
22400 temp = md_chars_to_number (buf, INSN_SIZE);
22401
22402 /* If the offset is negative, we should use encoding A2 for ADR. */
22403 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
22404 newimm = negate_data_op (&temp, value);
22405 else
22406 {
22407 newimm = encode_arm_immediate (value);
22408
22409 /* If the instruction will fail, see if we can fix things up by
22410 changing the opcode. */
22411 if (newimm == (unsigned int) FAIL)
22412 newimm = negate_data_op (&temp, value);
22413 }
22414
22415 if (newimm == (unsigned int) FAIL)
22416 {
22417 as_bad_where (fixP->fx_file, fixP->fx_line,
22418 _("invalid constant (%lx) after fixup"),
22419 (unsigned long) value);
22420 break;
22421 }
22422
22423 newimm |= (temp & 0xfffff000);
22424 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22425 break;
22426
/* ADRL pseudo-instruction: synthesise a PC-relative address that does
   not fit a single ADD/SUB immediate by emitting a two-instruction
   sequence (or one real insn plus a NOP when one suffices).  */
22427 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22428 {
22429 unsigned int highpart = 0;
22430 unsigned int newinsn = 0xe1a00000; /* nop. */
22431
/* The immediate must be fully resolvable here: reject undefined,
   out-of-section, or weak (pre-emptible) symbols.  */
22432 if (fixP->fx_addsy)
22433 {
22434 const char *msg = 0;
22435
22436 if (! S_IS_DEFINED (fixP->fx_addsy))
22437 msg = _("undefined symbol %s used as an immediate value");
22438 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
22439 msg = _("symbol %s is in a different section");
22440 else if (S_IS_WEAK (fixP->fx_addsy))
22441 msg = _("symbol %s is weak and may be overridden later");
22442
22443 if (msg)
22444 {
22445 as_bad_where (fixP->fx_file, fixP->fx_line,
22446 msg, S_GET_NAME (fixP->fx_addsy));
22447 break;
22448 }
22449 }
22450
22451 newimm = encode_arm_immediate (value);
22452 temp = md_chars_to_number (buf, INSN_SIZE);
22453
22454 /* If the instruction will fail, see if we can fix things up by
22455 changing the opcode. */
22456 if (newimm == (unsigned int) FAIL
22457 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
22458 {
22459 /* No ? OK - try using two ADD instructions to generate
22460 the value. */
22461 newimm = validate_immediate_twopart (value, & highpart);
22462
22463 /* Yes - then make sure that the second instruction is
22464 also an add. */
22465 if (newimm != (unsigned int) FAIL)
22466 newinsn = temp;
22467 /* Still No ? Try using a negated value. */
22468 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
22469 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
22470 /* Otherwise - give up. */
22471 else
22472 {
22473 as_bad_where (fixP->fx_file, fixP->fx_line,
22474 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
22475 (long) value);
22476 break;
22477 }
22478
22479 /* Replace the first operand in the 2nd instruction (which
22480 is the PC) with the destination register. We have
22481 already added in the PC in the first instruction and we
22482 do not want to do it again. */
22483 newinsn &= ~ 0xf0000;
22484 newinsn |= ((newinsn & 0x0f000) << 4);
22485 }
22486
/* Write the low-part instruction, then the second instruction (a real
   high-part add/sub, or the NOP default) in the following word.  */
22487 newimm |= (temp & 0xfffff000);
22488 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22489
22490 highpart |= (newinsn & 0xfffff000);
22491 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22492 }
22493 break;
22494
/* 12-bit load/store offset (LDR/STR) and its literal-pool variant.  */
22495 case BFD_RELOC_ARM_OFFSET_IMM:
22496 if (!fixP->fx_done && seg->use_rela_p)
22497 value = 0;
22498
/* Fall through.  */
22499 case BFD_RELOC_ARM_LITERAL:
/* The encoding stores magnitude plus a U (up/down) bit, not a signed
   field, so split sign from magnitude here.  */
22500 sign = value > 0;
22501
22502 if (value < 0)
22503 value = - value;
22504
22505 if (validate_offset_imm (value, 0) == FAIL)
22506 {
22507 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22508 as_bad_where (fixP->fx_file, fixP->fx_line,
22509 _("invalid literal constant: pool needs to be closer"));
22510 else
22511 as_bad_where (fixP->fx_file, fixP->fx_line,
22512 _("bad immediate value for offset (%ld)"),
22513 (long) value);
22514 break;
22515 }
22516
22517 newval = md_chars_to_number (buf, INSN_SIZE);
22518 if (value == 0)
22519 newval &= 0xfffff000;
22520 else
22521 {
22522 newval &= 0xff7ff000;
22523 newval |= value | (sign ? INDEX_UP : 0);
22524 }
22525 md_number_to_chars (buf, newval, INSN_SIZE);
22526 break;
22527
/* Split 8-bit offset (LDRH/STRH-style addressing): high nibble in bits
   8-11, low nibble in bits 0-3, again with a separate U bit.  */
22528 case BFD_RELOC_ARM_OFFSET_IMM8:
22529 case BFD_RELOC_ARM_HWLITERAL:
22530 sign = value > 0;
22531
22532 if (value < 0)
22533 value = - value;
22534
22535 if (validate_offset_imm (value, 1) == FAIL)
22536 {
22537 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22538 as_bad_where (fixP->fx_file, fixP->fx_line,
22539 _("invalid literal constant: pool needs to be closer"));
22540 else
22541 as_bad_where (fixP->fx_file, fixP->fx_line,
22542 _("bad immediate value for 8-bit offset (%ld)"),
22543 (long) value);
22544 break;
22545 }
22546
22547 newval = md_chars_to_number (buf, INSN_SIZE);
22548 if (value == 0)
22549 newval &= 0xfffff0f0;
22550 else
22551 {
22552 newval &= 0xff7ff0f0;
22553 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22554 }
22555 md_number_to_chars (buf, newval, INSN_SIZE);
22556 break;
22557
/* Thumb word-offset load: unsigned 8-bit offset, scaled by 4; the
   offset field lives in the second halfword (hence buf+2).  */
22558 case BFD_RELOC_ARM_T32_OFFSET_U8:
22559 if (value < 0 || value > 1020 || value % 4 != 0)
22560 as_bad_where (fixP->fx_file, fixP->fx_line,
22561 _("bad immediate value for offset (%ld)"), (long) value);
22562 value /= 4;
22563
22564 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22565 newval |= value;
22566 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22567 break;
22568
22569 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22570 /* This is a complicated relocation used for all varieties of Thumb32
22571 load/store instruction with immediate offset:
22572
22573 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22574 *4, optional writeback(W)
22575 (doubleword load/store)
22576
22577 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22578 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22579 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22580 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22581 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22582
22583 Uppercase letters indicate bits that are already encoded at
22584 this point. Lowercase letters are our problem. For the
22585 second block of instructions, the secondary opcode nybble
22586 (bits 8..11) is present, and bit 23 is zero, even if this is
22587 a PC-relative operation. */
/* Work on the full 32-bit encoding: first halfword in the top 16 bits.  */
22588 newval = md_chars_to_number (buf, THUMB_SIZE);
22589 newval <<= 16;
22590 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22591
22592 if ((newval & 0xf0000000) == 0xe0000000)
22593 {
22594 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22595 if (value >= 0)
22596 newval |= (1 << 23);
22597 else
22598 value = -value;
22599 if (value % 4 != 0)
22600 {
22601 as_bad_where (fixP->fx_file, fixP->fx_line,
22602 _("offset not a multiple of 4"));
22603 break;
22604 }
22605 value /= 4;
22606 if (value > 0xff)
22607 {
22608 as_bad_where (fixP->fx_file, fixP->fx_line,
22609 _("offset out of range"));
22610 break;
22611 }
22612 newval &= ~0xff;
22613 }
22614 else if ((newval & 0x000f0000) == 0x000f0000)
22615 {
22616 /* PC-relative, 12-bit offset. */
22617 if (value >= 0)
22618 newval |= (1 << 23);
22619 else
22620 value = -value;
22621 if (value > 0xfff)
22622 {
22623 as_bad_where (fixP->fx_file, fixP->fx_line,
22624 _("offset out of range"));
22625 break;
22626 }
22627 newval &= ~0xfff;
22628 }
22629 else if ((newval & 0x00000100) == 0x00000100)
22630 {
22631 /* Writeback: 8-bit, +/- offset. */
22632 if (value >= 0)
22633 newval |= (1 << 9);
22634 else
22635 value = -value;
22636 if (value > 0xff)
22637 {
22638 as_bad_where (fixP->fx_file, fixP->fx_line,
22639 _("offset out of range"));
22640 break;
22641 }
22642 newval &= ~0xff;
22643 }
22644 else if ((newval & 0x00000f00) == 0x00000e00)
22645 {
22646 /* T-instruction: positive 8-bit offset. */
22647 if (value < 0 || value > 0xff)
22648 {
22649 as_bad_where (fixP->fx_file, fixP->fx_line,
22650 _("offset out of range"));
22651 break;
22652 }
22653 newval &= ~0xff;
22654 newval |= value;
22655 }
22656 else
22657 {
22658 /* Positive 12-bit or negative 8-bit offset. */
22659 int limit;
22660 if (value >= 0)
22661 {
22662 newval |= (1 << 23);
22663 limit = 0xfff;
22664 }
22665 else
22666 {
22667 value = -value;
22668 limit = 0xff;
22669 }
22670 if (value > limit)
22671 {
22672 as_bad_where (fixP->fx_file, fixP->fx_line,
22673 _("offset out of range"));
22674 break;
22675 }
22676 newval &= ~limit;
22677 }
22678
/* Insert the offset and write the two halfwords back.  */
22679 newval |= value;
22680 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
22681 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
22682 break;
22683
/* ARM register-shift immediate (bits 7-11).  A shift of 32 is encoded
   as 0 for ASR/LSR; it is invalid for LSL and ROR (the 0x60 check).  */
22684 case BFD_RELOC_ARM_SHIFT_IMM:
22685 newval = md_chars_to_number (buf, INSN_SIZE);
22686 if (((unsigned long) value) > 32
22687 || (value == 32
22688 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
22689 {
22690 as_bad_where (fixP->fx_file, fixP->fx_line,
22691 _("shift expression is too large"));
22692 break;
22693 }
22694
22695 if (value == 0)
22696 /* Shifts of zero must be done as lsl. */
22697 newval &= ~0x60;
22698 else if (value == 32)
22699 value = 0;
22700 newval &= 0xfffff07f;
22701 newval |= (value & 0x1f) << 7;
22702 md_number_to_chars (buf, newval, INSN_SIZE);
22703 break;
22704
/* Thumb-2 modified-immediate data processing, plus the plain 12-bit
   immediate used by ADDW/SUBW and ADR-style ADD from PC/SP.  */
22705 case BFD_RELOC_ARM_T32_IMMEDIATE:
22706 case BFD_RELOC_ARM_T32_ADD_IMM:
22707 case BFD_RELOC_ARM_T32_IMM12:
22708 case BFD_RELOC_ARM_T32_ADD_PC12:
22709 /* We claim that this fixup has been processed here,
22710 even if in fact we generate an error because we do
22711 not have a reloc for it, so tc_gen_reloc will reject it. */
22712 fixP->fx_done = 1;
22713
22714 if (fixP->fx_addsy
22715 && ! S_IS_DEFINED (fixP->fx_addsy))
22716 {
22717 as_bad_where (fixP->fx_file, fixP->fx_line,
22718 _("undefined symbol %s used as an immediate value"),
22719 S_GET_NAME (fixP->fx_addsy));
22720 break;
22721 }
22722
/* Assemble both halfwords into one 32-bit value for bit surgery.  */
22723 newval = md_chars_to_number (buf, THUMB_SIZE);
22724 newval <<= 16;
22725 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
22726
22727 newimm = FAIL;
22728 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22729 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22730 {
/* First try the T32 modified-immediate form; failing that, try the
   complementary opcode (e.g. ADD->SUB) with the negated value.  */
22731 newimm = encode_thumb32_immediate (value);
22732 if (newimm == (unsigned int) FAIL)
22733 newimm = thumb32_negate_data_op (&newval, value);
22734 }
22735 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
22736 && newimm == (unsigned int) FAIL)
22737 {
22738 /* Turn add/sum into addw/subw. */
22739 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22740 newval = (newval & 0xfeffffff) | 0x02000000;
22741 /* No flat 12-bit imm encoding for addsw/subsw. */
22742 if ((newval & 0x00100000) == 0)
22743 {
22744 /* 12 bit immediate for addw/subw. */
22745 if (value < 0)
22746 {
22747 value = -value;
22748 newval ^= 0x00a00000;
22749 }
22750 if (value > 0xfff)
22751 newimm = (unsigned int) FAIL;
22752 else
22753 newimm = value;
22754 }
22755 }
22756
22757 if (newimm == (unsigned int)FAIL)
22758 {
22759 as_bad_where (fixP->fx_file, fixP->fx_line,
22760 _("invalid constant (%lx) after fixup"),
22761 (unsigned long) value);
22762 break;
22763 }
22764
/* Scatter the i:imm3:imm8 fields into their encoding positions.  */
22765 newval |= (newimm & 0x800) << 15;
22766 newval |= (newimm & 0x700) << 4;
22767 newval |= (newimm & 0x0ff);
22768
22769 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
22770 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
22771 break;
22772
/* SMC/HVC: 16-bit immediate split as imm4 (bits 0-3) : imm12
   (bits 8-19), hence the (value & 0xfff0) << 4.  */
22773 case BFD_RELOC_ARM_SMC:
22774 if (((unsigned long) value) > 0xffff)
22775 as_bad_where (fixP->fx_file, fixP->fx_line,
22776 _("invalid smc expression"));
22777 newval = md_chars_to_number (buf, INSN_SIZE);
22778 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22779 md_number_to_chars (buf, newval, INSN_SIZE);
22780 break;
22781
22782 case BFD_RELOC_ARM_HVC:
22783 if (((unsigned long) value) > 0xffff)
22784 as_bad_where (fixP->fx_file, fixP->fx_line,
22785 _("invalid hvc expression"));
22786 newval = md_chars_to_number (buf, INSN_SIZE);
22787 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22788 md_number_to_chars (buf, newval, INSN_SIZE);
22789 break;
22790
/* SWI/SVC: tc_fix_data non-zero selects the 16-bit Thumb encoding
   (8-bit comment field); otherwise the ARM encoding (24-bit field).
   NOTE(review): exact tc_fix_data semantics set at fix creation —
   confirm against the emitting code elsewhere in this file.  */
22791 case BFD_RELOC_ARM_SWI:
22792 if (fixP->tc_fix_data != 0)
22793 {
22794 if (((unsigned long) value) > 0xff)
22795 as_bad_where (fixP->fx_file, fixP->fx_line,
22796 _("invalid swi expression"));
22797 newval = md_chars_to_number (buf, THUMB_SIZE);
22798 newval |= value;
22799 md_number_to_chars (buf, newval, THUMB_SIZE);
22800 }
22801 else
22802 {
22803 if (((unsigned long) value) > 0x00ffffff)
22804 as_bad_where (fixP->fx_file, fixP->fx_line,
22805 _("invalid swi expression"));
22806 newval = md_chars_to_number (buf, INSN_SIZE);
22807 newval |= value;
22808 md_number_to_chars (buf, newval, INSN_SIZE);
22809 }
22810 break;
22811
/* LDM/STM register list: 16-bit mask ORed into the low half.  */
22812 case BFD_RELOC_ARM_MULTI:
22813 if (((unsigned long) value) > 0xffff)
22814 as_bad_where (fixP->fx_file, fixP->fx_line,
22815 _("invalid expression in load/store multiple"));
22816 newval = value | md_chars_to_number (buf, INSN_SIZE);
22817 md_number_to_chars (buf, newval, INSN_SIZE);
22818 break;
22819
22820 #ifdef OBJ_ELF
22821 case BFD_RELOC_ARM_PCREL_CALL:
22822
22823 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22824 && fixP->fx_addsy
22825 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22826 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22827 && THUMB_IS_FUNC (fixP->fx_addsy))
22828 /* Flip the bl to blx. This is a simple flip
22829 bit here because we generate PCREL_CALL for
22830 unconditional bls. */
22831 {
22832 newval = md_chars_to_number (buf, INSN_SIZE);
22833 newval = newval | 0x10000000;
22834 md_number_to_chars (buf, newval, INSN_SIZE);
22835 temp = 1;
22836 fixP->fx_done = 1;
22837 }
22838 else
22839 temp = 3;
22840 goto arm_branch_common;
22841
22842 case BFD_RELOC_ARM_PCREL_JUMP:
22843 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22844 && fixP->fx_addsy
22845 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22846 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22847 && THUMB_IS_FUNC (fixP->fx_addsy))
22848 {
22849 /* This would map to a bl<cond>, b<cond>,
22850 b<always> to a Thumb function. We
22851 need to force a relocation for this particular
22852 case. */
22853 newval = md_chars_to_number (buf, INSN_SIZE);
22854 fixP->fx_done = 0;
22855 }
22856
/* Fall through.  */
22857 case BFD_RELOC_ARM_PLT32:
22858 #endif
22859 case BFD_RELOC_ARM_PCREL_BRANCH:
/* temp is the alignment mask checked below: 3 for B/BL (word-aligned
   target), 1 for BLX (halfword-aligned target).  */
22860 temp = 3;
22861 goto arm_branch_common;
22862
22863 case BFD_RELOC_ARM_PCREL_BLX:
22864
22865 temp = 1;
22866 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22867 && fixP->fx_addsy
22868 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22869 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22870 && ARM_IS_FUNC (fixP->fx_addsy))
22871 {
22872 /* Flip the blx to a bl and warn. */
22873 const char *name = S_GET_NAME (fixP->fx_addsy);
22874 newval = 0xeb000000;
22875 as_warn_where (fixP->fx_file, fixP->fx_line,
22876 _("blx to '%s' an ARM ISA state function changed to bl"),
22877 name);
22878 md_number_to_chars (buf, newval, INSN_SIZE);
22879 temp = 3;
22880 fixP->fx_done = 1;
22881 }
22882
22883 #ifdef OBJ_ELF
22884 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22885 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
22886 #endif
22887
22888 arm_branch_common:
22889 /* We are going to store value (shifted right by two) in the
22890 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22891 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
22892 also be clear. */
22893 if (value & temp)
22894 as_bad_where (fixP->fx_file, fixP->fx_line,
22895 _("misaligned branch destination"));
22896 if ((value & (offsetT)0xfe000000) != (offsetT)0
22897 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
22898 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22899
22900 if (fixP->fx_done || !seg->use_rela_p)
22901 {
22902 newval = md_chars_to_number (buf, INSN_SIZE);
22903 newval |= (value >> 2) & 0x00ffffff;
22904 /* Set the H bit on BLX instructions. */
22905 if (temp == 1)
22906 {
22907 if (value & 2)
22908 newval |= 0x01000000;
22909 else
22910 newval &= ~0x01000000;
22911 }
22912 md_number_to_chars (buf, newval, INSN_SIZE);
22913 }
22914 break;
22915
22916 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
22917 /* CBZ can only branch forward. */
22918
22919 /* Attempts to use CBZ to branch to the next instruction
22920 (which, strictly speaking, are prohibited) will be turned into
22921 no-ops.
22922
22923 FIXME: It may be better to remove the instruction completely and
22924 perform relaxation. */
22925 if (value == -2)
22926 {
22927 newval = md_chars_to_number (buf, THUMB_SIZE);
22928 newval = 0xbf00; /* NOP encoding T1 */
22929 md_number_to_chars (buf, newval, THUMB_SIZE);
22930 }
22931 else
22932 {
/* Offset field is i:imm5:'0' — 6 significant bits, even only.  */
22933 if (value & ~0x7e)
22934 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22935
22936 if (fixP->fx_done || !seg->use_rela_p)
22937 {
22938 newval = md_chars_to_number (buf, THUMB_SIZE);
22939 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
22940 md_number_to_chars (buf, newval, THUMB_SIZE);
22941 }
22942 }
22943 break;
22944
22945 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
/* Signed 9-bit range (even offsets): all bits above 0xff must agree.  */
22946 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
22947 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22948
22949 if (fixP->fx_done || !seg->use_rela_p)
22950 {
22951 newval = md_chars_to_number (buf, THUMB_SIZE);
22952 newval |= (value & 0x1ff) >> 1;
22953 md_number_to_chars (buf, newval, THUMB_SIZE);
22954 }
22955 break;
22956
22957 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
22958 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
22959 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22960
22961 if (fixP->fx_done || !seg->use_rela_p)
22962 {
22963 newval = md_chars_to_number (buf, THUMB_SIZE);
22964 newval |= (value & 0xfff) >> 1;
22965 md_number_to_chars (buf, newval, THUMB_SIZE);
22966 }
22967 break;
22968
/* Thumb-2 conditional branch (B<cond>.W), 20-bit offset.  */
22969 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22970 if (fixP->fx_addsy
22971 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22972 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22973 && ARM_IS_FUNC (fixP->fx_addsy)
22974 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22975 {
22976 /* Force a relocation for a branch 20 bits wide. */
22977 fixP->fx_done = 0;
22978 }
22979 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
22980 as_bad_where (fixP->fx_file, fixP->fx_line,
22981 _("conditional branch out of range"));
22982
22983 if (fixP->fx_done || !seg->use_rela_p)
22984 {
22985 offsetT newval2;
22986 addressT S, J1, J2, lo, hi;
22987
/* Decompose the offset into the S:J2:J1:imm6:imm11 encoding fields.  */
22988 S = (value & 0x00100000) >> 20;
22989 J2 = (value & 0x00080000) >> 19;
22990 J1 = (value & 0x00040000) >> 18;
22991 hi = (value & 0x0003f000) >> 12;
22992 lo = (value & 0x00000ffe) >> 1;
22993
22994 newval = md_chars_to_number (buf, THUMB_SIZE);
22995 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22996 newval |= (S << 10) | hi;
22997 newval2 |= (J1 << 13) | (J2 << 11) | lo;
22998 md_number_to_chars (buf, newval, THUMB_SIZE);
22999 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23000 }
23001 break;
23002
23003 case BFD_RELOC_THUMB_PCREL_BLX:
23004 /* If there is a blx from a thumb state function to
23005 another thumb function flip this to a bl and warn
23006 about it. */
23007
23008 if (fixP->fx_addsy
23009 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23010 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23011 && THUMB_IS_FUNC (fixP->fx_addsy))
23012 {
23013 const char *name = S_GET_NAME (fixP->fx_addsy);
23014 as_warn_where (fixP->fx_file, fixP->fx_line,
23015 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23016 name);
/* Bit 12 of the second halfword distinguishes BL (set) from BLX.  */
23017 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23018 newval = newval | 0x1000;
23019 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23020 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23021 fixP->fx_done = 1;
23022 }
23023
23024
23025 goto thumb_bl_common;
23026
23027 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23028 /* A bl from Thumb state ISA to an internal ARM state function
23029 is converted to a blx. */
23030 if (fixP->fx_addsy
23031 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23032 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23033 && ARM_IS_FUNC (fixP->fx_addsy)
23034 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23035 {
23036 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23037 newval = newval & ~0x1000;
23038 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23039 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23040 fixP->fx_done = 1;
23041 }
23042
23043 thumb_bl_common:
23044
23045 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23046 /* For a BLX instruction, make sure that the relocation is rounded up
23047 to a word boundary. This follows the semantics of the instruction
23048 which specifies that bit 1 of the target address will come from bit
23049 1 of the base address. */
23050 value = (value + 3) & ~ 3;
23051
23052 #ifdef OBJ_ELF
23053 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23054 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23055 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23056 #endif
23057
/* Range check: pre-v6T2 cores have a 22-bit offset; v6T2 and later
   extend BL/BLX to a 24-bit offset.  */
23058 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23059 {
23060 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23061 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23062 else if ((value & ~0x1ffffff)
23063 && ((value & ~0x1ffffff) != ~0x1ffffff))
23064 as_bad_where (fixP->fx_file, fixP->fx_line,
23065 _("Thumb2 branch out of range"));
23066 }
23067
23068 if (fixP->fx_done || !seg->use_rela_p)
23069 encode_thumb2_b_bl_offset (buf, value);
23070
23071 break;
23072
23073 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23074 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23075 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23076
23077 if (fixP->fx_done || !seg->use_rela_p)
23078 encode_thumb2_b_bl_offset (buf, value);
23079
23080 break;
23081
/* Plain 8- and 16-bit data relocations: store the value in place for
   REL targets or once the fix is fully resolved.  */
23082 case BFD_RELOC_8:
23083 if (fixP->fx_done || !seg->use_rela_p)
23084 *buf = value;
23085 break;
23086
23087 case BFD_RELOC_16:
23088 if (fixP->fx_done || !seg->use_rela_p)
23089 md_number_to_chars (buf, value, 2);
23090 break;
23091
23092 #ifdef OBJ_ELF
/* TLS relocations: just flag the symbol as thread-local; the actual
   relocation is emitted for the linker to resolve.  */
23093 case BFD_RELOC_ARM_TLS_CALL:
23094 case BFD_RELOC_ARM_THM_TLS_CALL:
23095 case BFD_RELOC_ARM_TLS_DESCSEQ:
23096 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23097 case BFD_RELOC_ARM_TLS_GOTDESC:
23098 case BFD_RELOC_ARM_TLS_GD32:
23099 case BFD_RELOC_ARM_TLS_LE32:
23100 case BFD_RELOC_ARM_TLS_IE32:
23101 case BFD_RELOC_ARM_TLS_LDM32:
23102 case BFD_RELOC_ARM_TLS_LDO32:
23103 S_SET_THREAD_LOCAL (fixP->fx_addsy);
23104 break;
23105
/* GOT-relative relocations are left entirely to the linker.  */
23106 case BFD_RELOC_ARM_GOT32:
23107 case BFD_RELOC_ARM_GOTOFF:
23108 break;
23109
23110 case BFD_RELOC_ARM_GOT_PREL:
23111 if (fixP->fx_done || !seg->use_rela_p)
23112 md_number_to_chars (buf, value, 4);
23113 break;
23114
23115 case BFD_RELOC_ARM_TARGET2:
23116 /* TARGET2 is not partial-inplace, so we need to write the
23117 addend here for REL targets, because it won't be written out
23118 during reloc processing later. */
23119 if (fixP->fx_done || !seg->use_rela_p)
23120 md_number_to_chars (buf, fixP->fx_offset, 4);
23121 break;
23122 #endif
23123
/* 32-bit data relocations, written in place for REL/resolved fixes.  */
23124 case BFD_RELOC_RVA:
23125 case BFD_RELOC_32:
23126 case BFD_RELOC_ARM_TARGET1:
23127 case BFD_RELOC_ARM_ROSEGREL32:
23128 case BFD_RELOC_ARM_SBREL32:
23129 case BFD_RELOC_32_PCREL:
23130 #ifdef TE_PE
23131 case BFD_RELOC_32_SECREL:
23132 #endif
23133 if (fixP->fx_done || !seg->use_rela_p)
23134 #ifdef TE_WINCE
23135 /* For WinCE we only do this for pcrel fixups. */
23136 if (fixP->fx_done || fixP->fx_pcrel)
23137 #endif
23138 md_number_to_chars (buf, value, 4);
23139 break;
23140
23141 #ifdef OBJ_ELF
/* 31-bit PC-relative value (EH tables): bit 31 of the in-place word is
   preserved; the value must fit in a signed 31-bit field, which is what
   the (value ^ (value >> 1)) & 0x40000000 test checks.  */
23142 case BFD_RELOC_ARM_PREL31:
23143 if (fixP->fx_done || !seg->use_rela_p)
23144 {
23145 newval = md_chars_to_number (buf, 4) & 0x80000000;
23146 if ((value ^ (value >> 1)) & 0x40000000)
23147 {
23148 as_bad_where (fixP->fx_file, fixP->fx_line,
23149 _("rel31 relocation overflow"));
23150 }
23151 newval |= value & 0x7fffffff;
23152 md_number_to_chars (buf, newval, 4);
23153 }
23154 break;
23155 #endif
23156
/* Coprocessor load/store offset: stored as an 8-bit word count plus a
   U (up/down) bit, so the byte offset must be word-aligned and within
   +/-1020; the *_S2 variants take a pre-scaled (x4) operand.  */
23157 case BFD_RELOC_ARM_CP_OFF_IMM:
23158 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
23159 if (value < -1023 || value > 1023 || (value & 3))
23160 as_bad_where (fixP->fx_file, fixP->fx_line,
23161 _("co-processor offset out of range"));
23162 cp_off_common:
23163 sign = value > 0;
23164 if (value < 0)
23165 value = -value;
23166 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23167 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23168 newval = md_chars_to_number (buf, INSN_SIZE);
23169 else
23170 newval = get_thumb32_insn (buf);
23171 if (value == 0)
23172 newval &= 0xffffff00;
23173 else
23174 {
23175 newval &= 0xff7fff00;
23176 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
23177 }
23178 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23179 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
23180 md_number_to_chars (buf, newval, INSN_SIZE);
23181 else
23182 put_thumb32_insn (buf, newval);
23183 break;
23184
23185 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
23186 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
23187 if (value < -255 || value > 255)
23188 as_bad_where (fixP->fx_file, fixP->fx_line,
23189 _("co-processor offset out of range"));
/* Scale up to a byte offset, then share the common insertion code.  */
23190 value *= 4;
23191 goto cp_off_common;
23192
23193 case BFD_RELOC_ARM_THUMB_OFFSET:
23194 newval = md_chars_to_number (buf, THUMB_SIZE);
23195 /* Exactly what ranges, and where the offset is inserted depends
23196 on the type of instruction, we can establish this from the
23197 top 4 bits. */
23198 switch (newval >> 12)
23199 {
23200 case 4: /* PC load. */
23201 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
23202 forced to zero for these loads; md_pcrel_from has already
23203 compensated for this. */
23204 if (value & 3)
23205 as_bad_where (fixP->fx_file, fixP->fx_line,
23206 _("invalid offset, target not word aligned (0x%08lX)"),
23207 (((unsigned long) fixP->fx_frag->fr_address
23208 + (unsigned long) fixP->fx_where) & ~3)
23209 + (unsigned long) value);
23210
23211 if (value & ~0x3fc)
23212 as_bad_where (fixP->fx_file, fixP->fx_line,
23213 _("invalid offset, value too big (0x%08lX)"),
23214 (long) value);
23215
23216 newval |= value >> 2;
23217 break;
23218
23219 case 9: /* SP load/store. */
23220 if (value & ~0x3fc)
23221 as_bad_where (fixP->fx_file, fixP->fx_line,
23222 _("invalid offset, value too big (0x%08lX)"),
23223 (long) value);
23224 newval |= value >> 2;
23225 break;
23226
23227 case 6: /* Word load/store. */
23228 if (value & ~0x7c)
23229 as_bad_where (fixP->fx_file, fixP->fx_line,
23230 _("invalid offset, value too big (0x%08lX)"),
23231 (long) value);
23232 newval |= value << 4; /* 6 - 2. */
23233 break;
23234
23235 case 7: /* Byte load/store. */
23236 if (value & ~0x1f)
23237 as_bad_where (fixP->fx_file, fixP->fx_line,
23238 _("invalid offset, value too big (0x%08lX)"),
23239 (long) value);
23240 newval |= value << 6;
23241 break;
23242
23243 case 8: /* Halfword load/store. */
23244 if (value & ~0x3e)
23245 as_bad_where (fixP->fx_file, fixP->fx_line,
23246 _("invalid offset, value too big (0x%08lX)"),
23247 (long) value);
23248 newval |= value << 5; /* 6 - 1. */
23249 break;
23250
23251 default:
/* NOTE(review): this diagnostic is not wrapped in _() for gettext,
   unlike every other message in this function — consider marking it.  */
23252 as_bad_where (fixP->fx_file, fixP->fx_line,
23253 "Unable to process relocation for thumb opcode: %lx",
23254 (unsigned long) newval);
23255 break;
23256 }
23257 md_number_to_chars (buf, newval, THUMB_SIZE);
23258 break;
23259
23260 case BFD_RELOC_ARM_THUMB_ADD:
23261 /* This is a complicated relocation, since we use it for all of
23262 the following immediate relocations:
23263
23264 3bit ADD/SUB
23265 8bit ADD/SUB
23266 9bit ADD/SUB SP word-aligned
23267 10bit ADD PC/SP word-aligned
23268
23269 The type of instruction being processed is encoded in the
23270 instruction field:
23271
23272 0x8000 SUB
23273 0x00F0 Rd
23274 0x000F Rs
23275 */
23276 newval = md_chars_to_number (buf, THUMB_SIZE);
23277 {
23278 int rd = (newval >> 4) & 0xf;
23279 int rs = newval & 0xf;
23280 int subtract = !!(newval & 0x8000);
23281
23282 /* Check for HI regs, only very restricted cases allowed:
23283 Adjusting SP, and using PC or SP to get an address. */
23284 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
23285 || (rs > 7 && rs != REG_SP && rs != REG_PC))
23286 as_bad_where (fixP->fx_file, fixP->fx_line,
23287 _("invalid Hi register with immediate"));
23288
23289 /* If value is negative, choose the opposite instruction. */
23290 if (value < 0)
23291 {
23292 value = -value;
23293 subtract = !subtract;
/* Still negative after negation means the most-negative offsetT.  */
23294 if (value < 0)
23295 as_bad_where (fixP->fx_file, fixP->fx_line,
23296 _("immediate value out of range"));
23297 }
23298
23299 if (rd == REG_SP)
23300 {
23301 if (value & ~0x1fc)
23302 as_bad_where (fixP->fx_file, fixP->fx_line,
23303 _("invalid immediate for stack address calculation"));
23304 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
23305 newval |= value >> 2;
23306 }
23307 else if (rs == REG_PC || rs == REG_SP)
23308 {
23309 /* PR gas/18541. If the addition is for a defined symbol
23310 within range of an ADR instruction then accept it. */
23311 if (subtract
23312 && value == 4
23313 && fixP->fx_addsy != NULL)
23314 {
23315 subtract = 0;
23316
23317 if (! S_IS_DEFINED (fixP->fx_addsy)
23318 || S_GET_SEGMENT (fixP->fx_addsy) != seg
23319 || S_IS_WEAK (fixP->fx_addsy))
23320 {
23321 as_bad_where (fixP->fx_file, fixP->fx_line,
23322 _("address calculation needs a strongly defined nearby symbol"));
23323 }
23324 else
23325 {
23326 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
23327
23328 /* Round up to the next 4-byte boundary. */
23329 if (v & 3)
23330 v = (v + 3) & ~ 3;
23331 else
23332 v += 4;
23333 v = S_GET_VALUE (fixP->fx_addsy) - v;
23334
23335 if (v & ~0x3fc)
23336 {
23337 as_bad_where (fixP->fx_file, fixP->fx_line,
23338 _("symbol too far away"));
23339 }
23340 else
23341 {
23342 fixP->fx_done = 1;
23343 value = v;
23344 }
23345 }
23346 }
23347
23348 if (subtract || value & ~0x3fc)
23349 as_bad_where (fixP->fx_file, fixP->fx_line,
23350 _("invalid immediate for address calculation (value = 0x%08lX)"),
23351 (unsigned long) (subtract ? - value : value));
23352 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
23353 newval |= rd << 8;
23354 newval |= value >> 2;
23355 }
23356 else if (rs == rd)
23357 {
/* Same source and destination: 8-bit immediate form.  */
23358 if (value & ~0xff)
23359 as_bad_where (fixP->fx_file, fixP->fx_line,
23360 _("immediate value out of range"));
23361 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
23362 newval |= (rd << 8) | value;
23363 }
23364 else
23365 {
/* Three-operand form: only a 3-bit immediate is available.  */
23366 if (value & ~0x7)
23367 as_bad_where (fixP->fx_file, fixP->fx_line,
23368 _("immediate value out of range"));
23369 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
23370 newval |= rd | (rs << 3) | (value << 6);
23371 }
23372 }
23373 md_number_to_chars (buf, newval, THUMB_SIZE);
23374 break;
23375
/* 8-bit immediate for Thumb MOV/CMP-style instructions.  */
23376 case BFD_RELOC_ARM_THUMB_IMM:
23377 newval = md_chars_to_number (buf, THUMB_SIZE);
23378 if (value < 0 || value > 255)
23379 as_bad_where (fixP->fx_file, fixP->fx_line,
23380 _("invalid immediate: %ld is out of range"),
23381 (long) value);
23382 newval |= value;
23383 md_number_to_chars (buf, newval, THUMB_SIZE);
23384 break;
23385
23386 case BFD_RELOC_ARM_THUMB_SHIFT:
23387 /* 5bit shift value (0..32). LSL cannot take 32. */
23388 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
23389 temp = newval & 0xf800;
23390 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
23391 as_bad_where (fixP->fx_file, fixP->fx_line,
23392 _("invalid shift value: %ld"), (long) value);
23393 /* Shifts of zero must be encoded as LSL. */
23394 if (value == 0)
23395 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
23396 /* Shifts of 32 are encoded as zero. */
23397 else if (value == 32)
23398 value = 0;
23399 newval |= value << 6;
23400 md_number_to_chars (buf, newval, THUMB_SIZE);
23401 break;
23402
/* Vtable relocations are always emitted for the linker: mark the fix
   as not done and return early (no in-place patching at all).  */
23403 case BFD_RELOC_VTABLE_INHERIT:
23404 case BFD_RELOC_VTABLE_ENTRY:
23405 fixP->fx_done = 0;
23406 return;
23407
/* MOVW/MOVT (ARM and Thumb): insert a 16-bit immediate; for MOVT the
   high half of the value is used.  */
23408 case BFD_RELOC_ARM_MOVW:
23409 case BFD_RELOC_ARM_MOVT:
23410 case BFD_RELOC_ARM_THUMB_MOVW:
23411 case BFD_RELOC_ARM_THUMB_MOVT:
23412 if (fixP->fx_done || !seg->use_rela_p)
23413 {
23414 /* REL format relocations are limited to a 16-bit addend. */
23415 if (!fixP->fx_done)
23416 {
23417 if (value < -0x8000 || value > 0x7fff)
23418 as_bad_where (fixP->fx_file, fixP->fx_line,
23419 _("offset out of range"));
23420 }
23421 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23422 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23423 {
23424 value >>= 16;
23425 }
23426
23427 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23428 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
23429 {
/* Thumb encoding: scatter imm4:i:imm3:imm8 into the 32-bit insn.  */
23430 newval = get_thumb32_insn (buf);
23431 newval &= 0xfbf08f00;
23432 newval |= (value & 0xf000) << 4;
23433 newval |= (value & 0x0800) << 15;
23434 newval |= (value & 0x0700) << 4;
23435 newval |= (value & 0x00ff);
23436 put_thumb32_insn (buf, newval);
23437 }
23438 else
23439 {
/* ARM encoding: imm4 in bits 16-19, imm12 in bits 0-11.  */
23440 newval = md_chars_to_number (buf, 4);
23441 newval &= 0xfff0f000;
23442 newval |= value & 0x0fff;
23443 newval |= (value & 0xf000) << 4;
23444 md_number_to_chars (buf, newval, 4);
23445 }
23446 }
23447 return;
23448
/* Thumb-1 ALU_ABS group relocations (MOVS/ADDS with a byte of an
   absolute address).  Never resolved here (gas_assert), but for REL
   targets the addend must be written into the instruction.  */
23449 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
23450 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
23451 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
23452 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
23453 gas_assert (!fixP->fx_done);
23454 {
23455 bfd_vma insn;
23456 bfd_boolean is_mov;
23457 bfd_vma encoded_addend = value;
23458
23459 /* Check that addend can be encoded in instruction. */
23460 if (!seg->use_rela_p && (value < 0 || value > 255))
23461 as_bad_where (fixP->fx_file, fixP->fx_line,
23462 _("the offset 0x%08lX is not representable"),
23463 (unsigned long) encoded_addend);
23464
23465 /* Extract the instruction. */
23466 insn = md_chars_to_number (buf, THUMB_SIZE);
23467 is_mov = (insn & 0xf800) == 0x2000;
23468
23469 /* Encode insn. */
23470 if (is_mov)
23471 {
23472 if (!seg->use_rela_p)
23473 insn |= encoded_addend;
23474 }
23475 else
23476 {
23477 int rd, rs;
23478
23479 /* Extract the instruction. */
23480 /* Encoding is the following
23481 0x8000 SUB
23482 0x00F0 Rd
23483 0x000F Rs
23484 */
23485 /* The following conditions must be true :
23486 - ADD
23487 - Rd == Rs
23488 - Rd <= 7
23489 */
23490 rd = (insn >> 4) & 0xf;
23491 rs = insn & 0xf;
23492 if ((insn & 0x8000) || (rd != rs) || rd > 7)
23493 as_bad_where (fixP->fx_file, fixP->fx_line,
23494 _("Unable to process relocation for thumb opcode: %lx"),
23495 (unsigned long) insn);
23496
23497 /* Encode as ADD immediate8 thumb 1 code. */
23498 insn = 0x3000 | (rd << 8);
23499
23500 /* Place the encoded addend into the first 8 bits of the
23501 instruction. */
23502 if (!seg->use_rela_p)
23503 insn |= encoded_addend;
23504 }
23505
23506 /* Update the instruction. */
23507 md_number_to_chars (buf, insn, THUMB_SIZE);
23508 }
23509 break;
23510
23511 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23512 case BFD_RELOC_ARM_ALU_PC_G0:
23513 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23514 case BFD_RELOC_ARM_ALU_PC_G1:
23515 case BFD_RELOC_ARM_ALU_PC_G2:
23516 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23517 case BFD_RELOC_ARM_ALU_SB_G0:
23518 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23519 case BFD_RELOC_ARM_ALU_SB_G1:
23520 case BFD_RELOC_ARM_ALU_SB_G2:
23521 gas_assert (!fixP->fx_done);
23522 if (!seg->use_rela_p)
23523 {
23524 bfd_vma insn;
23525 bfd_vma encoded_addend;
23526 bfd_vma addend_abs = abs (value);
23527
23528 /* Check that the absolute value of the addend can be
23529 expressed as an 8-bit constant plus a rotation. */
23530 encoded_addend = encode_arm_immediate (addend_abs);
23531 if (encoded_addend == (unsigned int) FAIL)
23532 as_bad_where (fixP->fx_file, fixP->fx_line,
23533 _("the offset 0x%08lX is not representable"),
23534 (unsigned long) addend_abs);
23535
23536 /* Extract the instruction. */
23537 insn = md_chars_to_number (buf, INSN_SIZE);
23538
23539 /* If the addend is positive, use an ADD instruction.
23540 Otherwise use a SUB. Take care not to destroy the S bit. */
23541 insn &= 0xff1fffff;
23542 if (value < 0)
23543 insn |= 1 << 22;
23544 else
23545 insn |= 1 << 23;
23546
23547 /* Place the encoded addend into the first 12 bits of the
23548 instruction. */
23549 insn &= 0xfffff000;
23550 insn |= encoded_addend;
23551
23552 /* Update the instruction. */
23553 md_number_to_chars (buf, insn, INSN_SIZE);
23554 }
23555 break;
23556
23557 case BFD_RELOC_ARM_LDR_PC_G0:
23558 case BFD_RELOC_ARM_LDR_PC_G1:
23559 case BFD_RELOC_ARM_LDR_PC_G2:
23560 case BFD_RELOC_ARM_LDR_SB_G0:
23561 case BFD_RELOC_ARM_LDR_SB_G1:
23562 case BFD_RELOC_ARM_LDR_SB_G2:
23563 gas_assert (!fixP->fx_done);
23564 if (!seg->use_rela_p)
23565 {
23566 bfd_vma insn;
23567 bfd_vma addend_abs = abs (value);
23568
23569 /* Check that the absolute value of the addend can be
23570 encoded in 12 bits. */
23571 if (addend_abs >= 0x1000)
23572 as_bad_where (fixP->fx_file, fixP->fx_line,
23573 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
23574 (unsigned long) addend_abs);
23575
23576 /* Extract the instruction. */
23577 insn = md_chars_to_number (buf, INSN_SIZE);
23578
23579 /* If the addend is negative, clear bit 23 of the instruction.
23580 Otherwise set it. */
23581 if (value < 0)
23582 insn &= ~(1 << 23);
23583 else
23584 insn |= 1 << 23;
23585
23586 /* Place the absolute value of the addend into the first 12 bits
23587 of the instruction. */
23588 insn &= 0xfffff000;
23589 insn |= addend_abs;
23590
23591 /* Update the instruction. */
23592 md_number_to_chars (buf, insn, INSN_SIZE);
23593 }
23594 break;
23595
23596 case BFD_RELOC_ARM_LDRS_PC_G0:
23597 case BFD_RELOC_ARM_LDRS_PC_G1:
23598 case BFD_RELOC_ARM_LDRS_PC_G2:
23599 case BFD_RELOC_ARM_LDRS_SB_G0:
23600 case BFD_RELOC_ARM_LDRS_SB_G1:
23601 case BFD_RELOC_ARM_LDRS_SB_G2:
23602 gas_assert (!fixP->fx_done);
23603 if (!seg->use_rela_p)
23604 {
23605 bfd_vma insn;
23606 bfd_vma addend_abs = abs (value);
23607
23608 /* Check that the absolute value of the addend can be
23609 encoded in 8 bits. */
23610 if (addend_abs >= 0x100)
23611 as_bad_where (fixP->fx_file, fixP->fx_line,
23612 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23613 (unsigned long) addend_abs);
23614
23615 /* Extract the instruction. */
23616 insn = md_chars_to_number (buf, INSN_SIZE);
23617
23618 /* If the addend is negative, clear bit 23 of the instruction.
23619 Otherwise set it. */
23620 if (value < 0)
23621 insn &= ~(1 << 23);
23622 else
23623 insn |= 1 << 23;
23624
23625 /* Place the first four bits of the absolute value of the addend
23626 into the first 4 bits of the instruction, and the remaining
23627 four into bits 8 .. 11. */
23628 insn &= 0xfffff0f0;
23629 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23630
23631 /* Update the instruction. */
23632 md_number_to_chars (buf, insn, INSN_SIZE);
23633 }
23634 break;
23635
23636 case BFD_RELOC_ARM_LDC_PC_G0:
23637 case BFD_RELOC_ARM_LDC_PC_G1:
23638 case BFD_RELOC_ARM_LDC_PC_G2:
23639 case BFD_RELOC_ARM_LDC_SB_G0:
23640 case BFD_RELOC_ARM_LDC_SB_G1:
23641 case BFD_RELOC_ARM_LDC_SB_G2:
23642 gas_assert (!fixP->fx_done);
23643 if (!seg->use_rela_p)
23644 {
23645 bfd_vma insn;
23646 bfd_vma addend_abs = abs (value);
23647
23648 /* Check that the absolute value of the addend is a multiple of
23649 four and, when divided by four, fits in 8 bits. */
23650 if (addend_abs & 0x3)
23651 as_bad_where (fixP->fx_file, fixP->fx_line,
23652 _("bad offset 0x%08lX (must be word-aligned)"),
23653 (unsigned long) addend_abs);
23654
23655 if ((addend_abs >> 2) > 0xff)
23656 as_bad_where (fixP->fx_file, fixP->fx_line,
23657 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23658 (unsigned long) addend_abs);
23659
23660 /* Extract the instruction. */
23661 insn = md_chars_to_number (buf, INSN_SIZE);
23662
23663 /* If the addend is negative, clear bit 23 of the instruction.
23664 Otherwise set it. */
23665 if (value < 0)
23666 insn &= ~(1 << 23);
23667 else
23668 insn |= 1 << 23;
23669
23670 /* Place the addend (divided by four) into the first eight
23671 bits of the instruction. */
23672 insn &= 0xfffffff0;
23673 insn |= addend_abs >> 2;
23674
23675 /* Update the instruction. */
23676 md_number_to_chars (buf, insn, INSN_SIZE);
23677 }
23678 break;
23679
23680 case BFD_RELOC_ARM_V4BX:
23681 /* This will need to go in the object file. */
23682 fixP->fx_done = 0;
23683 break;
23684
23685 case BFD_RELOC_UNUSED:
23686 default:
23687 as_bad_where (fixP->fx_file, fixP->fx_line,
23688 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
23689 }
23690 }
23691
/* Translate internal representation of relocation info to BFD target
   format.

   Returns a freshly xmalloc'd arelent describing FIXP, or NULL after
   issuing a diagnostic when the fixup cannot be represented in the
   output format.  Ownership of the arelent passes to the caller
   (generic write code frees it).  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      /* For RELA targets the addend carries the PC bias explicitly;
	 for REL targets the place itself is recorded instead.  */
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
      /* The next seven cases deliberately fall through when not
	 pc-relative, eventually reaching the plain
	 "code = fixp->fx_r_type" at the end of the chain.  */
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

      /* These relocations pass through to the object file unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI >= 4 expresses a Thumb BLX as an ordinary BRANCH23; the
	 linker decides whether a BLX or mode switch is needed.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Internal-only relocation types must have been resolved by
	   md_apply_fix; reaching here is a diagnosable error.  */
	char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to _GLOBAL_OFFSET_TABLE_ becomes a GOTPC
     relocation with the place as the addend.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
23954
23955 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
23956
23957 void
23958 cons_fix_new_arm (fragS * frag,
23959 int where,
23960 int size,
23961 expressionS * exp,
23962 bfd_reloc_code_real_type reloc)
23963 {
23964 int pcrel = 0;
23965
23966 /* Pick a reloc.
23967 FIXME: @@ Should look at CPU word size. */
23968 switch (size)
23969 {
23970 case 1:
23971 reloc = BFD_RELOC_8;
23972 break;
23973 case 2:
23974 reloc = BFD_RELOC_16;
23975 break;
23976 case 4:
23977 default:
23978 reloc = BFD_RELOC_32;
23979 break;
23980 case 8:
23981 reloc = BFD_RELOC_64;
23982 break;
23983 }
23984
23985 #ifdef TE_PE
23986 if (exp->X_op == O_secrel)
23987 {
23988 exp->X_op = O_symbol;
23989 reloc = BFD_RELOC_32_SECREL;
23990 }
23991 #endif
23992
23993 fix_new_exp (frag, where, size, exp, pcrel, reloc);
23994 }
23995
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not
     have the THUMB_FUNC attribute, then we must be calling a function
     which has the (interfacearm) attribute.  Redirect the branch at the
     Thumb entry point of that function instead.  Anything else is left
     untouched.  */
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23
      || fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
24013
24014
24015 int
24016 arm_force_relocation (struct fix * fixp)
24017 {
24018 #if defined (OBJ_COFF) && defined (TE_PE)
24019 if (fixp->fx_r_type == BFD_RELOC_RVA)
24020 return 1;
24021 #endif
24022
24023 /* In case we have a call or a branch to a function in ARM ISA mode from
24024 a thumb function or vice-versa force the relocation. These relocations
24025 are cleared off for some cores that might have blx and simple transformations
24026 are possible. */
24027
24028 #ifdef OBJ_ELF
24029 switch (fixp->fx_r_type)
24030 {
24031 case BFD_RELOC_ARM_PCREL_JUMP:
24032 case BFD_RELOC_ARM_PCREL_CALL:
24033 case BFD_RELOC_THUMB_PCREL_BLX:
24034 if (THUMB_IS_FUNC (fixp->fx_addsy))
24035 return 1;
24036 break;
24037
24038 case BFD_RELOC_ARM_PCREL_BLX:
24039 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24040 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24041 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24042 if (ARM_IS_FUNC (fixp->fx_addsy))
24043 return 1;
24044 break;
24045
24046 default:
24047 break;
24048 }
24049 #endif
24050
24051 /* Resolve these relocations even if the symbol is extern or weak.
24052 Technically this is probably wrong due to symbol preemption.
24053 In practice these relocations do not have enough range to be useful
24054 at dynamic link time, and some code (e.g. in the Linux kernel)
24055 expects these references to be resolved. */
24056 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
24057 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
24058 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
24059 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
24060 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24061 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
24062 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
24063 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
24064 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
24065 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
24066 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
24067 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
24068 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
24069 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
24070 return 0;
24071
24072 /* Always leave these relocations for the linker. */
24073 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
24074 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
24075 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
24076 return 1;
24077
24078 /* Always generate relocations against function symbols. */
24079 if (fixp->fx_r_type == BFD_RELOC_32
24080 && fixp->fx_addsy
24081 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
24082 return 1;
24083
24084 return generic_force_reloc (fixp);
24085 }
24086
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not: Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return TRUE;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  if (fixP->fx_subsy == NULL
      && THUMB_IS_FUNC (fixP->fx_addsy))
    return FALSE;

  switch (fixP->fx_r_type)
    {
      /* We need the symbol name for the VTABLE entries.  */
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:

      /* Don't allow symbols to be discarded on GOT related relocs.  */
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_TARGET2:

      /* MOVW/MOVT REL relocations have limited offsets, so keep the
	 symbols.  */
    case BFD_RELOC_ARM_MOVW:
    case BFD_RELOC_ARM_MOVT:
    case BFD_RELOC_ARM_MOVW_PCREL:
    case BFD_RELOC_ARM_MOVT_PCREL:
    case BFD_RELOC_ARM_THUMB_MOVW:
    case BFD_RELOC_ARM_THUMB_MOVT:
    case BFD_RELOC_ARM_THUMB_MOVW_PCREL:
    case BFD_RELOC_ARM_THUMB_MOVT_PCREL:
      return FALSE;

    default:
      break;
    }

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
24163
24164 #ifdef OBJ_ELF
24165 const char *
24166 elf32_arm_target_format (void)
24167 {
24168 #ifdef TE_SYMBIAN
24169 return (target_big_endian
24170 ? "elf32-bigarm-symbian"
24171 : "elf32-littlearm-symbian");
24172 #elif defined (TE_VXWORKS)
24173 return (target_big_endian
24174 ? "elf32-bigarm-vxworks"
24175 : "elf32-littlearm-vxworks");
24176 #elif defined (TE_NACL)
24177 return (target_big_endian
24178 ? "elf32-bigarm-nacl"
24179 : "elf32-littlearm-nacl");
24180 #else
24181 if (target_big_endian)
24182 return "elf32-bigarm";
24183 else
24184 return "elf32-littlearm";
24185 #endif
24186 }
24187
/* Per-symbol hook run at write-out time; ARM needs no extra work
   beyond the generic ELF symbol frobbing.  */
void
armelf_frob_symbol (symbolS * symp,
		    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
24194 #endif
24195
24196 /* MD interface: Finalization. */
24197
24198 void
24199 arm_cleanup (void)
24200 {
24201 literal_pool * pool;
24202
24203 /* Ensure that all the IT blocks are properly closed. */
24204 check_it_blocks_finished ();
24205
24206 for (pool = list_of_pools; pool; pool = pool->next)
24207 {
24208 /* Put it at the end of the relevant section. */
24209 subseg_set (pool->section, pool->sub_section);
24210 #ifdef OBJ_ELF
24211 arm_elf_change_section ();
24212 #endif
24213 s_ltorg (0);
24214 }
24215 }
24216
24217 #ifdef OBJ_ELF
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  Called via bfd_map_over_sections; ABFD and DUMMY are
   unused.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Only the last mapping symbol of a frag can be redundant.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Walk forward over empty frags to decide whether SYM is
	 superseded by a later mapping symbol.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
24282 #endif
24283
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  For COFF the storage class is rewritten; for ELF the
   st_target_internal / st_info fields are tagged instead.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Interworking symbols get all n_flags bits set.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* Mapping symbols ($a/$t/$d etc.) must not be retagged.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_target_internal
		  = ST_BRANCH_TO_THUMB;
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
24365
24366 /* MD interface: Initialization. */
24367
24368 static void
24369 set_constant_flonums (void)
24370 {
24371 int i;
24372
24373 for (i = 0; i < NUM_FLOAT_VALS; i++)
24374 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
24375 abort ();
24376 }
24377
24378 /* Auto-select Thumb mode if it's the only available instruction set for the
24379 given architecture. */
24380
24381 static void
24382 autoselect_thumb_from_cpu_variant (void)
24383 {
24384 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
24385 opcode_select (16);
24386 }
24387
24388 void
24389 md_begin (void)
24390 {
24391 unsigned mach;
24392 unsigned int i;
24393
24394 if ( (arm_ops_hsh = hash_new ()) == NULL
24395 || (arm_cond_hsh = hash_new ()) == NULL
24396 || (arm_shift_hsh = hash_new ()) == NULL
24397 || (arm_psr_hsh = hash_new ()) == NULL
24398 || (arm_v7m_psr_hsh = hash_new ()) == NULL
24399 || (arm_reg_hsh = hash_new ()) == NULL
24400 || (arm_reloc_hsh = hash_new ()) == NULL
24401 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
24402 as_fatal (_("virtual memory exhausted"));
24403
24404 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
24405 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
24406 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
24407 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
24408 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
24409 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
24410 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
24411 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
24412 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
24413 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
24414 (void *) (v7m_psrs + i));
24415 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
24416 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
24417 for (i = 0;
24418 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
24419 i++)
24420 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
24421 (void *) (barrier_opt_names + i));
24422 #ifdef OBJ_ELF
24423 for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
24424 {
24425 struct reloc_entry * entry = reloc_names + i;
24426
24427 if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
24428 /* This makes encode_branch() use the EABI versions of this relocation. */
24429 entry->reloc = BFD_RELOC_UNUSED;
24430
24431 hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
24432 }
24433 #endif
24434
24435 set_constant_flonums ();
24436
24437 /* Set the cpu variant based on the command-line options. We prefer
24438 -mcpu= over -march= if both are set (as for GCC); and we prefer
24439 -mfpu= over any other way of setting the floating point unit.
24440 Use of legacy options with new options are faulted. */
24441 if (legacy_cpu)
24442 {
24443 if (mcpu_cpu_opt || march_cpu_opt)
24444 as_bad (_("use of old and new-style options to set CPU type"));
24445
24446 mcpu_cpu_opt = legacy_cpu;
24447 }
24448 else if (!mcpu_cpu_opt)
24449 mcpu_cpu_opt = march_cpu_opt;
24450
24451 if (legacy_fpu)
24452 {
24453 if (mfpu_opt)
24454 as_bad (_("use of old and new-style options to set FPU type"));
24455
24456 mfpu_opt = legacy_fpu;
24457 }
24458 else if (!mfpu_opt)
24459 {
24460 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
24461 || defined (TE_NetBSD) || defined (TE_VXWORKS))
24462 /* Some environments specify a default FPU. If they don't, infer it
24463 from the processor. */
24464 if (mcpu_fpu_opt)
24465 mfpu_opt = mcpu_fpu_opt;
24466 else
24467 mfpu_opt = march_fpu_opt;
24468 #else
24469 mfpu_opt = &fpu_default;
24470 #endif
24471 }
24472
24473 if (!mfpu_opt)
24474 {
24475 if (mcpu_cpu_opt != NULL)
24476 mfpu_opt = &fpu_default;
24477 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
24478 mfpu_opt = &fpu_arch_vfp_v2;
24479 else
24480 mfpu_opt = &fpu_arch_fpa;
24481 }
24482
24483 #ifdef CPU_DEFAULT
24484 if (!mcpu_cpu_opt)
24485 {
24486 mcpu_cpu_opt = &cpu_default;
24487 selected_cpu = cpu_default;
24488 }
24489 else if (no_cpu_selected ())
24490 selected_cpu = cpu_default;
24491 #else
24492 if (mcpu_cpu_opt)
24493 selected_cpu = *mcpu_cpu_opt;
24494 else
24495 mcpu_cpu_opt = &arm_arch_any;
24496 #endif
24497
24498 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24499
24500 autoselect_thumb_from_cpu_variant ();
24501
24502 arm_arch_used = thumb_arch_used = arm_arch_none;
24503
24504 #if defined OBJ_COFF || defined OBJ_ELF
24505 {
24506 unsigned int flags = 0;
24507
24508 #if defined OBJ_ELF
24509 flags = meabi_flags;
24510
24511 switch (meabi_flags)
24512 {
24513 case EF_ARM_EABI_UNKNOWN:
24514 #endif
24515 /* Set the flags in the private structure. */
24516 if (uses_apcs_26) flags |= F_APCS26;
24517 if (support_interwork) flags |= F_INTERWORK;
24518 if (uses_apcs_float) flags |= F_APCS_FLOAT;
24519 if (pic_code) flags |= F_PIC;
24520 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
24521 flags |= F_SOFT_FLOAT;
24522
24523 switch (mfloat_abi_opt)
24524 {
24525 case ARM_FLOAT_ABI_SOFT:
24526 case ARM_FLOAT_ABI_SOFTFP:
24527 flags |= F_SOFT_FLOAT;
24528 break;
24529
24530 case ARM_FLOAT_ABI_HARD:
24531 if (flags & F_SOFT_FLOAT)
24532 as_bad (_("hard-float conflicts with specified fpu"));
24533 break;
24534 }
24535
24536 /* Using pure-endian doubles (even if soft-float). */
24537 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
24538 flags |= F_VFP_FLOAT;
24539
24540 #if defined OBJ_ELF
24541 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
24542 flags |= EF_ARM_MAVERICK_FLOAT;
24543 break;
24544
24545 case EF_ARM_EABI_VER4:
24546 case EF_ARM_EABI_VER5:
24547 /* No additional flags to set. */
24548 break;
24549
24550 default:
24551 abort ();
24552 }
24553 #endif
24554 bfd_set_private_flags (stdoutput, flags);
24555
24556 /* We have run out flags in the COFF header to encode the
24557 status of ATPCS support, so instead we create a dummy,
24558 empty, debug section called .arm.atpcs. */
24559 if (atpcs)
24560 {
24561 asection * sec;
24562
24563 sec = bfd_make_section (stdoutput, ".arm.atpcs");
24564
24565 if (sec != NULL)
24566 {
24567 bfd_set_section_flags
24568 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
24569 bfd_set_section_size (stdoutput, sec, 0);
24570 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
24571 }
24572 }
24573 }
24574 #endif
24575
24576 /* Record the CPU type as well. */
24577 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
24578 mach = bfd_mach_arm_iWMMXt2;
24579 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
24580 mach = bfd_mach_arm_iWMMXt;
24581 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
24582 mach = bfd_mach_arm_XScale;
24583 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
24584 mach = bfd_mach_arm_ep9312;
24585 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
24586 mach = bfd_mach_arm_5TE;
24587 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
24588 {
24589 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
24590 mach = bfd_mach_arm_5T;
24591 else
24592 mach = bfd_mach_arm_5;
24593 }
24594 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
24595 {
24596 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
24597 mach = bfd_mach_arm_4T;
24598 else
24599 mach = bfd_mach_arm_4;
24600 }
24601 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
24602 mach = bfd_mach_arm_3M;
24603 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
24604 mach = bfd_mach_arm_3;
24605 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
24606 mach = bfd_mach_arm_2a;
24607 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
24608 mach = bfd_mach_arm_2;
24609 else
24610 mach = bfd_mach_arm_unknown;
24611
24612 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
24613 }
24614
24615 /* Command line processing. */
24616
24617 /* md_parse_option
24618 Invocation line includes a switch not recognized by the base assembler.
24619 See if it's a processor-specific option.
24620
24621 This routine is somewhat complicated by the need for backwards
24622 compatibility (since older releases of gcc can't be changed).
24623 The new options try to make the interface as compatible as
24624 possible with GCC.
24625
24626 New options (supported) are:
24627
24628 -mcpu=<cpu name> Assemble for selected processor
24629 -march=<architecture name> Assemble for selected architecture
24630 -mfpu=<fpu architecture> Assemble for selected FPU.
24631 -EB/-mbig-endian Big-endian
24632 -EL/-mlittle-endian Little-endian
24633 -k Generate PIC code
24634 -mthumb Start in Thumb mode
24635 -mthumb-interwork Code supports ARM/Thumb interworking
24636
24637 -m[no-]warn-deprecated Warn about deprecated features
24638 -m[no-]warn-syms Warn when symbols match instructions
24639
24640 For now we will also provide support for:
24641
24642 -mapcs-32 32-bit Program counter
24643 -mapcs-26 26-bit Program counter
   -mapcs-float		  Floats passed in FP registers
24645 -mapcs-reentrant Reentrant code
24646 -matpcs
24647 (sometime these will probably be replaced with -mapcs=<list of options>
24648 and -matpcs=<list of options>)
24649
   The remaining options are only supported for backwards compatibility.
24651 Cpu variants, the arm part is optional:
24652 -m[arm]1 Currently not supported.
24653 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24654 -m[arm]3 Arm 3 processor
24655 -m[arm]6[xx], Arm 6 processors
24656 -m[arm]7[xx][t][[d]m] Arm 7 processors
24657 -m[arm]8[10] Arm 8 processors
24658 -m[arm]9[20][tdmi] Arm 9 processors
24659 -mstrongarm[110[0]] StrongARM processors
24660 -mxscale XScale processors
24661 -m[arm]v[2345[t[e]]] Arm architectures
24662 -mall All (except the ARM1)
24663 FP variants:
24664 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24665 -mfpe-old (No float load/store multiples)
24666 -mvfpxd VFP Single precision
24667 -mvfp All VFP
24668 -mno-fpu Disable all floating point instructions
24669
24670 The following CPU names are recognized:
24671 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24672 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24673 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24674 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24675 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24676 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24677 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
24678
24679 */
24680
/* Short options recognized by md_parse_option: "-m<arg>" carries all the
   CPU/architecture/FPU and legacy selections; "-k" requests PIC code.  */
const char * md_shortopts = "m:k";

/* getopt return codes for the long-only options below.  Endianness
   switches are only defined when the target can actually be assembled
   in that byte order.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

/* Long options, consumed by the generic gas getopt loop and dispatched
   in md_parse_option via the OPTION_* codes above.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};


size_t md_longopts_size = sizeof (md_longopts);
24709
/* Describes a simple -m style option that just sets an integer flag
   when matched; used for the arm_opts[] table below.  */
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int *var;		/* Variable to change.  May be NULL if the option
			   is accepted but has no effect.  */
  int value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};
24718
24719 struct arm_option_table arm_opts[] =
24720 {
24721 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
24722 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
24723 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24724 &support_interwork, 1, NULL},
24725 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
24726 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
24727 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
24728 1, NULL},
24729 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
24730 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
24731 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
24732 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
24733 NULL},
24734
24735 /* These are recognized by the assembler, but have no affect on code. */
24736 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
24737 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
24738
24739 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
24740 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24741 &warn_on_deprecated, 0, NULL},
24742 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
24743 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
24744 {NULL, NULL, NULL, 0, NULL}
24745 };
24746
/* Describes a deprecated CPU/FPU selection option that maps onto one of
   the feature-set pointers (legacy_cpu / legacy_fpu); used only for the
   arm_legacy_opts[] table below.  */
struct arm_legacy_option_table
{
  char *option;			/* Option name to match.  */
  const arm_feature_set **var;	/* Variable to change.  */
  const arm_feature_set value;	/* What to change it to.  */
  char *deprecated;		/* If non-null, print this message.  */
};
24754
/* Deprecated -m<cpu>/-m<arch>/-m<fpu> spellings kept only for backwards
   compatibility.  Each entry sets legacy_cpu or legacy_fpu and carries the
   replacement option to suggest in the deprecation warning.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
24867
/* Describes one -mcpu= selection: the CPU's feature set, the FPU assumed
   when the user gives no -mfpu=, and the canonical name recorded in the
   EABI attributes.  Used for the arm_cpus[] table below.  */
struct arm_cpu_option_table
{
  char *name;			/* Name to match against the -mcpu= argument.  */
  size_t name_len;		/* Precomputed strlen (name).  */
  const arm_feature_set value;	/* Features implied by this CPU.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...  */
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};
24880
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* The second field of each entry is precomputed by the macro so the
   option parser can compare without calling strlen at runtime.  */
#define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
static const struct arm_cpu_option_table arm_cpus[] =
{
  ARM_CPU_OPT ("all", ARM_ANY, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"),
  ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  ARM_CPU_OPT ("fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.  */
  ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
  ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"),
  ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"),
  ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"),
  ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
  ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2,
	       "ARM1026EJ-S"),
  ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"),
  ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL),
  ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2,
	       "ARM1136JF-S"),
  ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, "MPCore"),
  ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, "MPCore"),
  ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL),
  ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6KZ, FPU_NONE, NULL),
  ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6KZ, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC,
	       FPU_NONE, "Cortex-A5"),
  ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A7"),
  ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC,
	       ARM_FEATURE_COPROC (FPU_VFP_V3
				   | FPU_NEON_EXT_V1),
	       "Cortex-A8"),
  ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC,
	       ARM_FEATURE_COPROC (FPU_VFP_V3
				   | FPU_NEON_EXT_V1),
	       "Cortex-A9"),
  ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A12"),
  ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A15"),
  ARM_CPU_OPT ("cortex-a17", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
	       "Cortex-A17"),
  ARM_CPU_OPT ("cortex-a32", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A32"),
  ARM_CPU_OPT ("cortex-a35", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A35"),
  ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A53"),
  ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A57"),
  ARM_CPU_OPT ("cortex-a72", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Cortex-A72"),
  ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R, FPU_NONE, "Cortex-R4"),
  ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16,
	       "Cortex-R4F"),
  ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV,
	       FPU_NONE, "Cortex-R5"),
  ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV,
	       FPU_ARCH_VFP_V3D16,
	       "Cortex-R7"),
  ARM_CPU_OPT ("cortex-m7", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M7"),
  ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"),
  ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"),
  ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M1"),
  ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0"),
  ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0+"),
  ARM_CPU_OPT ("exynos-m1", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Samsung " \
	       "Exynos M1"),
  ARM_CPU_OPT ("qdf24xx", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "Qualcomm "
	       "QDF24XX"),

  /* ??? XSCALE is really an architecture.  */
  ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* ??? iwmmxt is not a processor.  */
  ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
  ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
  /* Maverick */
  ARM_CPU_OPT ("ep9312", ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
	       FPU_ARCH_MAVERICK, "ARM920T"),
  /* Marvell processors.  */
  ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						| ARM_EXT_SEC,
						ARM_EXT2_V6T2_V8M),
	       FPU_ARCH_VFP_V3D16, NULL),
  ARM_CPU_OPT ("marvell-whitney", ARM_FEATURE_CORE (ARM_AEXT_V7A | ARM_EXT_MP
						    | ARM_EXT_SEC,
						    ARM_EXT2_V6T2_V8M),
	       FPU_ARCH_NEON_VFP_V4, NULL),
  /* APM X-Gene family.  */
  ARM_CPU_OPT ("xgene1", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "APM X-Gene 1"),
  ARM_CPU_OPT ("xgene2", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	       "APM X-Gene 2"),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
#undef ARM_CPU_OPT
25048
/* Describes one -march= selection: the architecture's feature set and the
   FPU assumed when no -mfpu= is given.  Used for arm_archs[] below.  */
struct arm_arch_option_table
{
  char *name;			/* Name to match against the -march= argument.  */
  size_t name_len;		/* Precomputed strlen (name).  */
  const arm_feature_set value;	/* Features implied by this architecture.  */
  const arm_feature_set default_fpu;	/* FPU assumed unless -mfpu= is given.  */
};
25056
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* The macro precomputes strlen (name) for fast option matching.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_ARCH_OPT
25118
/* ISA extensions in the co-processor and main instruction set space.  */
/* Describes one architecture-extension suffix (e.g. "+crc"): the bits
   merged in for "+ext", the bits cleared for "+noext", and the
   architectures the extension is valid for.  */
struct arm_option_extension_value_table
{
  char *name;				/* Extension name to match.  */
  size_t name_len;			/* Precomputed strlen (name).  */
  const arm_feature_set merge_value;	/* Bits enabled by "+<name>".  */
  const arm_feature_set clear_value;	/* Bits disabled by "+no<name>".  */
  const arm_feature_set allowed_archs;	/* Architectures this is valid on.  */
};
25128
/* The following table must be in alphabetical order with a NULL last entry.
   */
/* The macro precomputes strlen (name) for fast option matching.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, AA }
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ANY),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ANY),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ANY),
  ARM_EXT_OPT ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A | ARM_EXT_V7R)),
  ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V7A)),
  ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ANY),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
#undef ARM_EXT_OPT
25180
/* ISA floating-point and Advanced SIMD extensions.
   Maps an -mfpu= / .fpu name onto the feature bits it enables.  */
struct arm_option_fpu_value_table
{
  char *name;			/* FPU name as written by the user.  */
  const arm_feature_set value;	/* Corresponding FPU feature bits.  */
};
25187
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Searched linearly by arm_parse_fpu and s_arm_fpu;
   no particular ordering is required.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
25237
/* Simple name -> integer mapping, used for the -mfloat-abi= and
   -meabi= command line options.  */
struct arm_option_value_table
{
  char *name;	/* Option argument text.  */
  long value;	/* Value that argument selects.  */
};
25243
/* Recognized arguments to -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
25251
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.
   Recognized arguments to -meabi=.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif
25262
/* Table entry for a command line option that takes a sub-argument,
   eg. -mcpu=<cpu name>; see arm_long_opts below.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
25270
25271 static bfd_boolean
25272 arm_parse_extension (char *str, const arm_feature_set **opt_p)
25273 {
25274 arm_feature_set *ext_set = (arm_feature_set *)
25275 xmalloc (sizeof (arm_feature_set));
25276
25277 /* We insist on extensions being specified in alphabetical order, and with
25278 extensions being added before being removed. We achieve this by having
25279 the global ARM_EXTENSIONS table in alphabetical order, and using the
25280 ADDING_VALUE variable to indicate whether we are adding an extension (1)
25281 or removing it (0) and only allowing it to change in the order
25282 -1 -> 1 -> 0. */
25283 const struct arm_option_extension_value_table * opt = NULL;
25284 int adding_value = -1;
25285
25286 /* Copy the feature set, so that we can modify it. */
25287 *ext_set = **opt_p;
25288 *opt_p = ext_set;
25289
25290 while (str != NULL && *str != 0)
25291 {
25292 char *ext;
25293 size_t len;
25294
25295 if (*str != '+')
25296 {
25297 as_bad (_("invalid architectural extension"));
25298 return FALSE;
25299 }
25300
25301 str++;
25302 ext = strchr (str, '+');
25303
25304 if (ext != NULL)
25305 len = ext - str;
25306 else
25307 len = strlen (str);
25308
25309 if (len >= 2 && strncmp (str, "no", 2) == 0)
25310 {
25311 if (adding_value != 0)
25312 {
25313 adding_value = 0;
25314 opt = arm_extensions;
25315 }
25316
25317 len -= 2;
25318 str += 2;
25319 }
25320 else if (len > 0)
25321 {
25322 if (adding_value == -1)
25323 {
25324 adding_value = 1;
25325 opt = arm_extensions;
25326 }
25327 else if (adding_value != 1)
25328 {
25329 as_bad (_("must specify extensions to add before specifying "
25330 "those to remove"));
25331 return FALSE;
25332 }
25333 }
25334
25335 if (len == 0)
25336 {
25337 as_bad (_("missing architectural extension"));
25338 return FALSE;
25339 }
25340
25341 gas_assert (adding_value != -1);
25342 gas_assert (opt != NULL);
25343
25344 /* Scan over the options table trying to find an exact match. */
25345 for (; opt->name != NULL; opt++)
25346 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25347 {
25348 /* Check we can apply the extension to this architecture. */
25349 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
25350 {
25351 as_bad (_("extension does not apply to the base architecture"));
25352 return FALSE;
25353 }
25354
25355 /* Add or remove the extension. */
25356 if (adding_value)
25357 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
25358 else
25359 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
25360
25361 break;
25362 }
25363
25364 if (opt->name == NULL)
25365 {
25366 /* Did we fail to find an extension because it wasn't specified in
25367 alphabetical order, or because it does not exist? */
25368
25369 for (opt = arm_extensions; opt->name != NULL; opt++)
25370 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25371 break;
25372
25373 if (opt->name == NULL)
25374 as_bad (_("unknown architectural extension `%s'"), str);
25375 else
25376 as_bad (_("architectural extensions must be specified in "
25377 "alphabetical order"));
25378
25379 return FALSE;
25380 }
25381 else
25382 {
25383 /* We should skip the extension we've just matched the next time
25384 round. */
25385 opt++;
25386 }
25387
25388 str = ext;
25389 };
25390
25391 return TRUE;
25392 }
25393
25394 static bfd_boolean
25395 arm_parse_cpu (char *str)
25396 {
25397 const struct arm_cpu_option_table *opt;
25398 char *ext = strchr (str, '+');
25399 size_t len;
25400
25401 if (ext != NULL)
25402 len = ext - str;
25403 else
25404 len = strlen (str);
25405
25406 if (len == 0)
25407 {
25408 as_bad (_("missing cpu name `%s'"), str);
25409 return FALSE;
25410 }
25411
25412 for (opt = arm_cpus; opt->name != NULL; opt++)
25413 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25414 {
25415 mcpu_cpu_opt = &opt->value;
25416 mcpu_fpu_opt = &opt->default_fpu;
25417 if (opt->canonical_name)
25418 {
25419 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
25420 strcpy (selected_cpu_name, opt->canonical_name);
25421 }
25422 else
25423 {
25424 size_t i;
25425
25426 if (len >= sizeof selected_cpu_name)
25427 len = (sizeof selected_cpu_name) - 1;
25428
25429 for (i = 0; i < len; i++)
25430 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25431 selected_cpu_name[i] = 0;
25432 }
25433
25434 if (ext != NULL)
25435 return arm_parse_extension (ext, &mcpu_cpu_opt);
25436
25437 return TRUE;
25438 }
25439
25440 as_bad (_("unknown cpu `%s'"), str);
25441 return FALSE;
25442 }
25443
25444 static bfd_boolean
25445 arm_parse_arch (char *str)
25446 {
25447 const struct arm_arch_option_table *opt;
25448 char *ext = strchr (str, '+');
25449 size_t len;
25450
25451 if (ext != NULL)
25452 len = ext - str;
25453 else
25454 len = strlen (str);
25455
25456 if (len == 0)
25457 {
25458 as_bad (_("missing architecture name `%s'"), str);
25459 return FALSE;
25460 }
25461
25462 for (opt = arm_archs; opt->name != NULL; opt++)
25463 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
25464 {
25465 march_cpu_opt = &opt->value;
25466 march_fpu_opt = &opt->default_fpu;
25467 strcpy (selected_cpu_name, opt->name);
25468
25469 if (ext != NULL)
25470 return arm_parse_extension (ext, &march_cpu_opt);
25471
25472 return TRUE;
25473 }
25474
25475 as_bad (_("unknown architecture `%s'\n"), str);
25476 return FALSE;
25477 }
25478
25479 static bfd_boolean
25480 arm_parse_fpu (char * str)
25481 {
25482 const struct arm_option_fpu_value_table * opt;
25483
25484 for (opt = arm_fpus; opt->name != NULL; opt++)
25485 if (streq (opt->name, str))
25486 {
25487 mfpu_opt = &opt->value;
25488 return TRUE;
25489 }
25490
25491 as_bad (_("unknown floating point format `%s'\n"), str);
25492 return FALSE;
25493 }
25494
25495 static bfd_boolean
25496 arm_parse_float_abi (char * str)
25497 {
25498 const struct arm_option_value_table * opt;
25499
25500 for (opt = arm_float_abis; opt->name != NULL; opt++)
25501 if (streq (opt->name, str))
25502 {
25503 mfloat_abi_opt = opt->value;
25504 return TRUE;
25505 }
25506
25507 as_bad (_("unknown floating point abi `%s'\n"), str);
25508 return FALSE;
25509 }
25510
#ifdef OBJ_ELF
/* Parse the argument of -meabi= and record the selected EABI version's
   flags in meabi_flags.  Returns FALSE after a diagnostic on error.  */

static bfd_boolean
arm_parse_eabi (char * str)
{
  const struct arm_option_value_table *ver;

  for (ver = arm_eabis; ver->name != NULL; ver++)
    {
      if (! streq (ver->name, str))
	continue;

      meabi_flags = ver->value;
      return TRUE;
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
25527
25528 static bfd_boolean
25529 arm_parse_it_mode (char * str)
25530 {
25531 bfd_boolean ret = TRUE;
25532
25533 if (streq ("arm", str))
25534 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
25535 else if (streq ("thumb", str))
25536 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
25537 else if (streq ("always", str))
25538 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
25539 else if (streq ("never", str))
25540 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
25541 else
25542 {
25543 as_bad (_("unknown implicit IT mode `%s', should be "\
25544 "arm, thumb, always, or never."), str);
25545 ret = FALSE;
25546 }
25547
25548 return ret;
25549 }
25550
25551 static bfd_boolean
25552 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
25553 {
25554 codecomposer_syntax = TRUE;
25555 arm_comment_chars[0] = ';';
25556 arm_line_separator_chars[0] = 0;
25557 return TRUE;
25558 }
25559
/* Command line options that take a sub-argument; dispatched from
   md_parse_option by prefix match on the option text.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {NULL, NULL, 0, NULL}
};
25580
/* Target hook: handle command line option C (with argument ARG, which may
   be NULL).  Fixed options are handled directly; anything else is looked
   up in the arm_opts, arm_legacy_opts and arm_long_opts tables in turn.
   Returns non-zero if the option was recognized, 0 otherwise.  */
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      /* Exact-spelling options, eg. -mthumb.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options, kept for backwards compatibility.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG + strlen - 1 skips the
		 matched prefix but keeps its trailing character (eg. the
		 '=' of "mcpu=" is not part of the sub-argument).  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
25671
/* Target hook: print a description of the ARM-specific command line
   options to FP, for --help output.  */
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  /* Exact-spelling options.  */
  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  /* Options taking a sub-argument, eg. -mcpu=.  */
  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
25701
25702
25703 #ifdef OBJ_ELF
/* One entry of the cpu_arch_ver table: maps a set of architecture
   feature flags to the corresponding EABI Tag_CPU_arch value.  */
typedef struct
{
  int val;		/* EABI Tag_CPU_arch value.  */
  arm_feature_set flags;	/* Features implied by that architecture.  */
} cpu_arch_ver_table;
25709
/* Mapping from CPU features to EABI CPU arch values.  As a general rule, table
   must be sorted least features first but some reordering is needed, eg. for
   Thumb-2 instructions to be detected as coming from ARMv6T2.  The search in
   aeabi_set_public_attributes keeps the LAST entry whose features are
   present, so entry order encodes precedence — do not sort blindly.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
  {1, ARM_ARCH_V4},
  {2, ARM_ARCH_V4T},
  {3, ARM_ARCH_V5},
  {3, ARM_ARCH_V5T},
  {4, ARM_ARCH_V5TE},
  {5, ARM_ARCH_V5TEJ},
  {6, ARM_ARCH_V6},
  {9, ARM_ARCH_V6K},
  {7, ARM_ARCH_V6Z},
  {11, ARM_ARCH_V6M},
  {12, ARM_ARCH_V6SM},
  {8, ARM_ARCH_V6T2},
  {10, ARM_ARCH_V7VE},
  {10, ARM_ARCH_V7R},
  {10, ARM_ARCH_V7M},
  {14, ARM_ARCH_V8A},
  {16, ARM_ARCH_V8M_BASE},
  {17, ARM_ARCH_V8M_MAIN},
  {0, ARM_ARCH_NONE}
};
25735
25736 /* Set an attribute if it has not already been set by the user. */
25737 static void
25738 aeabi_set_attribute_int (int tag, int value)
25739 {
25740 if (tag < 1
25741 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25742 || !attributes_set_explicitly[tag])
25743 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
25744 }
25745
25746 static void
25747 aeabi_set_attribute_string (int tag, const char *value)
25748 {
25749 if (tag < 1
25750 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25751 || !attributes_set_explicitly[tag])
25752 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25753 }
25754
/* Set the public EABI object attributes (.ARM.attributes section) from
   the features actually used plus any user-selected CPU/architecture.
   Each attribute is only emitted if the user has not already set it
   explicitly (see aeabi_set_attribute_int/string).  */
void
aeabi_set_public_attributes (void)
{
  int arch;
  char profile;
  int virt_sec = 0;
  int fp16_optional = 0;
  arm_feature_set flags;
  arm_feature_set tmp;
  arm_feature_set arm_arch_v8m_base = ARM_ARCH_V8M_BASE;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
    ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

  selected_cpu = flags;

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  /* We need to make sure that the attributes do not identify us as v6S-M
     when the only v6S-M feature in use is the Operating System Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
    if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
      ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);

  /* Walk cpu_arch_ver keeping the LAST architecture that contributes a
     feature we use; entry order in that table encodes precedence.  */
  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == TAG_CPU_ARCH_V7
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = TAG_CPU_ARCH_V7E_M;

  /* Features beyond the v8-M baseline promote the arch to v8-M mainline.  */
  ARM_CLEAR_FEATURE (tmp, flags, arm_arch_v8m_base);
  if (arch == TAG_CPU_ARCH_V8M_BASE && ARM_CPU_HAS_FEATURE (tmp, arm_arch_any))
    arch = TAG_CPU_ARCH_V8M_MAIN;

  /* In cpu_arch_ver ARMv8-A is before ARMv8-M for atomics to be detected as
     coming from ARMv8-A.  However, since ARMv8-A has more instructions than
     ARMv8-M, -march=all must be detected as ARMv8-A.  */
  if (arch == TAG_CPU_ARCH_V8M_MAIN
      && ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    arch = TAG_CPU_ARCH_V8;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* "armvN" names are reported upper-cased without the "armv" prefix.  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || (ARM_CPU_HAS_FEATURE (flags, arm_ext_atomics)
	  && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m)))
    profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    profile = 'M';
  else
    profile = '\0';

  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8
	      || (arch >= TAG_CPU_ARCH_V8M_BASE
		  && arch <= TAG_CPU_ARCH_V8M_MAIN));
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use: bit 0 = Security Extensions, bit 1 =
     Virtualization Extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
}
25978
25979 /* Add the default contents for the .ARM.attributes section. */
25980 void
25981 arm_md_end (void)
25982 {
25983 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25984 return;
25985
25986 aeabi_set_public_attributes ();
25987 }
25988 #endif /* OBJ_ELF */
25989
25990
25991 /* Parse a .cpu directive. */
25992
25993 static void
25994 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
25995 {
25996 const struct arm_cpu_option_table *opt;
25997 char *name;
25998 char saved_char;
25999
26000 name = input_line_pointer;
26001 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26002 input_line_pointer++;
26003 saved_char = *input_line_pointer;
26004 *input_line_pointer = 0;
26005
26006 /* Skip the first "all" entry. */
26007 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
26008 if (streq (opt->name, name))
26009 {
26010 mcpu_cpu_opt = &opt->value;
26011 selected_cpu = opt->value;
26012 if (opt->canonical_name)
26013 strcpy (selected_cpu_name, opt->canonical_name);
26014 else
26015 {
26016 int i;
26017 for (i = 0; opt->name[i]; i++)
26018 selected_cpu_name[i] = TOUPPER (opt->name[i]);
26019
26020 selected_cpu_name[i] = 0;
26021 }
26022 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26023 *input_line_pointer = saved_char;
26024 demand_empty_rest_of_line ();
26025 return;
26026 }
26027 as_bad (_("unknown cpu `%s'"), name);
26028 *input_line_pointer = saved_char;
26029 ignore_rest_of_line ();
26030 }
26031
26032
26033 /* Parse a .arch directive. */
26034
26035 static void
26036 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
26037 {
26038 const struct arm_arch_option_table *opt;
26039 char saved_char;
26040 char *name;
26041
26042 name = input_line_pointer;
26043 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26044 input_line_pointer++;
26045 saved_char = *input_line_pointer;
26046 *input_line_pointer = 0;
26047
26048 /* Skip the first "all" entry. */
26049 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26050 if (streq (opt->name, name))
26051 {
26052 mcpu_cpu_opt = &opt->value;
26053 selected_cpu = opt->value;
26054 strcpy (selected_cpu_name, opt->name);
26055 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26056 *input_line_pointer = saved_char;
26057 demand_empty_rest_of_line ();
26058 return;
26059 }
26060
26061 as_bad (_("unknown architecture `%s'\n"), name);
26062 *input_line_pointer = saved_char;
26063 ignore_rest_of_line ();
26064 }
26065
26066
26067 /* Parse a .object_arch directive. */
26068
26069 static void
26070 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
26071 {
26072 const struct arm_arch_option_table *opt;
26073 char saved_char;
26074 char *name;
26075
26076 name = input_line_pointer;
26077 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26078 input_line_pointer++;
26079 saved_char = *input_line_pointer;
26080 *input_line_pointer = 0;
26081
26082 /* Skip the first "all" entry. */
26083 for (opt = arm_archs + 1; opt->name != NULL; opt++)
26084 if (streq (opt->name, name))
26085 {
26086 object_arch = &opt->value;
26087 *input_line_pointer = saved_char;
26088 demand_empty_rest_of_line ();
26089 return;
26090 }
26091
26092 as_bad (_("unknown architecture `%s'\n"), name);
26093 *input_line_pointer = saved_char;
26094 ignore_rest_of_line ();
26095 }
26096
/* Parse a .arch_extension directive: add ("<ext>") or remove ("no<ext>")
   an architectural extension from the currently selected CPU's feature
   set.  */

static void
s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_extension_value_table *opt;
  char saved_char;
  char *name;
  int adding_value = 1;

  /* Temporarily NUL-terminate the argument for the string compares.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* A "no" prefix requests removal of the extension, eg. "noidiv".  */
  if (strlen (name) >= 2
      && strncmp (name, "no", 2) == 0)
    {
      adding_value = 0;
      name += 2;
    }

  for (opt = arm_extensions; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
	  {
	    as_bad (_("architectural extension `%s' is not allowed for the "
		      "current base architecture"), name);
	    /* Break (not return) so the saved character is restored below;
	       opt->name is non-NULL here so no second error is issued.  */
	    break;
	  }

	if (adding_value)
	  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu,
				  opt->merge_value);
	else
	  ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->clear_value);

	mcpu_cpu_opt = &selected_cpu;
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  if (opt->name == NULL)
    as_bad (_("unknown architecture extension `%s'\n"), name);

  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
26149
26150 /* Parse a .fpu directive. */
26151
26152 static void
26153 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
26154 {
26155 const struct arm_option_fpu_value_table *opt;
26156 char saved_char;
26157 char *name;
26158
26159 name = input_line_pointer;
26160 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
26161 input_line_pointer++;
26162 saved_char = *input_line_pointer;
26163 *input_line_pointer = 0;
26164
26165 for (opt = arm_fpus; opt->name != NULL; opt++)
26166 if (streq (opt->name, name))
26167 {
26168 mfpu_opt = &opt->value;
26169 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
26170 *input_line_pointer = saved_char;
26171 demand_empty_rest_of_line ();
26172 return;
26173 }
26174
26175 as_bad (_("unknown floating point format `%s'\n"), name);
26176 *input_line_pointer = saved_char;
26177 ignore_rest_of_line ();
26178 }
26179
/* Copy symbol information.  Propagates the ARM-specific per-symbol flag
   bits (via the ARM_GET_FLAG accessor) from SRC to DEST, eg. when the
   generic code clones a symbol.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
26187
26188 #ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  Used by the .eabi_attribute
   directive so users can write tag names instead of raw numbers.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  static const struct
  {
    const char * name;
    const int tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_FP_arch),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align_needed),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align_preserved),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_FP_HP_extension),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_DIV_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  /* Linear search: the table is small and this is not a hot path.  */
  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
26262
26263
26264 /* Apply sym value for relocations only in the case that they are for
26265 local symbols in the same segment as the fixup and you have the
26266 respective architectural feature for blx and simple switches. */
int
arm_apply_sym_value (struct fix * fixP, segT this_seg)
{
  /* Return non-zero if the symbol's value should be folded into the
     fixup now (i.e. resolved at assembly time), zero if it must be
     left for the linker.  Only applies to same-section local symbols
     on targets with BLX (ARMv5T or later), where ARM<->Thumb calls
     can be rewritten without a linker stub.  */
  if (fixP->fx_addsy
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
      /* PR 17444: If the local symbol is in a different section then a reloc
	 will always be generated for it, so applying the symbol value now
	 will result in a double offset being stored in the relocation.  */
      && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
    {
      switch (fixP->fx_r_type)
	{
	/* ARM-mode BLX or Thumb branch targeting an ARM-state function:
	   the mode switch can be encoded directly (presumably by
	   converting to/from BLX), so the value may be applied now.  */
	case BFD_RELOC_ARM_PCREL_BLX:
	case BFD_RELOC_THUMB_PCREL_BRANCH23:
	  if (ARM_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	/* ARM call or Thumb BLX targeting a Thumb-state function:
	   likewise resolvable at assembly time.  */
	case BFD_RELOC_ARM_PCREL_CALL:
	case BFD_RELOC_THUMB_PCREL_BLX:
	  if (THUMB_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	/* Other relocation types: leave for the linker.  */
	default:
	  break;
	}

    }
  return 0;
}
26299 #endif /* OBJ_ELF */
This page took 0.619315 seconds and 4 git commands to generate.